diff --git a/polkadot/Cargo.lock b/polkadot/Cargo.lock index 00d301601a..369147856d 100644 --- a/polkadot/Cargo.lock +++ b/polkadot/Cargo.lock @@ -682,21 +682,33 @@ dependencies = [ ] [[package]] -name = "bp-kusama" +name = "bp-header-chain" version = "0.1.0" dependencies = [ - "bp-message-lane", - "bp-runtime", + "bp-test-utils", + "finality-grandpa", "frame-support", - "frame-system", - "sp-api", + "parity-scale-codec", + "serde", "sp-core", + "sp-finality-grandpa", "sp-runtime", "sp-std", ] [[package]] -name = "bp-message-lane" +name = "bp-kusama" +version = "0.1.0" +dependencies = [ + "bp-messages", + "bp-polkadot-core", + "bp-runtime", + "sp-api", + "sp-std", +] + +[[package]] +name = "bp-messages" version = "0.1.0" dependencies = [ "bp-runtime", @@ -710,14 +722,43 @@ dependencies = [ name = "bp-polkadot" version = "0.1.0" dependencies = [ - "bp-message-lane", + "bp-messages", + "bp-polkadot-core", + "bp-runtime", + "sp-api", + "sp-std", +] + +[[package]] +name = "bp-polkadot-core" +version = "0.1.0" +dependencies = [ + "bp-messages", "bp-runtime", "frame-support", "frame-system", + "hex", + "parity-scale-codec", "sp-api", "sp-core", "sp-runtime", "sp-std", + "sp-version", +] + +[[package]] +name = "bp-rococo" +version = "0.1.0" +dependencies = [ + "bp-header-chain", + "bp-messages", + "bp-polkadot-core", + "bp-runtime", + "parity-scale-codec", + "sp-api", + "sp-runtime", + "sp-std", + "sp-version", ] [[package]] @@ -725,12 +766,44 @@ name = "bp-runtime" version = "0.1.0" dependencies = [ "frame-support", + "hash-db", "num-traits", "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", + "sp-state-machine", "sp-std", + "sp-trie", +] + +[[package]] +name = "bp-test-utils" +version = "0.1.0" +dependencies = [ + "bp-header-chain", + "ed25519-dalek", + "finality-grandpa", + "parity-scale-codec", + "sp-application-crypto", + "sp-finality-grandpa", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "bp-westend" +version = "0.1.0" +dependencies = [ + 
"bp-header-chain", + "bp-messages", + "bp-polkadot-core", + "bp-runtime", + "parity-scale-codec", + "sp-api", + "sp-runtime", + "sp-std", + "sp-version", ] [[package]] diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index f8914a160d..824e80dd33 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -26,8 +26,10 @@ tempfile = "3.2.0" [workspace] members = [ - "bridges/primitives/kusama", - "bridges/primitives/polkadot", + "bridges/primitives/chain-kusama", + "bridges/primitives/chain-polkadot", + "bridges/primitives/chain-rococo", + "bridges/primitives/chain-westend", "bridges/primitives/runtime", "cli", "core-primitives", diff --git a/polkadot/bridges/.dependabot/config.yml b/polkadot/bridges/.dependabot/config.yml index 1972b3b94a..61599ccba9 100644 --- a/polkadot/bridges/.dependabot/config.yml +++ b/polkadot/bridges/.dependabot/config.yml @@ -14,6 +14,8 @@ update_configs: dependency_name: "frame-*" - match: dependency_name: "pallet-*" + - match: + dependency_name: "node-inspect" automerged_updates: - match: update_type: "all" diff --git a/polkadot/bridges/.github/workflows/deny.yml b/polkadot/bridges/.github/workflows/deny.yml index e444b11da8..9f9f7264ae 100644 --- a/polkadot/bridges/.github/workflows/deny.yml +++ b/polkadot/bridges/.github/workflows/deny.yml @@ -10,12 +10,19 @@ on: tags: - v* paths-ignore: - - '**/README.md' + - '**.md' - diagrams/* - docs/* jobs: cargo-deny: runs-on: ubuntu-latest + strategy: + matrix: + checks: + - advisories + - bans licenses sources + # Prevent sudden announcement of a new advisory from failing CI: + continue-on-error: ${{ matrix.checks == 'advisories' }} steps: - name: Cancel Previous Runs uses: styfle/cancel-workflow-action@0.4.1 @@ -29,4 +36,4 @@ jobs: - name: Cargo deny uses: EmbarkStudios/cargo-deny-action@v1 with: - command: "check --hide-inclusion-graph" + command: check ${{ matrix.checks }} diff --git a/polkadot/bridges/.github/workflows/lint.yml b/polkadot/bridges/.github/workflows/lint.yml index 
b30a72a4c4..4ebd12e0d6 100644 --- a/polkadot/bridges/.github/workflows/lint.yml +++ b/polkadot/bridges/.github/workflows/lint.yml @@ -8,7 +8,7 @@ on: tags: - v* paths-ignore: - - '**/README.md' + - '**.md' - diagrams/* - docs/* schedule: # Weekly build @@ -21,17 +21,21 @@ jobs: env: RUST_BACKTRACE: full steps: + - name: Cancel Previous Runs uses: styfle/cancel-workflow-action@0.4.1 with: access_token: ${{ github.token }} + - name: Checkout sources & submodules uses: actions/checkout@master with: fetch-depth: 5 submodules: recursive + - name: Add rustfmt run: rustup component add rustfmt + - name: rust-fmt check uses: actions-rs/cargo@master with: diff --git a/polkadot/bridges/.github/workflows/publish-deps.yml b/polkadot/bridges/.github/workflows/publish-deps.yml index 799aa8f966..16d56a5d78 100644 --- a/polkadot/bridges/.github/workflows/publish-deps.yml +++ b/polkadot/bridges/.github/workflows/publish-deps.yml @@ -1,49 +1,76 @@ -name: Publish Dependencies to Docker hub +name: Publish Dependencies to Docker hub on: push: tags: - v* paths-ignore: - - '**/README.md' + - '**.md' - diagrams/* - docs/* - schedule: # Weekly build - - cron: '0 0 * * 0' + schedule: # Weekly build + - cron: '0 0 * * 0' jobs: -## Publish to Docker hub + ## Publish to Docker hub publish: - name: Publishing - runs-on: ubuntu-latest + name: Publishing + runs-on: ubuntu-latest container: - image: docker:git + image: docker:git steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 + + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.4.1 with: - access_token: ${{ github.token }} - - name: Checkout sources & submodules - uses: actions/checkout@master + access_token: ${{ github.token }} + + - name: Checkout sources & submodules + uses: actions/checkout@v2 with: - fetch-depth: 5 - submodules: recursive - - name: Build and push dependencies - uses: docker/build-push-action@v1 + fetch-depth: 5 + submodules: recursive + + - name: Set up Docker Buildx + 
uses: docker/setup-buildx-action@v1 + + - name: Login to DockerHub + uses: docker/login-action@v1 with: - username: ${{ secrets.DOCKER_USER }} - password: ${{ secrets.DOCKER_PASSWORD }} - repository: paritytech/bridge-dependencies - dockerfile: deployments/BridgeDeps.Dockerfile - tag_with_ref: true - tag_with_sha: true - labels: - org.opencontainers.image.source="https://github.com/paritytech/parity-bridges-common", - org.opencontainers.image.authors="devops-team@parity.io", - org.opencontainers.image.vendor="Parity Technologies", - org.opencontainers.image.url="https://github.com/paritytech/parity-bridges-common", - org.opencontainers.image.documentation="https://github.com/paritytech/parity-bridges-common/README.md", - org.opencontainers.image.title=${{ matrix.project }}, - org.opencontainers.image.description="${{ matrix.project }} - component of Parity Bridges Common", - org.opencontainers.image.licenses="GPL-3.0 License" - add_git_labels: true + username: ${{ secrets.DOCKER_USER }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Prepare + id: prep + run: | + DOCKER_IMAGE=paritytech/bridge-dependencies + VERSION=latest + if [[ $GITHUB_REF == refs/tags/* ]]; then + VERSION=${GITHUB_REF#refs/tags/} + elif [[ $GITHUB_REF == refs/heads/* ]]; then + VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g') + fi + TAGS=${DOCKER_IMAGE}:${VERSION} + TAGS=$TAGS,${DOCKER_IMAGE}:sha-${GITHUB_SHA::8} + echo ::set-output name=TAGS::${TAGS} + echo ::set-output name=DATE::$(date +%d-%m-%Y) + + - name: Build and push + uses: docker/build-push-action@v2 + with: + file: deployments/BridgeDeps.Dockerfile + push: true + cache-from: type=registry,ref=paritytech/bridge-dependencies:latest + cache-to: type=inline + tags: ${{ steps.prep.outputs.TAGS }} + labels: | + org.opencontainers.image.title=bridge-dependencies + org.opencontainers.image.description=bridge-dependencies - component of Parity Bridges Common + org.opencontainers.image.source=${{ 
github.event.repository.html_url }} + org.opencontainers.image.url=https://github.com/paritytech/parity-bridges-common + org.opencontainers.image.documentation=https://github.com/paritytech/parity-bridges-common/README.md + org.opencontainers.image.created=${{ steps.prep.outputs.DATE }} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.authors=devops-team@parity.io + org.opencontainers.image.vendor=Parity Technologies + org.opencontainers.image.licenses=GPL-3.0 License diff --git a/polkadot/bridges/.github/workflows/publish-docker.yml b/polkadot/bridges/.github/workflows/publish-docker.yml index 3e00ead610..5a4670b6ea 100644 --- a/polkadot/bridges/.github/workflows/publish-docker.yml +++ b/polkadot/bridges/.github/workflows/publish-docker.yml @@ -1,20 +1,20 @@ -name: Publish images to Docker hub +name: Publish images to Docker hub on: push: tags: - v* paths-ignore: - - '**/README.md' + - '**.md' - diagrams/* - docs/* - schedule: # Nightly build - - cron: '0 1 * * *' + schedule: # Nightly build + - cron: '0 1 * * *' jobs: -## Publish to Docker hub + ## Publish to Docker hub publish: - name: Publishing + name: Publishing strategy: matrix: project: @@ -31,46 +31,63 @@ jobs: healthcheck: http://localhost:9616/metrics - project: substrate-relay healthcheck: http://localhost:9616/metrics - runs-on: ubuntu-latest - container: - image: docker:git + + runs-on: ubuntu-latest steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 + + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.4.1 with: - access_token: ${{ github.token }} - - name: Checkout sources & submodules - uses: actions/checkout@master + access_token: ${{ github.token }} + + - name: Checkout sources & submodules + uses: actions/checkout@v2 with: - fetch-depth: 5 - submodules: recursive - - name: Set vars - id: vars - run: | - echo ::set-output name=DATE::$(date +%d-%m-%Y) - if [[ ${GITHUB_REF} = refs/tags/* ]] - then - echo ::set-output 
name=TAG::${GITHUB_REF#refs/tags/} - else - echo ::set-output name=TAG::nightly-$(date +%d-%m-%Y) + fetch-depth: 5 + submodules: recursive + + - name: Prepare + id: prep + run: | + if [[ $GITHUB_REF == refs/tags/* ]]; then + VERSION=${GITHUB_REF#refs/tags/} + elif [[ $GITHUB_REF == refs/heads/* ]]; then + VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g') fi - - name: Build and push ${{ matrix.project }} - uses: docker/build-push-action@v1 + TAGS="${VERSION} sha-${GITHUB_SHA::8} latest" + echo ::set-output name=TAGS::${VERSION} + echo ::set-output name=TAGS::${TAGS} + echo ::set-output name=DATE::$(date +%d-%m-%Y) + + - name: Workaround rootless build + run: | + sudo apt-get install fuse-overlayfs + mkdir -vp ~/.config/containers + printf "[storage.options]\nmount_program=\"/usr/bin/fuse-overlayfs\"" > ~/.config/containers/storage.conf + + - name: Build image for ${{ matrix.project }} + uses: redhat-actions/buildah-build@v2.2 with: + image: ${{ matrix.project }} + tags: ${{ steps.prep.outputs.TAGS }} + dockerfiles: ./Dockerfile + build-args: | + PROJECT=${{ matrix.project }} + HEALTH=${{ matrix.healthcheck }} + VCS_REF=sha-${GITHUB_SHA::8} + BUILD_DATE=${{ steps.prep.outputs.DATE }} + VERSION=${{ steps.prep.outputs.VERSION }} + + - name: Push ${{ matrix.project }} image to docker.io + id: push-to-dockerhub + uses: redhat-actions/push-to-registry@v2.1.1 + with: + registry: docker.io/paritytech + image: ${{ matrix.project }} + tags: ${{ steps.prep.outputs.TAGS }} username: ${{ secrets.DOCKER_USER }} password: ${{ secrets.DOCKER_PASSWORD }} - repository: paritytech/${{ matrix.project }} - build_args: PROJECT=${{ matrix.project }}, HEALTH=${{ matrix.healthcheck }} - tags: ${{ steps.vars.outputs.TAG }}, latest - labels: - org.opencontainers.image.created=${{ steps.vars.outputs.DATE }}, - org.opencontainers.image.source="https://github.com/paritytech/parity-bridges-common", - org.opencontainers.image.authors="devops-team@parity.io", - 
org.opencontainers.image.vendor="Parity Technologies", - org.opencontainers.image.url="https://github.com/paritytech/parity-bridges-common", - org.opencontainers.image.documentation="https://github.com/paritytech/parity-bridges-common/README.md", - org.opencontainers.image.version=${{ steps.vars.outputs.TAG }}, - org.opencontainers.image.title=${{ matrix.project }}, - org.opencontainers.image.description="${{ matrix.project }} - component of Parity Bridges Common", - org.opencontainers.image.licenses="GPL-3.0 License" - add_git_labels: true + + - name: Check the image + run: | + echo "New image has been pushed to ${{ steps.push-to-dockerhub.outputs.registry-path }}" diff --git a/polkadot/bridges/.github/workflows/rust.yml b/polkadot/bridges/.github/workflows/rust.yml index 3fe73363bf..e6f7939efb 100644 --- a/polkadot/bridges/.github/workflows/rust.yml +++ b/polkadot/bridges/.github/workflows/rust.yml @@ -8,7 +8,7 @@ on: tags: - v* paths-ignore: - - '**/README.md' + - '**.md' - diagrams/* - docs/* schedule: # Weekly build @@ -23,27 +23,30 @@ jobs: toolchain: - stable #- beta - - nightly + - nightly-2021-04-10 runs-on: ubuntu-latest env: RUST_BACKTRACE: full - NIGHTLY: nightly #if necessary, specify the version, nightly-2020-10-04, etc. + NIGHTLY: nightly-2021-04-10 #if necessary, specify the version, nightly-2020-10-04, etc. 
steps: + - name: Cancel Previous Runs uses: styfle/cancel-workflow-action@0.4.1 with: access_token: ${{ github.token }} + - name: Checkout sources & submodules uses: actions/checkout@master with: fetch-depth: 5 submodules: recursive + - name: Install Toolchain run: rustup toolchain add $NIGHTLY + - name: Add WASM Utilities run: rustup target add wasm32-unknown-unknown --toolchain $NIGHTLY - - name: Rust Cache - uses: Swatinem/rust-cache@v1.2.0 + - name: Checking rust-${{ matrix.toolchain }} uses: actions-rs/cargo@master with: @@ -66,13 +69,14 @@ jobs: with: command: check toolchain: ${{ matrix.toolchain }} - args: --manifest-path ./bin/rialto/node/Cargo.toml --no-default-features --features runtime-benchmarks --verbose + args: -p rialto-runtime --features runtime-benchmarks --verbose + - name: Check Millau benchmarks runtime ${{ matrix.platform }} rust-${{ matrix.toolchain }} uses: actions-rs/cargo@master with: command: check toolchain: ${{ matrix.toolchain }} - args: --manifest-path ./bin/millau/node/Cargo.toml --no-default-features --features runtime-benchmarks --verbose + args: -p millau-runtime --features runtime-benchmarks --verbose ## Build Stage build: @@ -86,23 +90,26 @@ jobs: runs-on: ubuntu-latest env: RUST_BACKTRACE: full - NIGHTLY: nightly #if necessary, specify the version, nightly-2020-10-04, etc. + NIGHTLY: nightly-2021-04-10 #if necessary, specify the version, nightly-2020-10-04, etc. 
steps: + - name: Cancel Previous Runs uses: styfle/cancel-workflow-action@0.4.1 with: access_token: ${{ github.token }} + - name: Checkout sources & submodules uses: actions/checkout@master with: fetch-depth: 5 submodules: recursive + - name: Install Toolchain run: rustup toolchain add $NIGHTLY + - name: Add WASM Utilities run: rustup target add wasm32-unknown-unknown --toolchain $NIGHTLY - - name: Rust Cache - uses: Swatinem/rust-cache@v1.2.0 + - name: Building rust-${{ matrix.toolchain }} uses: actions-rs/cargo@master if: github.ref == 'refs/heads/master' @@ -110,6 +117,7 @@ jobs: command: build toolchain: ${{ matrix.toolchain }} args: --all --verbose + - name: Prepare artifacts if: github.ref == 'refs/heads/master' run: | @@ -119,6 +127,7 @@ jobs: mv -v target/debug/ethereum-poa-relay ./artifacts/; mv -v target/debug/substrate-relay ./artifacts/; shell: bash + - name: Upload artifacts if: github.ref == 'refs/heads/master' uses: actions/upload-artifact@v1 @@ -132,28 +141,35 @@ jobs: runs-on: ubuntu-latest env: RUST_BACKTRACE: full - NIGHTLY: nightly #if necessary, specify the version, nightly-2020-10-04, etc. + NIGHTLY: nightly-2021-04-10 #if necessary, specify the version, nightly-2020-10-04, etc. steps: + - name: Cancel Previous Runs uses: styfle/cancel-workflow-action@0.4.1 with: access_token: ${{ github.token }} + - name: Checkout sources & submodules uses: actions/checkout@master with: fetch-depth: 5 submodules: recursive + - name: Install Toolchain run: rustup toolchain add $NIGHTLY + - name: Add WASM Utilities run: rustup target add wasm32-unknown-unknown --toolchain $NIGHTLY + - name: Add clippy run: rustup component add clippy --toolchain $NIGHTLY + - name: Rust Cache uses: Swatinem/rust-cache@v1.2.0 + - name: Clippy uses: actions-rs/cargo@master with: command: clippy - toolchain: nightly #if necessary, specify the version, nightly-2020-10-04, etc. + toolchain: nightly-2021-04-10 #if necessary, specify the version, nightly-2020-10-04, etc. 
args: --all-targets -- -D warnings diff --git a/polkadot/bridges/.gitignore b/polkadot/bridges/.gitignore index cc9ede9aef..0ab0857843 100644 --- a/polkadot/bridges/.gitignore +++ b/polkadot/bridges/.gitignore @@ -2,6 +2,9 @@ **/.env **/.env2 **/rust-toolchain +hfuzz_target +hfuzz_workspace +**/Cargo.lock **/*.rs.bk diff --git a/polkadot/bridges/.maintain/rialto-weight-template.hbs b/polkadot/bridges/.maintain/rialto-weight-template.hbs index c8d6725a7f..4bf856948a 100644 --- a/polkadot/bridges/.maintain/rialto-weight-template.hbs +++ b/polkadot/bridges/.maintain/rialto-weight-template.hbs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/CODE_OF_CONDUCT.md b/polkadot/bridges/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..70541fb72f --- /dev/null +++ b/polkadot/bridges/CODE_OF_CONDUCT.md @@ -0,0 +1,80 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers +pledge to making participation in our project and our community a harassment-free experience for +everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity +and expression, level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit + permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +### Facilitation, Not Strongarming + +We recognise that this software is merely a tool for users to create and maintain their blockchain +of preference. We see that blockchains are naturally community platforms with users being the +ultimate decision makers. We assert that good software will maximise user agency by facilitate +user-expression on the network. As such: + +- This project will strive to give users as much choice as is both reasonable and possible over what + protocol they adhere to; but +- use of the project's technical forums, commenting systems, pull requests and issue trackers as a + means to express individual protocol preferences is forbidden. + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are +expected to take appropriate and fair corrective action in response to any instances of unacceptable +behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, +code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or +to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is +representing the project or its community. Examples of representing a project or community include +using an official project e-mail address, posting via an official social media account, or acting as +an appointed representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting +the project team at admin@parity.io. All complaints will be reviewed and investigated and will +result in a response that is deemed necessary and appropriate to the circumstances. The project team +is obligated to maintain confidentiality with regard to the reporter of an incident. Further +details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face +temporary or permanent repercussions as determined by other members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at +https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/polkadot/bridges/Cargo.lock b/polkadot/bridges/Cargo.lock index 8e013b7dae..5811d7ad3b 100644 --- a/polkadot/bridges/Cargo.lock +++ b/polkadot/bridges/Cargo.lock @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +version = 3 + [[package]] name = "Inflector" version = "0.11.4" @@ -12,18 +14,18 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" dependencies = [ "gimli", ] [[package]] name = "adler" -version = "0.2.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aead" @@ -79,12 +81,6 @@ dependencies = [ "opaque-debug 0.3.0", ] -[[package]] -name = "ahash" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" - [[package]] name = "ahash" version = "0.4.7" @@ -120,9 +116,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee67c11feeac938fae061b232e38e0b6d94f97a9df10e6271319325ac4c56a86" +checksum = 
"afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1" [[package]] name = "approx" @@ -133,6 +129,18 @@ dependencies = [ "num-traits", ] +[[package]] +name = "arbitrary" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "698b65a961a9d730fb45b6b0327e20207810c9f61ee421b082b27ba003f49e2b" + +[[package]] +name = "array_tool" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f8cb5d814eb646a863c4f24978cff2880c4be96ad8cde2c0f0678732902e271" + [[package]] name = "arrayref" version = "0.3.6" @@ -175,9 +183,9 @@ dependencies = [ [[package]] name = "async-channel" -version = "1.5.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59740d83946db6a5af71ae25ddf9562c2b176b2ca42cf99a455f09f4a220d6b9" +checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" dependencies = [ "concurrent-queue", "event-listener", @@ -254,13 +262,13 @@ dependencies = [ [[package]] name = "async-process" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8cea09c1fb10a317d1b5af8024eeba256d6554763e85ecd90ff8df31c7bbda" +checksum = "ef37b86e2fa961bae5a4d212708ea0154f904ce31d1a4a7f47e1bbc33a0c040b" dependencies = [ "async-io", "blocking", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "event-listener", "futures-lite", "once_cell", @@ -279,7 +287,7 @@ dependencies = [ "async-io", "async-lock", "async-process", - "crossbeam-utils 0.8.1", + "crossbeam-utils 0.8.3", "futures-channel", "futures-core", "futures-io", @@ -296,6 +304,20 @@ dependencies = [ "wasm-bindgen-futures", ] +[[package]] +name = "async-std-resolver" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f665c56111e244fe38e7708ee10948a4356ad6a548997c21f5a63a0f4e0edc4d" +dependencies = [ + "async-std", + "async-trait", + "futures-io", + "futures-util", + "pin-utils", + 
"trust-dns-resolver", +] + [[package]] name = "async-task" version = "4.0.3" @@ -304,14 +326,15 @@ checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" [[package]] name = "async-tls" -version = "0.6.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce6977f57fa68da77ffe5542950d47e9c23d65f5bc7cb0a9f8700996913eec7" +checksum = "2f23d769dbf1838d5df5156e7b1ad404f4c463d1ac2c6aeb6cd943630f8a8400" dependencies = [ - "futures 0.3.12", - "rustls 0.16.0", - "webpki", - "webpki-roots 0.17.0", + "futures-core", + "futures-io", + "rustls 0.19.0", + "webpki 0.21.4", + "webpki-roots", ] [[package]] @@ -331,7 +354,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb4401f0a3622dad2e0763fa79e0eb328bc70fb7dccfdd645341f00d671247d6" dependencies = [ - "bytes 1.0.0", + "bytes 1.0.1", "futures-sink", "futures-util", "memchr", @@ -344,7 +367,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0de5164e5edbf51c45fb8c2d9664ae1c095cce1b265ecf7569093c0d66ef690" dependencies = [ - "bytes 1.0.0", + "bytes 1.0.1", "futures-sink", "futures-util", "memchr", @@ -395,9 +418,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.55" +version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5140344c85b01f9bbb4d4b7288a8aa4b3287ccef913a14bcc78a1063623598" +checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" dependencies = [ "addr2line", "cfg-if 1.0.0", @@ -419,21 +442,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" -[[package]] -name = "base64" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" -dependencies = [ - 
"byteorder", -] - -[[package]] -name = "base64" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" - [[package]] name = "base64" version = "0.12.3" @@ -447,10 +455,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] -name = "bincode" -version = "1.3.1" +name = "beef" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d" +checksum = "6736e2428df2ca2848d846c43e88745121a6654696e349ce0054a420815a7409" + +[[package]] +name = "bincode" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d175dfa69e619905c4c3cdb7c3c203fa3bdd5d51184e3afdb2742c0280493772" dependencies = [ "byteorder", "serde", @@ -486,6 +500,16 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +[[package]] +name = "bitvec" +version = "0.17.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" +dependencies = [ + "either", + "radium 0.3.0", +] + [[package]] name = "bitvec" version = "0.20.1" @@ -493,7 +517,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f5011ffc90248764d7005b0e10c7294f5aa1bd87d9dd7248f4ad475b347c294d" dependencies = [ "funty", - "radium", + "radium 0.6.2", "tap", "wyz", ] @@ -621,7 +645,7 @@ name = "bp-currency-exchange" version = "0.1.0" dependencies = [ "frame-support", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api", "sp-std", ] @@ -638,10 +662,10 @@ dependencies = [ "impl-serde", "libsecp256k1", "parity-bytes", - "parity-scale-codec", + "parity-scale-codec 2.0.1", 
"plain_hasher", "primitive-types", - "rlp 0.5.0", + "rlp", "serde", "serde-big-array", "sp-api", @@ -656,9 +680,9 @@ name = "bp-header-chain" version = "0.1.0" dependencies = [ "bp-test-utils", - "finality-grandpa", + "finality-grandpa 0.14.0", "frame-support", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-finality-grandpa", @@ -670,13 +694,10 @@ dependencies = [ name = "bp-kusama" version = "0.1.0" dependencies = [ - "bp-message-lane", + "bp-messages", + "bp-polkadot-core", "bp-runtime", - "frame-support", - "frame-system", "sp-api", - "sp-core", - "sp-runtime", "sp-std", ] @@ -685,17 +706,17 @@ name = "bp-message-dispatch" version = "0.1.0" dependencies = [ "bp-runtime", - "parity-scale-codec", + "parity-scale-codec 2.0.1", ] [[package]] -name = "bp-message-lane" +name = "bp-messages" version = "0.1.0" dependencies = [ "bp-runtime", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-std", ] @@ -703,7 +724,7 @@ dependencies = [ name = "bp-millau" version = "0.1.0" dependencies = [ - "bp-message-lane", + "bp-messages", "bp-runtime", "fixed-hash", "frame-support", @@ -725,7 +746,35 @@ dependencies = [ name = "bp-polkadot" version = "0.1.0" dependencies = [ - "bp-message-lane", + "bp-messages", + "bp-polkadot-core", + "bp-runtime", + "sp-api", + "sp-std", +] + +[[package]] +name = "bp-polkadot-core" +version = "0.1.0" +dependencies = [ + "bp-messages", + "bp-runtime", + "frame-support", + "frame-system", + "hex", + "parity-scale-codec 2.0.1", + "sp-api", + "sp-core", + "sp-runtime", + "sp-std", + "sp-version", +] + +[[package]] +name = "bp-rialto" +version = "0.1.0" +dependencies = [ + "bp-messages", "bp-runtime", "frame-support", "frame-system", @@ -736,17 +785,18 @@ dependencies = [ ] [[package]] -name = "bp-rialto" +name = "bp-rococo" version = "0.1.0" dependencies = [ - "bp-message-lane", + "bp-header-chain", + "bp-messages", + "bp-polkadot-core", "bp-runtime", - "frame-support", - 
"frame-system", + "parity-scale-codec 2.0.1", "sp-api", - "sp-core", "sp-runtime", "sp-std", + "sp-version", ] [[package]] @@ -754,12 +804,15 @@ name = "bp-runtime" version = "0.1.0" dependencies = [ "frame-support", + "hash-db", "num-traits", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-core", "sp-io", "sp-runtime", + "sp-state-machine", "sp-std", + "sp-trie", ] [[package]] @@ -767,10 +820,28 @@ name = "bp-test-utils" version = "0.1.0" dependencies = [ "bp-header-chain", - "finality-grandpa", + "ed25519-dalek", + "finality-grandpa 0.14.0", + "parity-scale-codec 2.0.1", + "sp-application-crypto", "sp-finality-grandpa", - "sp-keyring", "sp-runtime", + "sp-std", +] + +[[package]] +name = "bp-westend" +version = "0.1.0" +dependencies = [ + "bp-header-chain", + "bp-messages", + "bp-polkadot-core", + "bp-runtime", + "parity-scale-codec 2.0.1", + "sp-api", + "sp-runtime", + "sp-std", + "sp-version", ] [[package]] @@ -778,15 +849,15 @@ name = "bridge-runtime-common" version = "0.1.0" dependencies = [ "bp-message-dispatch", - "bp-message-lane", + "bp-messages", "bp-runtime", "ed25519-dalek", "frame-support", "hash-db", - "pallet-bridge-call-dispatch", - "pallet-message-lane", - "pallet-substrate-bridge", - "parity-scale-codec", + "pallet-bridge-dispatch", + "pallet-bridge-grandpa", + "pallet-bridge-messages", + "parity-scale-codec 2.0.1", "sp-core", "sp-runtime", "sp-state-machine", @@ -794,12 +865,6 @@ dependencies = [ "sp-trie", ] -[[package]] -name = "bs58" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" - [[package]] name = "bs58" version = "0.4.0" @@ -808,18 +873,24 @@ checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bstr" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"473fc6b38233f9af7baa94fb5852dca389e3d95b8e21c8e3719301462c5d9faf" +checksum = "a40b47ad93e1a5404e6c18dec46b628214fee441c70f4ab5d6942142cc268a3d" dependencies = [ "memchr", ] [[package]] name = "bumpalo" -version = "3.4.0" +version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" +checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" + +[[package]] +name = "byte-slice-cast" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" [[package]] name = "byte-slice-cast" @@ -858,9 +929,9 @@ checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "bytes" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad1f8e949d755f9d79112b5bb46938e0ef9d3804a0b16dfab13aafcaa5f0fa72" +checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" [[package]] name = "cache-padded" @@ -870,9 +941,9 @@ checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" [[package]] name = "cc" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" +checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" dependencies = [ "jobserver", ] @@ -936,9 +1007,9 @@ dependencies = [ [[package]] name = "cid" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d88f30b1e74e7063df5711496f3ee6e74a9735d62062242d70cddf77717f18e" +checksum = "ff0e3bc0b6446b3f9663c1a6aba6ef06c5aeaa1bc92bd18077be337198ab9768" dependencies = [ "multibase", "multihash", @@ -1001,9 +1072,9 @@ dependencies = [ [[package]] name = "const_fn" -version = "0.4.4" 
+version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826" +checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" [[package]] name = "constant_time_eq" @@ -1017,7 +1088,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" dependencies = [ - "core-foundation-sys", + "core-foundation-sys 0.7.0", "libc", ] @@ -1027,6 +1098,12 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" +[[package]] +name = "core-foundation-sys" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" + [[package]] name = "cpp_demangle" version = "0.3.2" @@ -1051,18 +1128,18 @@ checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" [[package]] name = "cranelift-bforest" -version = "0.69.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4066fd63b502d73eb8c5fa6bcab9c7962b05cd580f6b149ee83a8e730d8ce7fb" +checksum = "bcee7a5107071484772b89fdf37f0f460b7db75f476e43ea7a684fd942470bcf" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" -version = "0.69.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a54e4beb833a3c873a18a8fe735d73d732044004c7539a072c8faa35ccb0c60" +checksum = "654ab96f0f1cab71c0d323618a58360a492da2c341eb2c1f977fc195c664001b" dependencies = [ "byteorder", "cranelift-bforest", @@ -1080,9 +1157,9 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.69.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c54cac7cacb443658d8f0ff36a3545822613fa202c946c0891897843bc933810" +checksum = "65994cfc5be9d5fd10c5fc30bcdddfa50c04bb79c91329287bff846434ff8f14" dependencies = [ "cranelift-codegen-shared", "cranelift-entity", @@ -1090,24 +1167,27 @@ dependencies = [ [[package]] name = "cranelift-codegen-shared" -version = "0.69.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a109760aff76788b2cdaeefad6875a73c2b450be13906524f6c2a81e05b8d83c" +checksum = "889d720b688b8b7df5e4903f9b788c3c59396050f5548e516e58ccb7312463ab" +dependencies = [ + "serde", +] [[package]] name = "cranelift-entity" -version = "0.69.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b044234aa32531f89a08b487630ddc6744696ec04c8123a1ad388de837f5de3" +checksum = "1a2e6884a363e42a9ba980193ea8603a4272f8a92bd8bbaf9f57a94dbea0ff96" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.69.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5452b3e4e97538ee5ef2cc071301c69a86c7adf2770916b9d04e9727096abd93" +checksum = "e6f41e2f9b57d2c030e249d0958f1cdc2c3cd46accf8c0438b3d1944e9153444" dependencies = [ "cranelift-codegen", "log", @@ -1117,25 +1197,24 @@ dependencies = [ [[package]] name = "cranelift-native" -version = "0.69.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f68035c10b2e80f26cc29c32fa824380877f38483504c2a47b54e7da311caaf3" +checksum = "aab70ba7575665375d31cbdea2462916ce58be887834e1b83c860b43b51af637" dependencies = [ "cranelift-codegen", - "raw-cpuid", "target-lexicon", ] [[package]] name = "cranelift-wasm" -version = "0.69.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a530eb9d1c95b3309deb24c3d179d8b0ba5837ed98914a429787c395f614949d" +checksum = "f2fc3d2e70da6439adf97648dcdf81834363154f2907405345b6fbe7ca38918c" dependencies = 
[ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", - "itertools", + "itertools 0.10.0", "log", "serde", "smallvec 1.6.1", @@ -1159,7 +1238,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.1", + "crossbeam-utils 0.8.3", ] [[package]] @@ -1180,8 +1259,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.1", - "crossbeam-utils 0.8.1", + "crossbeam-epoch 0.9.3", + "crossbeam-utils 0.8.3", ] [[package]] @@ -1201,13 +1280,12 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.1" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d" +checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" dependencies = [ "cfg-if 1.0.0", - "const_fn", - "crossbeam-utils 0.8.1", + "crossbeam-utils 0.8.3", "lazy_static", "memoffset 0.6.1", "scopeguard", @@ -1237,9 +1315,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" +checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" dependencies = [ "autocfg", "cfg-if 1.0.0", @@ -1283,9 +1361,9 @@ dependencies = [ [[package]] name = "ctor" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10bcb9d7dcbf7002aaffbb53eac22906b64cdcc127971dcc387d8eb7c95d5560" +checksum = "e8f45d9ad417bcef4817d614a501ab55cdd96a6fdb24f49aab89a54acfd66b19" dependencies = [ "quote", "syn", @@ -1303,10 +1381,41 @@ dependencies = [ ] 
[[package]] -name = "curve25519-dalek" -version = "2.1.0" +name = "curl" +version = "0.4.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d85653f070353a16313d0046f173f70d1aadd5b42600a14de626f0dfb3473a5" +checksum = "5a872858e9cb9e3b96c80dd78774ad9e32e44d3b05dc31e142b858d14aebc82c" +dependencies = [ + "curl-sys", + "libc", + "openssl-probe", + "openssl-sys", + "schannel", + "socket2 0.3.19", + "winapi 0.3.9", +] + +[[package]] +name = "curl-sys" +version = "0.4.41+curl-7.75.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ec466abd277c7cab2905948f3e94d10bc4963f1f5d47921c1cc4ffd2028fe65" +dependencies = [ + "cc", + "libc", + "libnghttp2-sys", + "libz-sys", + "openssl-sys", + "pkg-config", + "vcpkg", + "winapi 0.3.9", +] + +[[package]] +name = "curve25519-dalek" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "434e1720189a637d44fe464f4df1e6eb900b4835255b14354497c78af37d9bb8" dependencies = [ "byteorder", "digest 0.8.1", @@ -1317,9 +1426,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.0.0" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8492de420e9e60bc9a1d66e2dbb91825390b738a388606600663fc529b4b307" +checksum = "f627126b946c25a4638eec0ea634fc52506dea98db118aae985118ce7c3d723f" dependencies = [ "byteorder", "digest 0.9.0", @@ -1330,9 +1439,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "993a608597367c6377b258c25d7120740f00ed23a2252b729b1932dd7866f908" +checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" [[package]] name = "data-encoding-macro" @@ -1469,9 +1578,9 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d55796afa1b20c2945ca8eabfc421839f2b766619209f1ede813cf2484f31804" +checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" [[package]] name = "ed25519" @@ -1488,11 +1597,11 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ - "curve25519-dalek 3.0.0", + "curve25519-dalek 3.0.2", "ed25519", "rand 0.7.3", "serde", - "sha2 0.9.2", + "sha2 0.9.3", "zeroize", ] @@ -1502,6 +1611,27 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +[[package]] +name = "encoding_rs" +version = "0.8.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "enum-as-inner" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "env_logger" version = "0.7.1" @@ -1522,7 +1652,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17392a012ea30ef05a610aa97dfb49496e71c9f676b27879922ea5bdf60d9d3f" dependencies = [ "atty", - "humantime 2.0.1", + "humantime 2.1.0", "log", "regex", "termcolor", @@ -1627,10 +1757,10 @@ name = "ethereum-contract-builtin" version = "0.1.0" dependencies = [ "ethereum-types", - "finality-grandpa", + "finality-grandpa 0.14.0", "hex", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "rialto-runtime", "sc-finality-grandpa", "sp-blockchain", @@ -1655,7 +1785,7 @@ dependencies = [ "ethabi-derive", "exchange-relay", "frame-system", - "futures 0.3.12", + "futures 0.3.13", "headers-relay", "hex", "hex-literal 0.3.1", @@ -1664,7 
+1794,7 @@ dependencies = [ "messages-relay", "num-traits", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "relay-ethereum-client", "relay-rialto-client", "relay-substrate-client", @@ -1706,7 +1836,7 @@ dependencies = [ "async-std", "async-trait", "backoff", - "futures 0.3.12", + "futures 0.3.13", "log", "num-traits", "parking_lot 0.11.1", @@ -1719,7 +1849,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", ] [[package]] @@ -1784,6 +1914,21 @@ dependencies = [ "log", ] +[[package]] +name = "finality-grandpa" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8feb87a63249689640ac9c011742c33139204e3c134293d3054022276869133b" +dependencies = [ + "either", + "futures 0.3.13", + "futures-timer 2.0.2", + "log", + "num-traits", + "parity-scale-codec 1.3.7", + "parking_lot 0.9.0", +] + [[package]] name = "finality-grandpa" version = "0.14.0" @@ -1791,14 +1936,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6447e2f8178843749e8c8003206def83ec124a7859475395777a28b5338647c" dependencies = [ "either", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", ] +[[package]] +name = "finality-relay" +version = "0.1.0" +dependencies = [ + "async-std", + "async-trait", + "backoff", + "bp-header-chain", + "futures 0.3.13", + "headers-relay", + "log", + "num-traits", + "parking_lot 0.11.1", + "relay-utils", +] + [[package]] name = "fixed-hash" version = "0.7.0" @@ -1819,9 +1980,9 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" [[package]] name = "flate2" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "7411863d55df97a419aa64cb4d2f167103ea9d767e2c54a1868b7ac3f6b47129" +checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0" dependencies = [ "cfg-if 1.0.0", "crc32fast", @@ -1830,6 +1991,18 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "flume" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "531a685ab99b8f60a271b44d5dd1a76e55124a8c9fa0407b7a8e9cd172d5b588" +dependencies = [ + "futures-core", + "futures-sink", + "pin-project 1.0.5", + "spinning_top", +] + [[package]] name = "fnv" version = "1.0.7" @@ -1839,16 +2012,16 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", ] [[package]] name = "form_urlencoded" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" dependencies = [ "matches", "percent-encoding 2.1.0", @@ -1856,13 +2029,14 @@ dependencies = [ [[package]] name = "frame-benchmarking" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +version = "3.1.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "frame-support", "frame-system", "linregress", - "parity-scale-codec", + "log", + "parity-scale-codec 2.0.1", "paste 1.0.4", "sp-api", "sp-io", @@ -1875,13 +2049,13 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version 
= "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "Inflector", "chrono", "frame-benchmarking", "handlebars", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-cli", "sc-client-db", "sc-executor", @@ -1898,12 +2072,11 @@ dependencies = [ [[package]] name = "frame-executive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", - "serde", + "parity-scale-codec 2.0.1", "sp-core", "sp-io", "sp-runtime", @@ -1914,9 +2087,9 @@ dependencies = [ [[package]] name = "frame-metadata" version = "13.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-std", @@ -1925,7 +2098,7 @@ dependencies = [ [[package]] name = "frame-support" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "bitflags", "frame-metadata", @@ -1933,7 +2106,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "once_cell", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "paste 1.0.4", "serde", "smallvec 1.6.1", @@ -1951,7 +2124,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "Inflector", "frame-support-procedural-tools", @@ -1963,10 +2136,10 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "frame-support-procedural-tools-derive", - "proc-macro-crate", + "proc-macro-crate 1.0.0", "proc-macro2", "quote", "syn", @@ -1975,7 +2148,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "proc-macro2", "quote", @@ -1985,11 +2158,12 @@ dependencies = [ [[package]] name = "frame-system" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "frame-support", "impl-trait-for-tuples", - "parity-scale-codec", + "log", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -2001,9 +2175,9 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "parity-scale-codec", + 
"parity-scale-codec 2.0.1", "sp-api", ] @@ -2019,6 +2193,16 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "fs2" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +dependencies = [ + "libc", + "winapi 0.3.9", +] + [[package]] name = "fuchsia-cprng" version = "0.1.1" @@ -2049,15 +2233,15 @@ checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" [[package]] name = "futures" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c7e4c2612746b0df8fed4ce0c69156021b704c9aefa360311c04e6e9e002eed" +checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150" +checksum = "7f55667319111d593ba876406af7c409c0ebb44dc4be6132a783ccf163ea14c1" dependencies = [ "futures-channel", "futures-core", @@ -2070,9 +2254,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" +checksum = "8c2dd2df839b57db9ab69c2c9d8f3e8c81984781937fe2807dc6dcf3b2ad2939" dependencies = [ "futures-core", "futures-sink", @@ -2080,9 +2264,9 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65" +checksum = "15496a72fabf0e62bdc3df11a59a3787429221dd0710ba8ef163d6f7a9112c94" [[package]] name = "futures-cpupool" @@ -2090,7 +2274,7 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" dependencies = [ - "futures 0.1.30", + "futures 0.1.31", "num_cpus", ] @@ -2100,8 +2284,8 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdcef58a173af8148b182684c9f2d5250875adbcaff7b5794073894f9d8634a9" dependencies = [ - "futures 0.1.30", - "futures 0.3.12", + "futures 0.1.31", + "futures 0.3.13", "lazy_static", "log", "parking_lot 0.9.0", @@ -2112,9 +2296,9 @@ dependencies = [ [[package]] name = "futures-executor" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9" +checksum = "891a4b7b96d84d5940084b2a37632dd65deeae662c114ceaa2c879629c9c0ad1" dependencies = [ "futures-core", "futures-task", @@ -2124,9 +2308,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" +checksum = "d71c2c65c57704c32f5241c1223167c2c3294fd34ac020c807ddbe6db287ba59" [[package]] name = "futures-lite" @@ -2145,9 +2329,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" +checksum = "ea405816a5139fb39af82c2beb921d52143f556038378d6db21183a5c37fbfb7" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -2163,23 +2347,20 @@ checksum = "3a1387e07917c711fb4ee4f48ea0adb04a3c9739e53ef85bf43ae1edc2937a8b" dependencies = [ "futures-io", "rustls 0.19.0", - "webpki", + "webpki 0.21.4", ] [[package]] name = "futures-sink" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6" +checksum = "85754d98985841b7d4f5e8e6fbfa4a4ac847916893ec511a2917ccd8525b8bb3" [[package]] name = "futures-task" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86" -dependencies = [ - "once_cell", -] +checksum = "fa189ef211c15ee602667a6fcfe1c1fd9e07d42250d2156382820fba33c9df80" [[package]] name = "futures-timer" @@ -2195,11 +2376,11 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" +checksum = "1812c7ab8aedf8d6f2701a43e1243acdbcc2b36ab26e2ad421eb99ac963d96d1" dependencies = [ - "futures 0.1.30", + "futures 0.1.31", "futures-channel", "futures-core", "futures-io", @@ -2220,19 +2401,6 @@ version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" -[[package]] -name = "generator" -version = "0.6.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cdc09201b2e8ca1b19290cf7e65de2246b8e91fb6874279722189c4de7b94dc" -dependencies = [ - "cc", - "libc", - "log", - "rustc_version", - "winapi 0.3.9", -] - [[package]] name = "generic-array" version = "0.12.3" @@ -2263,20 +2431,20 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = 
"getrandom" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ "cfg-if 1.0.0", "libc", @@ -2345,7 +2513,7 @@ dependencies = [ "byteorder", "bytes 0.4.12", "fnv", - "futures 0.1.30", + "futures 0.1.31", "http 0.1.21", "indexmap", "log", @@ -2365,10 +2533,10 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http 0.2.2", + "http 0.2.3", "indexmap", "slab", - "tokio 0.2.24", + "tokio 0.2.25", "tokio-util", "tracing", "tracing-futures", @@ -2376,9 +2544,9 @@ dependencies = [ [[package]] name = "handlebars" -version = "3.5.1" +version = "3.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2764f9796c0ddca4b82c07f25dd2cb3db30b9a8f47940e78e1c883d9e95c3db9" +checksum = "cdb0867bbc5a3da37a753e78021d5fcf8a4db00e18dd2dd90fd36e24190e162d" dependencies = [ "log", "pest", @@ -2403,23 +2571,13 @@ dependencies = [ "crunchy", ] -[[package]] -name = "hashbrown" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96282e96bfcd3da0d3aa9938bedf1e50df3269b6db08b4876d2da0bb1a0841cf" -dependencies = [ - "ahash 0.3.8", - "autocfg", -] - [[package]] name = "hashbrown" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" dependencies = [ - "ahash 0.4.7", + "ahash", ] [[package]] @@ -2429,7 +2587,7 @@ dependencies = [ "async-std", "async-trait", "backoff", - "futures 0.3.12", + "futures 0.3.13", "linked-hash-map", "log", "num-traits", @@ -2448,18 +2606,18 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" dependencies = [ "libc", ] [[package]] name = "hex" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-literal" @@ -2523,6 +2681,28 @@ dependencies = [ "hmac 0.7.1", ] +[[package]] +name = "honggfuzz" +version = "0.5.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bea09577d948a98a5f59b7c891e274c4fb35ad52f67782b3d0cb53b9c05301f1" +dependencies = [ + "arbitrary", + "lazy_static", + "memmap", +] + +[[package]] +name = "hostname" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +dependencies = [ + "libc", + "match_cfg", + "winapi 0.3.9", +] + [[package]] name = "http" version = "0.1.21" @@ -2536,11 +2716,11 @@ dependencies = [ [[package]] name = "http" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84129d298a6d57d246960ff8eb831ca4af3f96d29e2e28848dae275408658e26" +checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "fnv", "itoa", ] @@ -2552,7 +2732,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "http 0.1.21", "tokio-buf", ] @@ -2564,14 +2744,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ "bytes 0.5.6", - "http 0.2.2", 
+ "http 0.2.3", ] [[package]] name = "httparse" -version = "1.3.4" +version = "1.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +checksum = "615caabe2c3160b313d52ccc905335f4ed5f10881dd63dc5699d47e90be85691" [[package]] name = "httpdate" @@ -2590,18 +2770,18 @@ dependencies = [ [[package]] name = "humantime" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c1ad908cc71012b7bea4d0c53ba96a8cba9962f048fa68d143376143d863b7a" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.12.35" +version = "0.12.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dbe6ed1438e1f8ad955a4701e9a944938e9519f6888d12d8558b645e247d5f6" +checksum = "5c843caf6296fc1f93444735205af9ed4e109a539005abb2564ae1d6fad34c52" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "futures-cpupool", "h2 0.1.26", "http 0.1.21", @@ -2635,14 +2815,14 @@ dependencies = [ "futures-core", "futures-util", "h2 0.2.7", - "http 0.2.2", + "http 0.2.3", "http-body 0.3.1", "httparse", "httpdate", "itoa", "pin-project 1.0.5", - "socket2", - "tokio 0.2.24", + "socket2 0.3.19", + "tokio 0.2.25", "tower-service", "tracing", "want 0.3.0", @@ -2661,9 +2841,9 @@ dependencies = [ "log", "rustls 0.18.1", "rustls-native-certs", - "tokio 0.2.24", + "tokio 0.2.25", "tokio-rustls", - "webpki", + "webpki 0.21.4", ] [[package]] @@ -2679,9 +2859,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +checksum = "89829a5d69c23d348314a7ac337fe39173b61149a9864deabd260983aed48c21" dependencies = [ "matches", "unicode-bidi", @@ -2711,12 +2891,12 @@ dependencies = [ [[package]] name = 
"if-watch" -version = "0.1.8" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b8538953a3f0d0d3868f0a706eb4273535e10d72acb5c82c1c23ae48835c85" +checksum = "6a6d52908d4ea4ab2bc22474ba149bf1011c8e2c3ebc1ff593ae28ac44f494b6" dependencies = [ "async-io", - "futures 0.3.12", + "futures 0.3.13", "futures-lite", "if-addrs", "ipnet", @@ -2731,7 +2911,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df170efa359aebdd5cb7fe78edcc67107748e4737bdca8a8fb40d15ea7a877ed" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", ] [[package]] @@ -2740,7 +2920,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" dependencies = [ - "rlp 0.5.0", + "rlp", ] [[package]] @@ -2770,7 +2950,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" dependencies = [ "autocfg", - "hashbrown 0.9.1", + "hashbrown", "serde", ] @@ -2798,7 +2978,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64fa110ec7b8f493f416eed552740d10e7030ad5f63b2308f82c9608ec2df275" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "futures-timer 2.0.2", ] @@ -2817,12 +2997,49 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ee15951c035f79eddbef745611ec962f63f4558f1dadf98ab723cc603487c6f" +[[package]] +name = "ipconfig" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" +dependencies = [ + "socket2 0.3.19", + "widestring", + "winapi 0.3.9", + "winreg", +] + [[package]] name = "ipnet" version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" +[[package]] +name = "isahc" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b24d2aed6bbe6faeab0e164ec2e9e6193fcfcfe489b6eb59fb0d0d34947d73" +dependencies = [ + "crossbeam-utils 0.8.3", + "curl", + "curl-sys", + "encoding_rs", + "flume", + "futures-lite", + "http 0.2.3", + "log", + "mime", + "once_cell", + "polling", + "slab", + "sluice", + "tracing", + "tracing-futures", + "url 2.2.1", + "waker-fn", +] + [[package]] name = "itertools" version = "0.9.0" @@ -2832,6 +3049,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "0.4.7" @@ -2849,13 +3075,26 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.46" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d7383929f7c9c7c2d0fa596f325832df98c3704f2c60553080f7127a58175" +checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" dependencies = [ "wasm-bindgen", ] +[[package]] +name = "jsonpath_lib" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61352ec23883402b7d30b3313c16cbabefb8907361c4eb669d990cbb87ceee5a" +dependencies = [ + "array_tool", + "env_logger 0.7.1", + "log", + "serde", + "serde_json", +] + [[package]] name = "jsonrpc-client-transports" version = "15.1.0" @@ -2863,7 +3102,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "489b9c612e60c766f751ab40fcb43cbb55a1e10bb44a9b4307ed510ca598cbd7" dependencies = [ "failure", - "futures 0.1.30", + "futures 0.1.31", "jsonrpc-core 15.1.0", "jsonrpc-pubsub", "log", @@ -2878,7 +3117,7 @@ version = "15.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0745a6379e3edc893c84ec203589790774e4247420033e71a76d3ab4687991fa" dependencies = [ - "futures 0.1.30", + "futures 0.1.31", "log", "serde", "serde_derive", @@ -2891,7 +3130,7 @@ version = "17.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07569945133257ff557eb37b015497104cea61a2c9edaf126c1cbd6e8332397f" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "log", "serde", "serde_derive", @@ -2913,7 +3152,7 @@ version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99a847f9ec7bb52149b2786a17c9cb260d6effc6b8eeb8c16b343a487a7563a3" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 0.1.5", "proc-macro2", "quote", "syn", @@ -2925,7 +3164,7 @@ version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb5c4513b7b542f42da107942b7b759f27120b5cc894729f88254b28dff44b7" dependencies = [ - "hyper 0.12.35", + "hyper 0.12.36", "jsonrpc-core 15.1.0", "jsonrpc-server-utils", "log", @@ -2991,49 +3230,57 @@ dependencies = [ "slab", ] -[[package]] -name = "jsonrpsee" -version = "1.0.0" -source = "git+https://github.com/svyatonik/jsonrpsee.git?branch=shared-client-in-rpc-api#1597b09c4a9140cd0f1320948c7a8fb237af58fb" -dependencies = [ - "async-std", - "async-tls", - "bs58 0.3.1", - "bytes 0.5.6", - "fnv", - "futures 0.3.12", - "futures-timer 3.0.2", - "globset", - "hashbrown 0.7.2", - "hyper 0.13.10", - "jsonrpsee-proc-macros", - "lazy_static", - "log", - "parking_lot 0.10.2", - "pin-project 0.4.27", - "rand 0.7.3", - "serde", - "serde_json", - "smallvec 1.6.1", - "soketto 0.3.2", - "thiserror", - "tokio 0.2.24", - "unicase", - "url 2.2.0", - "webpki", -] - [[package]] name = "jsonrpsee-proc-macros" -version = "1.0.0" -source = "git+https://github.com/svyatonik/jsonrpsee.git?branch=shared-client-in-rpc-api#1597b09c4a9140cd0f1320948c7a8fb237af58fb" +version = "0.2.0-alpha.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0cbaee9ca6440e191545a68c7bf28db0ff918359a904e37a6e7cf7edd132f5a" dependencies = [ "Inflector", + "proc-macro-crate 1.0.0", "proc-macro2", "quote", "syn", ] +[[package]] +name = "jsonrpsee-types" +version = "0.2.0-alpha.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4ce2de6884fb4abee16eca02329a1eec1eb8df8aed751a8e929083820c78ce7" +dependencies = [ + "async-trait", + "beef", + "futures-channel", + "futures-util", + "log", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "jsonrpsee-ws-client" +version = "0.2.0-alpha.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03ece6acc5ef1e7877fd53887e8937b273466713dc8c017a32223c2b9b678d63" +dependencies = [ + "async-std", + "async-tls", + "async-trait", + "fnv", + "futures 0.3.13", + "jsonrpsee-types", + "log", + "pin-project 1.0.5", + "serde", + "serde_json", + "soketto", + "thiserror", + "url 2.2.1", + "webpki 0.22.0", +] + [[package]] name = "keccak" version = "0.1.0" @@ -3118,9 +3365,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.81" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" +checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c" [[package]] name = "libloading" @@ -3139,14 +3386,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] -name = "libp2p" -version = "0.35.1" +name = "libnghttp2-sys" +version = "0.1.6+1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc225a49973cf9ab10d0cdd6a4b8f0cda299df9b760824bbb623f15f8f0c95a" +checksum = "0af55541a8827e138d59ec9e5877fb6095ece63fb6f4da45e7491b4fbd262855" 
+dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "libp2p" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe5759b526f75102829c15e4d8566603b4bf502ed19b5f35920d98113873470d" dependencies = [ "atomic", - "bytes 1.0.0", - "futures 0.3.12", + "bytes 1.0.1", + "futures 0.3.13", "lazy_static", "libp2p-core", "libp2p-deflate", @@ -3161,6 +3418,7 @@ dependencies = [ "libp2p-ping", "libp2p-plaintext", "libp2p-pnet", + "libp2p-relay", "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-derive", @@ -3178,16 +3436,16 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.27.1" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a2d56aadc2c2bf22cd7797f86e56a65b5b3994a0136b65be3106938acae7a26" +checksum = "c1e1797734bbd4c453664fefb029628f77c356ffc5bce98f06b18a7db3ebb0f7" dependencies = [ "asn1_der", - "bs58 0.4.0", + "bs58", "ed25519-dalek", "either", "fnv", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "lazy_static", "libsecp256k1", @@ -3202,7 +3460,7 @@ dependencies = [ "rand 0.7.3", "ring", "rw-stream-sink", - "sha2 0.9.2", + "sha2 0.9.3", "smallvec 1.6.1", "thiserror", "unsigned-varint 0.7.0", @@ -3212,35 +3470,38 @@ dependencies = [ [[package]] name = "libp2p-deflate" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d42eed63305f0420736fa487f9acef720c4528bd7852a6a760f5ccde4813345" +checksum = "a2181a641cd15f9b6ba71b1335800f309012a0a97a29ffaabbbf40e9d3d58f08" dependencies = [ "flate2", - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", ] [[package]] name = "libp2p-dns" -version = "0.27.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5153b6db68fd4baa3b304e377db744dd8fea8ff4e4504509ee636abcde88d3e3" +checksum = "62e63dab8b5ff35e0c101a3e51e843ba782c07bbb1682f5fd827622e0d02b98b" dependencies = [ - "futures 
0.3.12", + "async-std-resolver", + "futures 0.3.13", "libp2p-core", "log", + "smallvec 1.6.1", + "trust-dns-resolver", ] [[package]] name = "libp2p-floodsub" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3c63dfa06581b24b1d12bf9815b43689a784424be217d6545c800c7c75a207f" +checksum = "897645f99e9b396df256a6aa8ba8c4bc019ac6b7c62556f624b5feea9acc82bb" dependencies = [ "cuckoofilter", "fnv", - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", "libp2p-swarm", "log", @@ -3252,16 +3513,16 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "502dc5fcbfec4aa1c63ef3f7307ffe20e90c1a1387bf23ed0bec087f2dde58a1" +checksum = "794b0c85f5df1acbc1fc38414d37272594811193b6325c76d3931c3e3f5df8c0" dependencies = [ "asynchronous-codec 0.6.0", "base64 0.13.0", "byteorder", - "bytes 1.0.0", + "bytes 1.0.1", "fnv", - "futures 0.3.12", + "futures 0.3.13", "hex_fmt", "libp2p-core", "libp2p-swarm", @@ -3270,7 +3531,7 @@ dependencies = [ "prost-build", "rand 0.7.3", "regex", - "sha2 0.9.2", + "sha2 0.9.3", "smallvec 1.6.1", "unsigned-varint 0.7.0", "wasm-timer", @@ -3278,11 +3539,11 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b40fb36a059b7a8cce1514bd8b546fa612e006c9937caa7f5950cb20021fe91e" +checksum = "f88ebc841d744979176ab4b8b294a3e655a7ba4ef26a905d073a52b49ed4dff5" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", "libp2p-swarm", "log", @@ -3294,23 +3555,23 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.28.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3da6c9acbcc05f93235d201d7d45ef4e8b88a45d8836f98becd8b4d443f066" +checksum = 
"bbb5b90b6bda749023a85f60b49ea74b387c25f17d8df541ae72a3c75dd52e63" dependencies = [ "arrayvec 0.5.2", "asynchronous-codec 0.6.0", - "bytes 1.0.0", + "bytes 1.0.1", "either", "fnv", - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", "libp2p-swarm", "log", "prost", "prost-build", "rand 0.7.3", - "sha2 0.9.2", + "sha2 0.9.3", "smallvec 1.6.1", "uint", "unsigned-varint 0.7.0", @@ -3320,34 +3581,34 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.28.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e9e6374814d1b118d97ccabdfc975c8910bd16dc38a8bc058eeb08bf2080fe1" +checksum = "be28ca13bb648d249a9baebd750ebc64ce7040ddd5f0ce1035ff1f4549fb596d" dependencies = [ "async-io", "data-encoding", "dns-parser", - "futures 0.3.12", + "futures 0.3.13", "if-watch", "lazy_static", "libp2p-core", "libp2p-swarm", "log", - "rand 0.7.3", + "rand 0.8.3", "smallvec 1.6.1", - "socket2", + "socket2 0.4.0", "void", ] [[package]] name = "libp2p-mplex" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350ce8b3923594aedabd5d6e3f875d058435052a29c3f32df378bc70d10be464" +checksum = "85e9b544335d1ed30af71daa96edbefadef6f19c7a55f078b9fc92c87163105d" dependencies = [ "asynchronous-codec 0.6.0", - "bytes 1.0.0", - "futures 0.3.12", + "bytes 1.0.1", + "futures 0.3.13", "libp2p-core", "log", "nohash-hasher", @@ -3359,20 +3620,20 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4aca322b52a0c5136142a7c3971446fb1e9964923a526c9cc6ef3b7c94e57778" +checksum = "36db0f0db3b0433f5b9463f1c0cd9eadc0a3734a9170439ce501ff99733a88bd" dependencies = [ - "bytes 1.0.0", - "curve25519-dalek 3.0.0", - "futures 0.3.12", + "bytes 1.0.1", + "curve25519-dalek 3.0.2", + "futures 0.3.13", "lazy_static", "libp2p-core", "log", "prost", "prost-build", "rand 0.7.3", - "sha2 
0.9.2", + "sha2 0.9.3", "snow", "static_assertions", "x25519-dalek", @@ -3381,11 +3642,11 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f3813276d0708c8db0f500d8beda1bda9ad955723b9cb272c41f4727256f73c" +checksum = "dea10fc5209260915ea65b78f612d7ff78a29ab288e7aa3250796866af861c45" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", "libp2p-swarm", "log", @@ -3396,13 +3657,13 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d58defcadb646ae4b033e130b48d87410bf76394dc3335496cae99dac803e61" +checksum = "0c8c37b4d2a075b4be8442760a5f8c037180f0c8dd5b5734b9978ab868b3aa11" dependencies = [ "asynchronous-codec 0.6.0", - "bytes 1.0.0", - "futures 0.3.12", + "bytes 1.0.1", + "futures 0.3.13", "libp2p-core", "log", "prost", @@ -3417,7 +3678,7 @@ version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ce3374f3b28162db9d3442c9347c4f14cb01e8290052615c7d341d40eae0599" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "log", "pin-project 1.0.5", "rand 0.7.3", @@ -3426,14 +3687,37 @@ dependencies = [ ] [[package]] -name = "libp2p-request-response" -version = "0.9.1" +name = "libp2p-relay" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10e5552827c33d8326502682da73a0ba4bfa40c1b55b216af3c303f32169dd89" +checksum = "3ff268be6a9d6f3c6cca3b81bbab597b15217f9ad8787c6c40fc548c1af7cd24" +dependencies = [ + "asynchronous-codec 0.6.0", + "bytes 1.0.1", + "futures 0.3.13", + "futures-timer 3.0.2", + "libp2p-core", + "libp2p-swarm", + "log", + "pin-project 1.0.5", + "prost", + "prost-build", + "rand 0.7.3", + "smallvec 1.6.1", + "unsigned-varint 0.7.0", + "void", + "wasm-timer", +] + +[[package]] +name = "libp2p-request-response" 
+version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "725367dd2318c54c5ab1a6418592e5b01c63b0dedfbbfb8389220b2bcf691899" dependencies = [ "async-trait", - "bytes 1.0.0", - "futures 0.3.12", + "bytes 1.0.1", + "futures 0.3.13", "libp2p-core", "libp2p-swarm", "log", @@ -3447,12 +3731,12 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.27.2" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7955b973e1fd2bd61ffd43ce261c1223f61f4aacd5bae362a924993f9a25fd98" +checksum = "75c26980cadd7c25d89071cb23e1f7f5df4863128cc91d83c6ddc72338cecafa" dependencies = [ "either", - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", "log", "rand 0.7.3", @@ -3473,40 +3757,40 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88a5aef80e519a6cb8e2663605142f97baaaea1a252eecbf8756184765f7471b" +checksum = "2b1a27d21c477951799e99d5c105d78868258502ce092988040a808d5a19bbd9" dependencies = [ "async-io", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "if-watch", "ipnet", "libc", "libp2p-core", "log", - "socket2", + "socket2 0.4.0", ] [[package]] name = "libp2p-uds" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80ac51ce419f60be966e02103c17f67ff5dc4422ba83ba54d251d6c62a4ed487" +checksum = "ffd6564bb3b7ff203661ccbb69003c2b551e34cef974f2d6c6a28306a12170b5" dependencies = [ "async-std", - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", "log", ] [[package]] name = "libp2p-wasm-ext" -version = "0.27.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6149c46cb76935c80bc8be6ec6e3ebd5f5e1679765a255fb34331d54610f15dd" +checksum = "cef45d61e43c313531b5e903e4e8415212ff6338e0c54c47da5b9b412b5760de" dependencies = [ - "futures 0.3.12", + "futures 
0.3.13", "js-sys", "libp2p-core", "parity-send-wrapper", @@ -3516,29 +3800,29 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3b1c6a3431045da8b925ed83384e4c5163e14b990572307fca9c507435d4d22" +checksum = "cace60995ef6f637e4752cccbb2590f6bc358e8741a0d066307636c69a4b3a74" dependencies = [ "either", - "futures 0.3.12", + "futures 0.3.13", "futures-rustls", "libp2p-core", "log", "quicksink", "rw-stream-sink", - "soketto 0.4.2", - "url 2.2.0", - "webpki-roots 0.21.0", + "soketto", + "url 2.2.1", + "webpki-roots", ] [[package]] name = "libp2p-yamux" -version = "0.30.1" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4819358c542a86ff95f6ae691efb4b94ddaf477079b01a686f5705b79bfc232a" +checksum = "96d6144cc94143fb0a8dd1e7c2fbcc32a2808168bcd1d69920635424d5993b7b" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "libp2p-core", "parking_lot 0.11.1", "thiserror", @@ -3580,15 +3864,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655" dependencies = [ "cc", + "libc", "pkg-config", "vcpkg", ] [[package]] name = "linked-hash-map" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "linked_hash_set" @@ -3638,25 +3923,21 @@ dependencies = [ ] [[package]] -name = "loom" -version = "0.3.6" +name = "lru" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0e8460f2f2121162705187214720353c517b97bdfb3494c0b1e33d83ebe4bed" +checksum = "1f374d42cdfc1d7dbf3d3dec28afab2eb97ffbf43a3234d795b5986dbf4b90ba" dependencies = [ - "cfg-if 0.1.10", - 
"generator", - "scoped-tls", - "serde", - "serde_json", + "hashbrown", ] [[package]] -name = "lru" -version = "0.6.3" +name = "lru-cache" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3aae342b73d57ad0b8b364bd12584819f2c1fe9114285dfcf8b0722607671635" +checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" dependencies = [ - "hashbrown 0.9.1", + "linked-hash-map", ] [[package]] @@ -3674,6 +3955,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + [[package]] name = "matchers" version = "0.0.1" @@ -3710,6 +3997,16 @@ version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" +[[package]] +name = "memmap" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" +dependencies = [ + "libc", + "winapi 0.3.9", +] + [[package]] name = "memmap2" version = "0.2.1" @@ -3744,7 +4041,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "814bbecfc0451fc314eeea34f05bbcd5b98a7ad7af37faee088b86a1e633f1d4" dependencies = [ "hash-db", - "hashbrown 0.9.1", + "hashbrown", "parity-util-mem", ] @@ -3756,9 +4053,9 @@ checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" [[package]] name = "merlin" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6feca46f4fa3443a01769d768727f10c10a20fdb65e52dc16a81f0c8269bb78" +checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" 
dependencies = [ "byteorder", "keccak", @@ -3772,8 +4069,8 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "bp-message-lane", - "futures 0.3.12", + "bp-messages", + "futures 0.3.13", "hex", "log", "parking_lot 0.11.1", @@ -3784,15 +4081,16 @@ dependencies = [ name = "millau-bridge-node" version = "0.1.0" dependencies = [ - "bp-message-lane", + "bp-messages", "bp-millau", "bp-runtime", "frame-benchmarking", "frame-benchmarking-cli", "jsonrpc-core 15.1.0", "millau-runtime", - "pallet-message-lane", - "pallet-message-lane-rpc", + "node-inspect", + "pallet-bridge-messages", + "pallet-transaction-payment-rpc", "sc-basic-authorship", "sc-cli", "sc-client-api", @@ -3804,6 +4102,7 @@ dependencies = [ "sc-keystore", "sc-rpc", "sc-service", + "sc-telemetry", "sc-transaction-pool", "sp-consensus", "sp-consensus-aura", @@ -3814,7 +4113,6 @@ dependencies = [ "structopt", "substrate-build-script-utils", "substrate-frame-rpc-system", - "vergen", ] [[package]] @@ -3822,10 +4120,11 @@ name = "millau-runtime" version = "0.1.0" dependencies = [ "bp-header-chain", - "bp-message-lane", + "bp-messages", "bp-millau", "bp-rialto", "bp-runtime", + "bp-westend", "bridge-runtime-common", "frame-executive", "frame-support", @@ -3834,18 +4133,18 @@ dependencies = [ "hex-literal 0.3.1", "pallet-aura", "pallet-balances", - "pallet-bridge-call-dispatch", - "pallet-finality-verifier", + "pallet-bridge-dispatch", + "pallet-bridge-grandpa", + "pallet-bridge-messages", "pallet-grandpa", - "pallet-message-lane", "pallet-randomness-collective-flip", "pallet-session", "pallet-shift-session-manager", - "pallet-substrate-bridge", "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", - "parity-scale-codec", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec 2.0.1", "serde", "sp-api", "sp-block-builder", @@ -3864,19 +4163,25 @@ dependencies = [ ] [[package]] -name = "minicbor" -version = "0.7.0" +name = "mime" +version = "0.3.16" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0164190d1771b1458c3742075b057ed55d25cd9dfb930aade99315a1eb1fe12d" +checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" + +[[package]] +name = "minicbor" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea79ce4ab9f445ec6b71833a2290ac0a29c9dde0fa7cae4c481eecae021d9bd9" dependencies = [ "minicbor-derive", ] [[package]] name = "minicbor-derive" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e071b3159835ee91df62dbdbfdd7ec366b7ea77c838f43aff4acda6b61bcfb9" +checksum = "19ce18b5423c573a13e80cb3046ea0af6379ef725dc3af4886bdb8f4e5093068" dependencies = [ "proc-macro2", "quote", @@ -3885,9 +4190,9 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" dependencies = [ "adler", "autocfg", @@ -3965,7 +4270,7 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ - "socket2", + "socket2 0.3.19", "winapi 0.3.9", ] @@ -3998,7 +4303,7 @@ dependencies = [ "digest 0.9.0", "generic-array 0.14.4", "multihash-derive", - "sha2 0.9.2", + "sha2 0.9.3", "sha3", "unsigned-varint 0.5.1", ] @@ -4009,7 +4314,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85ee3c48cb9d9b275ad967a0e96715badc13c6029adb92f34fa17b9ff28fd81f" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 0.1.5", "proc-macro-error", "proc-macro2", "quote", @@ -4025,16 +4330,16 @@ checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" [[package]] name = 
"multistream-select" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10ddc0eb0117736f19d556355464fc87efc8ad98b29e3fd84f02531eb6e90840" +checksum = "5df70763c86c98487451f307e1b68b4100da9076f4c12146905fc2054277f4e8" dependencies = [ - "bytes 1.0.0", - "futures 0.3.12", + "bytes 1.0.1", + "futures 0.3.13", "log", "pin-project 1.0.5", "smallvec 1.6.1", - "unsigned-varint 0.6.0", + "unsigned-varint 0.7.0", ] [[package]] @@ -4066,12 +4371,12 @@ dependencies = [ [[package]] name = "nb-connect" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8123a81538e457d44b933a02faf885d3fe8408806b23fa700e8f01c6c3a98998" +checksum = "670361df1bc2399ee1ff50406a0d422587dd3bb0da596e1978fe8e05dabddf4f" dependencies = [ "libc", - "winapi 0.3.9", + "socket2 0.3.19", ] [[package]] @@ -4085,6 +4390,23 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "node-inspect" +version = "0.8.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "derive_more", + "log", + "parity-scale-codec 2.0.1", + "sc-cli", + "sc-client-api", + "sc-service", + "sp-blockchain", + "sp-core", + "sp-runtime", + "structopt", +] + [[package]] name = "nodrop" version = "0.1.14" @@ -4137,6 +4459,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-format" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bafe4179722c2894288ee77a9f044f02811c86af699344c498b0840c698a2465" +dependencies = [ + "arrayvec 0.4.12", + "itoa", +] + [[package]] name = "num-integer" version = "0.1.44" @@ -4181,9 +4513,9 @@ dependencies = [ [[package]] name = "object" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" +checksum = 
"a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" dependencies = [ "crc32fast", "indexmap", @@ -4191,9 +4523,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.5.2" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" +checksum = "10acf907b94fc1b1a152d08ef97e7759650268cf986bf127f387e602b02c7e5a" dependencies = [ "parking_lot 0.11.1", ] @@ -4216,6 +4548,19 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" +[[package]] +name = "openssl-sys" +version = "0.9.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "313752393519e876837e09e1fa183ddef0be7735868dced3196f4472d536277f" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "owning_ref" version = "0.4.1" @@ -4228,30 +4573,28 @@ dependencies = [ [[package]] name = "pallet-aura" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "frame-support", "frame-system", "pallet-session", "pallet-timestamp", - "parity-scale-codec", - "serde", + "parity-scale-codec 2.0.1", "sp-application-crypto", "sp-consensus-aura", "sp-runtime", "sp-std", - "sp-timestamp", ] [[package]] name = "pallet-authorship" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-authorship", 
"sp-inherents", "sp-runtime", @@ -4261,29 +4604,13 @@ dependencies = [ [[package]] name = "pallet-balances" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", - "serde", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-bridge-call-dispatch" -version = "0.1.0" -dependencies = [ - "bp-message-dispatch", - "bp-runtime", - "frame-support", - "frame-system", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", + "log", + "parity-scale-codec 2.0.1", "sp-runtime", "sp-std", ] @@ -4297,7 +4624,25 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "log", + "parity-scale-codec 2.0.1", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-bridge-dispatch" +version = "0.1.0" +dependencies = [ + "bp-message-dispatch", + "bp-runtime", + "frame-support", + "frame-system", + "log", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -4315,7 +4660,8 @@ dependencies = [ "frame-system", "hex-literal 0.3.1", "libsecp256k1", - "parity-scale-codec", + "log", + "parity-scale-codec 2.0.1", "serde", "sp-io", "sp-runtime", @@ -4323,18 +4669,45 @@ dependencies = [ ] [[package]] -name = "pallet-finality-verifier" +name = "pallet-bridge-grandpa" version = "0.1.0" dependencies = [ "bp-header-chain", "bp-runtime", "bp-test-utils", - "finality-grandpa", + "finality-grandpa 0.14.0", + "frame-benchmarking", "frame-support", "frame-system", - "pallet-substrate-bridge", - "parity-scale-codec", + "log", + "num-traits", + "parity-scale-codec 2.0.1", "serde", + "sp-finality-grandpa", + "sp-io", + "sp-runtime", + "sp-std", + "sp-trie", +] + +[[package]] +name = "pallet-bridge-messages" +version = 
"0.1.0" +dependencies = [ + "bp-messages", + "bp-rialto", + "bp-runtime", + "frame-benchmarking", + "frame-support", + "frame-system", + "hex", + "hex-literal 0.3.1", + "log", + "num-traits", + "pallet-balances", + "parity-scale-codec 2.0.1", + "serde", + "sp-core", "sp-io", "sp-runtime", "sp-std", @@ -4343,15 +4716,15 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-authorship", "pallet-session", - "parity-scale-codec", - "serde", + "parity-scale-codec 2.0.1", "sp-application-crypto", "sp-core", "sp-finality-grandpa", @@ -4361,55 +4734,14 @@ dependencies = [ "sp-std", ] -[[package]] -name = "pallet-message-lane" -version = "0.1.0" -dependencies = [ - "bp-message-lane", - "bp-rialto", - "bp-runtime", - "frame-benchmarking", - "frame-support", - "frame-system", - "hex-literal 0.3.1", - "num-traits", - "pallet-balances", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-message-lane-rpc" -version = "0.1.0" -dependencies = [ - "bp-message-lane", - "bp-runtime", - "derive_more", - "futures 0.3.12", - "jsonrpc-core 15.1.0", - "jsonrpc-core-client", - "jsonrpc-derive", - "log", - "sc-client-api", - "sp-blockchain", - "sp-core", - "sp-runtime", - "sp-state-machine", - "sp-trie", -] - [[package]] name = "pallet-randomness-collective-flip" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + 
"parity-scale-codec 2.0.1", "safe-mix", "sp-runtime", "sp-std", @@ -4418,14 +4750,13 @@ dependencies = [ [[package]] name = "pallet-session" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", "pallet-timestamp", - "parity-scale-codec", - "serde", + "parity-scale-codec 2.0.1", "sp-core", "sp-io", "sp-runtime", @@ -4442,7 +4773,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-runtime", @@ -4450,37 +4781,14 @@ dependencies = [ "sp-std", ] -[[package]] -name = "pallet-substrate-bridge" -version = "0.1.0" -dependencies = [ - "bp-header-chain", - "bp-runtime", - "bp-test-utils", - "finality-grandpa", - "frame-support", - "frame-system", - "hash-db", - "parity-scale-codec", - "serde", - "sp-core", - "sp-finality-grandpa", - "sp-io", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-trie", -] - [[package]] name = "pallet-sudo" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", - "serde", + "parity-scale-codec 2.0.1", "sp-io", "sp-runtime", "sp-std", @@ -4489,14 +4797,14 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", 
"impl-trait-for-tuples", - "parity-scale-codec", - "serde", + "log", + "parity-scale-codec 2.0.1", "sp-inherents", "sp-runtime", "sp-std", @@ -4506,11 +4814,11 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "smallvec 1.6.1", "sp-core", @@ -4519,6 +4827,34 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-transaction-payment-rpc" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "jsonrpc-core 15.1.0", + "jsonrpc-core-client", + "jsonrpc-derive", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec 2.0.1", + "sp-api", + "sp-blockchain", + "sp-core", + "sp-rpc", + "sp-runtime", +] + +[[package]] +name = "pallet-transaction-payment-rpc-runtime-api" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "pallet-transaction-payment", + "parity-scale-codec 2.0.1", + "sp-api", + "sp-runtime", +] + [[package]] name = "parity-bytes" version = "0.1.2" @@ -4527,12 +4863,13 @@ checksum = "16b56e3a2420138bdb970f84dfb9c774aea80fa0e7371549eedec0d80c209c67" [[package]] name = "parity-db" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "111e193c96758d476d272093a853882668da17489f76bf4361b8decae0b6c515" +checksum = "495197c078e54b8735181aa35c00a327f7f3a3cc00a1ee8c95926dd010f0ec6b" dependencies = [ "blake2-rfc", "crc32fast", + "fs2", "hex", "libc", "log", @@ -4543,12 +4880,12 @@ dependencies = [ [[package]] name = 
"parity-multiaddr" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c6805f98667a3828afb2ec2c396a8d610497e8d546f5447188aae47c5a79ec" +checksum = "58341485071825827b7f03cf7efd1cb21e6a709bea778fb50227fd45d2f361b4" dependencies = [ "arrayref", - "bs58 0.4.0", + "bs58", "byteorder", "data-encoding", "multihash", @@ -4556,29 +4893,54 @@ dependencies = [ "serde", "static_assertions", "unsigned-varint 0.7.0", - "url 2.2.0", + "url 2.2.1", ] [[package]] name = "parity-scale-codec" -version = "2.0.0" +version = "1.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75c823fdae1bb5ff5708ee61a62697e6296175dc671710876871c853f48592b3" +checksum = "a4b26b16c7687c3075982af47719e481815df30bc544f7a6690763a25ca16e9d" dependencies = [ "arrayvec 0.5.2", - "bitvec", - "byte-slice-cast", - "parity-scale-codec-derive", + "bitvec 0.17.4", + "byte-slice-cast 0.3.5", + "parity-scale-codec-derive 1.2.3", + "serde", +] + +[[package]] +name = "parity-scale-codec" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cd3dab59b5cf4bc81069ade0fc470341a1ef3ad5fa73e5a8943bed2ec12b2e8" +dependencies = [ + "arrayvec 0.5.2", + "bitvec 0.20.1", + "byte-slice-cast 1.0.0", + "parity-scale-codec-derive 2.0.1", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "2.0.0" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9029e65297c7fd6d7013f0579e193ec2b34ae78eabca854c9417504ad8a2d214" +checksum = "c41512944b1faff334a5f1b9447611bf4ef40638ccb6328173dacefb338e878c" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 0.1.5", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa04976a81fde04924b40cc4036c4d12841e8bb04325a5cf2ada75731a150a7d" +dependencies = [ + 
"proc-macro-crate 0.1.5", "proc-macro2", "quote", "syn", @@ -4597,7 +4959,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e57fea504fea33f9fbb5f49f378359030e7e026a6ab849bb9e8f0787376f1bf" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "libc", "log", "mio-named-pipes", @@ -4616,7 +4978,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "664a8c6b8e62d8f9f2f937e391982eb433ab285b4cd9545b342441e04a906e42" dependencies = [ "cfg-if 1.0.0", - "hashbrown 0.9.1", + "hashbrown", "impl-trait-for-tuples", "parity-util-mem-derive", "parking_lot 0.11.1", @@ -4657,7 +5019,7 @@ dependencies = [ "rand 0.7.3", "sha-1 0.8.2", "slab", - "url 2.2.0", + "url 2.2.1", ] [[package]] @@ -4695,7 +5057,7 @@ checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" dependencies = [ "instant", "lock_api 0.4.2", - "parking_lot_core 0.8.2", + "parking_lot_core 0.8.3", ] [[package]] @@ -4729,14 +5091,14 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" +checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall 0.1.57", + "redox_syscall 0.2.5", "smallvec 1.6.1", "winapi 0.3.9", ] @@ -4937,9 +5299,9 @@ dependencies = [ [[package]] name = "platforms" -version = "0.2.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb3b2b1033b8a60b4da6ee470325f887758c95d5320f52f9ce0df055a55940e" +checksum = "989d43012e2ca1c4a02507c67282691a0a3207f9dc67cec596b43fe925b3d325" [[package]] name = "polling" @@ -5003,6 +5365,16 @@ dependencies = [ "toml", ] +[[package]] +name = "proc-macro-crate" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"41fdbd1df62156fbc5945f4762632564d7d038153091c3fcf1067f6aef7cff92" +dependencies = [ + "thiserror", + "toml", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -5035,15 +5407,15 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.24" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec" dependencies = [ "unicode-xid", ] @@ -5068,7 +5440,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e6984d2f1a23009bd270b8bb56d0926810a3d483f59c987d77969e9d8e840b2" dependencies = [ - "bytes 1.0.0", + "bytes 1.0.1", "prost-derive", ] @@ -5078,9 +5450,9 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32d3ebd75ac2679c2af3a92246639f9fcc8a442ee420719cc4fe195b98dd5fa3" dependencies = [ - "bytes 1.0.0", + "bytes 1.0.1", "heck", - "itertools", + "itertools 0.9.0", "log", "multimap", "petgraph", @@ -5097,7 +5469,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "169a15f3008ecb5160cba7d37bcd690a7601b6d30cfb87a117d45e59d52af5d4" dependencies = [ "anyhow", - "itertools", + "itertools 0.9.0", "proc-macro2", "quote", "syn", @@ -5109,7 +5481,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b518d7cdd93dab1d1122cf07fa9a60771836c668dde9d9e2a139f957f0d9f1bb" dependencies = [ - "bytes 1.0.0", + "bytes 1.0.1", "prost", ] @@ -5158,13 +5530,19 @@ 
dependencies = [ [[package]] name = "quote" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" dependencies = [ "proc-macro2", ] +[[package]] +name = "radium" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" + [[package]] name = "radium" version = "0.6.2" @@ -5200,7 +5578,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", @@ -5261,7 +5639,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", ] [[package]] @@ -5270,7 +5648,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" dependencies = [ - "getrandom 0.2.1", + "getrandom 0.2.2", ] [[package]] @@ -5309,17 +5687,6 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "raw-cpuid" -version = "8.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fdf7d9dbd43f3d81d94a49c1c3df73cc2b3827995147e6cf7f89d4ec5483e73" -dependencies = [ - "bitflags", - "cc", - "rustc_version", -] - [[package]] name = "rawpointer" version = "0.2.1" @@ -5346,7 +5713,7 @@ checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" dependencies = [ "crossbeam-channel", "crossbeam-deque 0.8.0", - "crossbeam-utils 0.8.1", + "crossbeam-utils 0.8.3", "lazy_static", 
"num_cpus", ] @@ -5368,9 +5735,9 @@ checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "redox_syscall" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ec8ca9416c5ea37062b502703cd7fcb207736bc294f6e0cf367ac6fc234570" +checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" dependencies = [ "bitflags", ] @@ -5381,7 +5748,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "redox_syscall 0.1.57", "rust-argon2", ] @@ -5392,24 +5759,24 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" dependencies = [ - "getrandom 0.2.1", - "redox_syscall 0.2.4", + "getrandom 0.2.2", + "redox_syscall 0.2.5", ] [[package]] name = "ref-cast" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e84b8a3c77dd38893c11b59284a40f304a1346d4da020e603fab3671727df95d" +checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d5173fc07aa6595363a38ca7d69d438cc32cca4216ccd1a3a8f2d4b10bbcd0" +checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" dependencies = [ "proc-macro2", "quote", @@ -5424,14 +5791,15 @@ checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5" dependencies = [ "log", "rustc-hash", + "serde", "smallvec 1.6.1", ] [[package]] name = "regex" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" +checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" dependencies = [ "aho-corasick", "memchr", @@ -5451,9 +5819,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.21" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" +checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" [[package]] name = "region" @@ -5474,10 +5842,11 @@ dependencies = [ "bp-eth-poa", "headers-relay", "hex-literal 0.3.1", - "jsonrpsee", + "jsonrpsee-proc-macros", + "jsonrpsee-ws-client", "libsecp256k1", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "relay-utils", "web3", ] @@ -5491,7 +5860,7 @@ dependencies = [ "frame-system", "headers-relay", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "relay-substrate-client", "relay-utils", "sp-core", @@ -5508,7 +5877,7 @@ dependencies = [ "headers-relay", "millau-runtime", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "relay-substrate-client", "relay-utils", "sp-core", @@ -5525,7 +5894,7 @@ dependencies = [ "frame-system", "headers-relay", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "relay-substrate-client", "relay-utils", "sp-core", @@ -5541,7 +5910,7 @@ dependencies = [ "frame-system", "headers-relay", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "relay-substrate-client", "relay-utils", "rialto-runtime", @@ -5550,29 +5919,51 @@ dependencies = [ "sp-runtime", ] +[[package]] +name = "relay-rococo-client" +version = "0.1.0" +dependencies = [ + "bp-rococo", + "frame-support", + "frame-system", + "headers-relay", + "pallet-transaction-payment", + "parity-scale-codec 2.0.1", + "relay-substrate-client", + "relay-utils", + "sp-core", + "sp-keyring", 
+ "sp-runtime", +] + [[package]] name = "relay-substrate-client" version = "0.1.0" dependencies = [ "async-std", "async-trait", - "bp-message-lane", + "bp-header-chain", + "bp-messages", "bp-runtime", + "finality-relay", "frame-support", "frame-system", - "futures 0.3.12", + "futures 0.3.13", "headers-relay", - "jsonrpsee", + "jsonrpsee-proc-macros", + "jsonrpsee-ws-client", "log", "num-traits", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "rand 0.7.3", "relay-utils", "sc-rpc-api", "sp-core", + "sp-finality-grandpa", "sp-runtime", "sp-std", + "sp-storage", "sp-trie", "sp-version", ] @@ -5586,14 +5977,34 @@ dependencies = [ "async-trait", "backoff", "env_logger 0.8.3", - "futures 0.3.12", + "futures 0.3.13", + "isahc", + "jsonpath_lib", "log", "num-traits", + "serde_json", "substrate-prometheus-endpoint", "sysinfo", "time 0.2.25", ] +[[package]] +name = "relay-westend-client" +version = "0.1.0" +dependencies = [ + "bp-westend", + "frame-support", + "frame-system", + "headers-relay", + "pallet-transaction-payment", + "parity-scale-codec 2.0.1", + "relay-substrate-client", + "relay-utils", + "sp-core", + "sp-keyring", + "sp-runtime", +] + [[package]] name = "remove_dir_all" version = "0.5.3" @@ -5603,6 +6014,16 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "resolv-conf" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +dependencies = [ + "hostname", + "quick-error 1.2.3", +] + [[package]] name = "retain_mut" version = "0.1.2" @@ -5613,14 +6034,15 @@ checksum = "53552c6c49e1e13f1a203ef0080ab3bbef0beb570a528993e83df057a9d9bba1" name = "rialto-bridge-node" version = "0.1.0" dependencies = [ - "bp-message-lane", + "bp-messages", "bp-rialto", "bp-runtime", "frame-benchmarking", "frame-benchmarking-cli", "jsonrpc-core 15.1.0", - "pallet-message-lane", - "pallet-message-lane-rpc", + "node-inspect", + 
"pallet-bridge-messages", + "pallet-transaction-payment-rpc", "rialto-runtime", "sc-basic-authorship", "sc-cli", @@ -5644,7 +6066,6 @@ dependencies = [ "structopt", "substrate-build-script-utils", "substrate-frame-rpc-system", - "vergen", ] [[package]] @@ -5655,7 +6076,7 @@ dependencies = [ "bp-eth-poa", "bp-header-chain", "bp-message-dispatch", - "bp-message-lane", + "bp-messages", "bp-millau", "bp-rialto", "bp-runtime", @@ -5667,22 +6088,23 @@ dependencies = [ "frame-system-rpc-runtime-api", "hex-literal 0.3.1", "libsecp256k1", + "log", "pallet-aura", "pallet-balances", - "pallet-bridge-call-dispatch", "pallet-bridge-currency-exchange", + "pallet-bridge-dispatch", "pallet-bridge-eth-poa", - "pallet-finality-verifier", + "pallet-bridge-grandpa", + "pallet-bridge-messages", "pallet-grandpa", - "pallet-message-lane", "pallet-randomness-collective-flip", "pallet-session", "pallet-shift-session-manager", - "pallet-substrate-bridge", "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", - "parity-scale-codec", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec 2.0.1", "serde", "sp-api", "sp-block-builder", @@ -5703,9 +6125,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.19" +version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "024a1e66fea74c66c66624ee5622a7ff0e4b73a13b4f5c326ddb50c708944226" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", @@ -5716,22 +6138,13 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "rlp" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1190dcc8c3a512f1eef5d09bb8c84c7f39e1054e174d1795482e18f5272f2e73" -dependencies = [ - "rustc-hex", -] - [[package]] name = "rlp" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e54369147e3e7796c9b885c7304db87ca3d09a0a98f72843d532868675bbfba8" dependencies = 
[ - "bytes 1.0.0", + "bytes 1.0.1", "rustc-hex", ] @@ -5747,9 +6160,9 @@ dependencies = [ [[package]] name = "rpassword" -version = "5.0.0" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d755237fc0f99d98641540e66abac8bc46a0652f19148ac9e21de2da06b326c9" +checksum = "ffc936cf8a7ea60c58f030fd36a612a48f440610214dc54bc36431f9ea0c3efb" dependencies = [ "libc", "winapi 0.3.9", @@ -5764,7 +6177,7 @@ dependencies = [ "base64 0.13.0", "blake2b_simd", "constant_time_eq", - "crossbeam-utils 0.8.1", + "crossbeam-utils 0.8.3", ] [[package]] @@ -5794,19 +6207,6 @@ dependencies = [ "semver", ] -[[package]] -name = "rustls" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" -dependencies = [ - "base64 0.10.1", - "log", - "ring", - "sct", - "webpki", -] - [[package]] name = "rustls" version = "0.18.1" @@ -5817,7 +6217,7 @@ dependencies = [ "log", "ring", "sct", - "webpki", + "webpki 0.21.4", ] [[package]] @@ -5830,7 +6230,7 @@ dependencies = [ "log", "ring", "sct", - "webpki", + "webpki 0.21.4", ] [[package]] @@ -5845,13 +6245,23 @@ dependencies = [ "security-framework", ] +[[package]] +name = "ruzstd" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d425143485a37727c7a46e689bbe3b883a00f42b4a52c4ac0f44855c1009b00" +dependencies = [ + "byteorder", + "twox-hash", +] + [[package]] name = "rw-stream-sink" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "pin-project 0.4.27", "static_assertions", ] @@ -5883,12 +6293,12 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" 
+source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-block-builder", "sc-client-api", "sc-proposer-metrics", @@ -5906,14 +6316,13 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-client-api", "sp-api", "sp-block-builder", "sp-blockchain", - "sp-consensus", "sp-core", "sp-inherents", "sp-runtime", @@ -5923,10 +6332,10 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-chain-spec-derive", "sc-consensus-babe", "sc-consensus-epochs", @@ -5944,9 +6353,9 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.0.0", "proc-macro2", "quote", "syn", @@ -5955,16 +6364,16 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = 
"git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "chrono", "fdlimit", - "futures 0.3.12", + "futures 0.3.13", "hex", "libp2p", "log", "names", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "rand 0.7.3", "regex", "rpassword", @@ -5987,22 +6396,22 @@ dependencies = [ "structopt", "thiserror", "tiny-bip39", - "tokio 0.2.24", + "tokio 0.2.25", ] [[package]] name = "sc-client-api" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "derive_more", "fnv", - "futures 0.3.12", + "futures 0.3.13", "hash-db", "kvdb", "lazy_static", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sc-executor", "sp-api", @@ -6027,7 +6436,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "blake2-rfc", "hash-db", @@ -6037,7 +6446,7 @@ dependencies = [ "linked-hash-map", "log", "parity-db", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-util-mem", "parking_lot 0.11.1", "sc-client-api", @@ -6057,8 +6466,9 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ + "parking_lot 0.11.1", "sc-client-api", "sp-blockchain", "sp-consensus", @@ -6068,14 +6478,14 @@ dependencies = [ [[package]] name = "sc-consensus-aura" version = "0.9.0" -source = 
"git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ + "async-trait", "derive_more", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "log", - "parity-scale-codec", - "parking_lot 0.11.1", + "parity-scale-codec 2.0.1", "sc-block-builder", "sc-client-api", "sc-consensus-slots", @@ -6100,18 +6510,19 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ + "async-trait", "derive_more", "fork-tree", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "log", "merlin", "num-bigint", "num-rational", "num-traits", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "pdqselect", "rand 0.7.3", @@ -6146,12 +6557,12 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "fork-tree", - "parity-scale-codec", - "parking_lot 0.11.1", + "parity-scale-codec 2.0.1", "sc-client-api", + "sc-consensus", "sp-blockchain", "sp-runtime", ] @@ -6159,13 +6570,13 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "futures 0.3.12", + "async-trait", + "futures 0.3.13", 
"futures-timer 3.0.2", "log", - "parity-scale-codec", - "parking_lot 0.11.1", + "parity-scale-codec 2.0.1", "sc-client-api", "sc-telemetry", "sp-api", @@ -6178,6 +6589,7 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-state-machine", + "sp-timestamp", "sp-trie", "thiserror", ] @@ -6185,7 +6597,7 @@ dependencies = [ [[package]] name = "sc-consensus-uncles" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "log", "sc-client-api", @@ -6199,13 +6611,13 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "derive_more", "lazy_static", "libsecp256k1", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-wasm", "parking_lot 0.11.1", "sc-executor-common", @@ -6215,6 +6627,7 @@ dependencies = [ "sp-core", "sp-externalities", "sp-io", + "sp-maybe-compressed-blob", "sp-panic-handler", "sp-runtime-interface", "sp-serializer", @@ -6228,11 +6641,12 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "derive_more", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-wasm", + "pwasm-utils", "sp-allocator", "sp-core", "sp-serializer", @@ -6244,10 +6658,10 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" version = "0.9.0" -source = 
"git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-executor-common", "sp-allocator", "sp-core", @@ -6259,10 +6673,10 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-wasm", "pwasm-utils", "sc-executor-common", @@ -6277,17 +6691,18 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ + "async-trait", "derive_more", "dyn-clone", - "finality-grandpa", + "finality-grandpa 0.14.0", "fork-tree", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "linked-hash-map", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "pin-project 1.0.5", "rand 0.7.3", @@ -6316,17 +6731,17 @@ dependencies = [ [[package]] name = "sc-finality-grandpa-rpc" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "derive_more", - "finality-grandpa", - "futures 0.3.12", + "finality-grandpa 0.14.0", + "futures 0.3.13", "jsonrpc-core 15.1.0", "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-pubsub", "log", - 
"parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-client-api", "sc-finality-grandpa", "sc-rpc", @@ -6340,10 +6755,10 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "ansi_term 0.12.1", - "futures 0.3.12", + "futures 0.3.13", "log", "parity-util-mem", "sc-client-api", @@ -6358,11 +6773,11 @@ dependencies = [ [[package]] name = "sc-keystore" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "async-trait", "derive_more", - "futures 0.3.12", + "futures 0.3.13", "futures-util", "hex", "merlin", @@ -6378,11 +6793,11 @@ dependencies = [ [[package]] name = "sc-light" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "hash-db", "lazy_static", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sc-client-api", "sc-executor", @@ -6397,21 +6812,21 @@ dependencies = [ [[package]] name = "sc-network" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "async-std", "async-trait", "asynchronous-codec 0.5.0", "bitflags", - "bs58 0.4.0", - "bytes 1.0.0", + "bs58", + "bytes 1.0.1", "cid", "derive_more", "either", "erased-serde", "fnv", "fork-tree", - "futures 0.3.12", + 
"futures 0.3.13", "futures-timer 3.0.2", "hex", "ip_network", @@ -6421,7 +6836,7 @@ dependencies = [ "log", "lru", "nohash-hasher", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "pin-project 1.0.5", "prost", @@ -6450,9 +6865,9 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "libp2p", "log", @@ -6460,23 +6875,25 @@ dependencies = [ "sc-network", "sp-runtime", "substrate-prometheus-endpoint", + "tracing", "wasm-timer", ] [[package]] name = "sc-offchain" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "bytes 0.5.6", "fnv", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", + "hex", "hyper 0.13.10", "hyper-rustls", "log", "num_cpus", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "rand 0.7.3", "sc-client-api", @@ -6493,9 +6910,9 @@ dependencies = [ [[package]] name = "sc-peerset" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "libp2p", "log", "serde_json", @@ -6506,7 +6923,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = 
"git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -6515,14 +6932,14 @@ dependencies = [ [[package]] name = "sc-rpc" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "hash-db", "jsonrpc-core 15.1.0", "jsonrpc-pubsub", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sc-block-builder", "sc-client-api", @@ -6549,16 +6966,16 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "derive_more", - "futures 0.3.12", + "futures 0.3.13", "jsonrpc-core 15.1.0", "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-pubsub", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "serde", "serde_json", @@ -6573,9 +6990,9 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "futures 0.1.30", + "futures 0.1.31", "jsonrpc-core 15.1.0", "jsonrpc-http-server", "jsonrpc-ipc-server", @@ -6591,19 +7008,20 @@ dependencies = [ [[package]] name = "sc-service" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = 
"git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ + "async-trait", "directories", "exit-future", - "futures 0.1.30", - "futures 0.3.12", + "futures 0.1.31", + "futures 0.3.13", "futures-timer 3.0.2", "hash-db", "jsonrpc-core 15.1.0", "jsonrpc-pubsub", "lazy_static", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-util-mem", "parking_lot 0.11.1", "pin-project 1.0.5", @@ -6654,10 +7072,10 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-util-mem", "parity-util-mem-derive", "parking_lot 0.11.1", @@ -6669,10 +7087,10 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "chrono", - "futures 0.3.12", + "futures 0.3.13", "libp2p", "log", "parking_lot 0.11.1", @@ -6680,10 +7098,8 @@ dependencies = [ "rand 0.7.3", "serde", "serde_json", - "sp-utils", "take_mut", - "tracing", - "tracing-subscriber", + "thiserror", "void", "wasm-timer", ] @@ -6691,7 +7107,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "ansi_term 0.12.1", "atty", @@ -6702,7 +7118,6 @@ dependencies = [ "parking_lot 0.11.1", "regex", "rustc-hash", - "sc-telemetry", 
"sc-tracing-proc-macro", "serde", "serde_json", @@ -6719,9 +7134,9 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.0.0", "proc-macro2", "quote", "syn", @@ -6730,10 +7145,10 @@ dependencies = [ [[package]] name = "sc-transaction-graph" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "derive_more", - "futures 0.3.12", + "futures 0.3.13", "linked-hash-map", "log", "parity-util-mem", @@ -6752,13 +7167,13 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "futures-diagnose", "intervalier", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-util-mem", "parking_lot 0.11.1", "sc-client-api", @@ -6793,8 +7208,8 @@ checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" dependencies = [ "arrayref", "arrayvec 0.5.2", - "curve25519-dalek 2.1.0", - "getrandom 0.1.15", + "curve25519-dalek 2.1.2", + "getrandom 0.1.16", "merlin", "rand 0.7.3", "rand_core 0.5.1", @@ -6827,9 +7242,9 @@ dependencies = [ [[package]] name = "scroll_derive" -version = "0.10.4" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b12bd20b94c7cdfda8c7ba9b92ad0d9a56e3fa018c25fca83b51aa664c9b4c0d" +checksum = "aaaae8f38bb311444cfb7f1979af0bc9240d95795f75f9ceddf6a59b79ceffa0" dependencies = [ "proc-macro2", "quote", @@ -6863,7 +7278,7 @@ checksum = "ad502866817f0575705bd7be36e2b2535cc33262d493aa733a2ec862baa2bc2b" dependencies = [ "bitflags", "core-foundation", - "core-foundation-sys", + "core-foundation-sys 0.7.0", "libc", "security-framework-sys", ] @@ -6874,7 +7289,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51ceb04988b17b6d1dcd555390fa822ca5637b4a14e1f5099f13d351bed4d6c7" dependencies = [ - "core-foundation-sys", + "core-foundation-sys 0.7.0", "libc", ] @@ -6895,9 +7310,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.123" +version = "1.0.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d5161132722baa40d802cc70b15262b98258453e85e5d1d365c757c73869ae" +checksum = "bd761ff957cb2a45fbb9ab3da6512de9de55872866160b23c25f1a841e99d29f" dependencies = [ "serde_derive", ] @@ -6914,9 +7329,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.123" +version = "1.0.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9391c295d64fc0abb2c556bad848f33cb8296276b1ad2677d1ae1ace4f258f31" +checksum = "1800f7693e94e186f5e25a28291ae1570da908aff7d97a095dec1e56ff99069b" dependencies = [ "proc-macro2", "quote", @@ -6925,10 +7340,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.62" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea1c6153794552ea7cf7cf63b1231a25de00ec90db326ba6264440fa08e31486" +checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" dependencies = [ + "indexmap", "itoa", "ryu", "serde", @@ -6948,9 +7364,9 @@ dependencies = [ [[package]] name = "sha-1" -version = "0.9.2" +version = "0.9.4" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce3cdf1b5e620a498ee6f2a171885ac7e22f0e12089ec4b3d22b84921792507c" +checksum = "dfebf75d25bd900fd1e7d11501efab59bc846dbc76196839663e6637bba9f25f" dependencies = [ "block-buffer 0.9.0", "cfg-if 1.0.0", @@ -6979,9 +7395,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e7aab86fe2149bad8c507606bdb3f4ef5e7b2380eb92350f56122cca72a42a8" +checksum = "fa827a14b29ab7f44778d14a88d3cb76e949c45083f7dbfa507d0cb699dc12de" dependencies = [ "block-buffer 0.9.0", "cfg-if 1.0.0", @@ -7004,12 +7420,11 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4921be914e16899a80adefb821f8ddb7974e3f1250223575a44ed994882127" +checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" dependencies = [ "lazy_static", - "loom", ] [[package]] @@ -7020,9 +7435,9 @@ checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" [[package]] name = "signal-hook" -version = "0.1.17" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e31d442c16f047a671b5a71e2161d6e68814012b7f5379d269ebd915fac2729" +checksum = "8a7f3f92a1da3d6b1d32245d0cbcbbab0cfc45996d8df619c42bccfa6d2bbb5f" dependencies = [ "libc", "signal-hook-registry", @@ -7039,9 +7454,9 @@ dependencies = [ [[package]] name = "signature" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f060a7d147e33490ec10da418795238fd7545bba241504d6b31a409f2e6210" +checksum = "0f0242b8e50dd9accdd56170e94ca1ebd223b098eb9c83539a6e367d0f36ae68" [[package]] name = "simba" @@ -7061,6 +7476,17 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +[[package]] +name = "sluice" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fa0333a60ff2e3474a6775cc611840c2a55610c831dd366503474c02f1a28f5" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", +] + [[package]] name = "smallvec" version = "0.6.14" @@ -7089,7 +7515,7 @@ dependencies = [ "rand_core 0.5.1", "ring", "rustc_version", - "sha2 0.9.2", + "sha2 0.9.3", "subtle 2.4.0", "x25519-dalek", ] @@ -7106,22 +7532,13 @@ dependencies = [ ] [[package]] -name = "soketto" -version = "0.3.2" +name = "socket2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c9dab3f95c9ebdf3a88268c19af668f637a3c5039c2c56ff2d40b1b2d64a25b" +checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2" dependencies = [ - "base64 0.11.0", - "bytes 0.5.6", - "futures 0.3.12", - "http 0.2.2", - "httparse", - "log", - "rand 0.7.3", - "sha1", - "smallvec 1.6.1", - "static_assertions", - "thiserror", + "libc", + "winapi 0.3.9", ] [[package]] @@ -7133,17 +7550,17 @@ dependencies = [ "base64 0.12.3", "bytes 0.5.6", "flate2", - "futures 0.3.12", + "futures 0.3.13", "httparse", "log", "rand 0.7.3", - "sha-1 0.9.2", + "sha-1 0.9.4", ] [[package]] name = "sp-allocator" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "log", "sp-core", @@ -7155,10 +7572,11 @@ dependencies = [ [[package]] name = "sp-api" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "hash-db", - "parity-scale-codec", + 
"log", + "parity-scale-codec 2.0.1", "sp-api-proc-macro", "sp-core", "sp-runtime", @@ -7171,10 +7589,10 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "blake2-rfc", - "proc-macro-crate", + "proc-macro-crate 1.0.0", "proc-macro2", "quote", "syn", @@ -7183,9 +7601,9 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-core", "sp-io", @@ -7195,22 +7613,23 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "integer-sqrt", "num-traits", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-debug-derive", "sp-std", + "static_assertions", ] [[package]] name = "sp-authorship" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-inherents", "sp-runtime", "sp-std", @@ -7219,9 +7638,9 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api", "sp-inherents", "sp-runtime", @@ -7231,12 +7650,12 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "log", "lru", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sp-api", "sp-consensus", @@ -7249,7 +7668,7 @@ dependencies = [ [[package]] name = "sp-chain-spec" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "serde", "serde_json", @@ -7258,13 +7677,14 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "futures 0.3.12", + "async-trait", + "futures 0.3.13", "futures-timer 3.0.2", "libp2p", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "serde", "sp-api", @@ -7284,11 +7704,12 @@ dependencies = [ [[package]] name = "sp-consensus-aura" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = 
"git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api", "sp-application-crypto", + "sp-consensus", "sp-consensus-slots", "sp-inherents", "sp-runtime", @@ -7299,10 +7720,11 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "merlin", - "parity-scale-codec", + "parity-scale-codec 2.0.1", + "serde", "sp-api", "sp-application-crypto", "sp-consensus", @@ -7319,9 +7741,9 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-arithmetic", "sp-runtime", ] @@ -7329,9 +7751,9 @@ dependencies = [ [[package]] name = "sp-consensus-vrf" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "schnorrkel", "sp-core", "sp-runtime", @@ -7341,14 +7763,14 @@ dependencies = [ [[package]] name = "sp-core" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "base58", "blake2-rfc", "byteorder", "dyn-clonable", 
"ed25519-dalek", - "futures 0.3.12", + "futures 0.3.13", "hash-db", "hash256-std-hasher", "hex", @@ -7358,7 +7780,7 @@ dependencies = [ "log", "merlin", "num-traits", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-util-mem", "parking_lot 0.11.1", "primitive-types", @@ -7367,7 +7789,7 @@ dependencies = [ "schnorrkel", "secrecy", "serde", - "sha2 0.9.2", + "sha2 0.9.3", "sp-debug-derive", "sp-externalities", "sp-runtime-interface", @@ -7385,7 +7807,7 @@ dependencies = [ [[package]] name = "sp-database" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "kvdb", "parking_lot 0.11.1", @@ -7394,7 +7816,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "proc-macro2", "quote", @@ -7404,10 +7826,10 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "environmental", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-std", "sp-storage", ] @@ -7415,11 +7837,11 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "finality-grandpa", + "finality-grandpa 
0.14.0", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-api", "sp-application-crypto", @@ -7432,9 +7854,9 @@ dependencies = [ [[package]] name = "sp-inherents" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sp-core", "sp-std", @@ -7444,13 +7866,13 @@ dependencies = [ [[package]] name = "sp-io" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "hash-db", "libsecp256k1", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "sp-core", "sp-externalities", @@ -7468,7 +7890,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "lazy_static", "sp-core", @@ -7479,13 +7901,13 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "async-trait", "derive_more", - "futures 0.3.12", + "futures 0.3.13", "merlin", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "schnorrkel", "serde", @@ -7493,10 +7915,19 @@ dependencies = [ "sp-externalities", ] 
+[[package]] +name = "sp-maybe-compressed-blob" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "ruzstd", + "zstd", +] + [[package]] name = "sp-offchain" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "sp-api", "sp-core", @@ -7506,7 +7937,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "backtrace", ] @@ -7514,7 +7945,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "serde", "sp-core", @@ -7523,13 +7954,13 @@ dependencies = [ [[package]] name = "sp-runtime" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "either", "hash256-std-hasher", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parity-util-mem", "paste 1.0.4", "rand 0.7.3", @@ -7544,10 +7975,10 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = 
"git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "primitive-types", "sp-externalities", "sp-runtime-interface-proc-macro", @@ -7561,10 +7992,10 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "Inflector", - "proc-macro-crate", + "proc-macro-crate 1.0.0", "proc-macro2", "quote", "syn", @@ -7573,7 +8004,7 @@ dependencies = [ [[package]] name = "sp-serializer" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "serde", "serde_json", @@ -7582,9 +8013,9 @@ dependencies = [ [[package]] name = "sp-session" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api", "sp-core", "sp-runtime", @@ -7595,9 +8026,9 @@ dependencies = [ [[package]] name = "sp-staking" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-runtime", "sp-std", ] @@ -7605,12 +8036,12 @@ dependencies = [ [[package]] name = "sp-state-machine" version = 
"0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "hash-db", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "parking_lot 0.11.1", "rand 0.7.3", "smallvec 1.6.1", @@ -7627,15 +8058,15 @@ dependencies = [ [[package]] name = "sp-std" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" [[package]] name = "sp-storage" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "impl-serde", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "ref-cast", "serde", "sp-debug-derive", @@ -7645,7 +8076,7 @@ dependencies = [ [[package]] name = "sp-tasks" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "log", "sp-core", @@ -7658,10 +8089,9 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-api", "sp-inherents", "sp-runtime", @@ -7672,10 +8102,10 @@ dependencies = [ [[package]] name = "sp-tracing" version = "3.0.0" 
-source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-std", "tracing", "tracing-core", @@ -7685,12 +8115,12 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "derive_more", - "futures 0.3.12", + "futures 0.3.13", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-api", "sp-blockchain", @@ -7701,11 +8131,11 @@ dependencies = [ [[package]] name = "sp-trie" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "hash-db", "memory-db", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-core", "sp-std", "trie-db", @@ -7715,9 +8145,9 @@ dependencies = [ [[package]] name = "sp-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "futures-core", "futures-timer 3.0.2", "lazy_static", @@ -7727,10 +8157,10 @@ dependencies = [ [[package]] name = "sp-version" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = 
"git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "impl-serde", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "serde", "sp-runtime", "sp-std", @@ -7739,10 +8169,10 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sp-std", "wasmi", ] @@ -7753,6 +8183,15 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spinning_top" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bd0ab6b8c375d2d963503b90d3770010d95bc3b5f98036f948dee24bf4e8879" +dependencies = [ + "lock_api 0.4.2", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -7761,9 +8200,9 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "standback" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf906c8b8fc3f6ecd1046e01da1d8ddec83e48c8b08b84dcc02b585a6bedf5a8" +checksum = "a2beb4d1860a61f571530b3f855a1b538d0200f7871c63331ecd6f17b1f014f8" dependencies = [ "version_check", ] @@ -7832,6 +8271,30 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" +[[package]] +name = "storage-proof-fuzzer" +version = "0.1.0" +dependencies = [ + "bp-header-chain", + "bp-runtime", + "bp-test-utils", + "env_logger 0.8.3", + "finality-grandpa 0.12.3", + "frame-support", + "frame-system", + "hash-db", + 
"honggfuzz", + "log", + "parity-scale-codec 1.3.7", + "sp-core", + "sp-finality-grandpa", + "sp-io", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-trie", +] + [[package]] name = "stream-cipher" version = "0.7.1" @@ -7917,9 +8380,9 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" -version = "2.0.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f14feab86fe31e7d0a485d53d7c1c634c426f7ae5b8ce4f705b2e49a35713fcb" +checksum = "bd540ba72520174c2c73ce96bf507eeba3cc8a481f58be92525b69110e1fa645" dependencies = [ "platforms", ] @@ -7927,15 +8390,15 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "frame-system-rpc-runtime-api", - "futures 0.3.12", + "futures 0.3.13", "jsonrpc-core 15.1.0", "jsonrpc-core-client", "jsonrpc-derive", "log", - "parity-scale-codec", + "parity-scale-codec 2.0.1", "sc-client-api", "sc-rpc-api", "serde", @@ -7950,7 +8413,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" dependencies = [ "async-std", "derive_more", @@ -7958,47 +8421,57 @@ dependencies = [ "hyper 0.13.10", "log", "prometheus", - "tokio 0.2.24", + "tokio 0.2.25", ] [[package]] name = "substrate-relay" version = "0.1.0" dependencies = [ + "anyhow", "async-std", "async-trait", "bp-header-chain", "bp-kusama", - "bp-message-lane", + "bp-messages", "bp-millau", "bp-polkadot", "bp-rialto", + "bp-rococo", "bp-runtime", + "bp-westend", "bridge-runtime-common", + "finality-grandpa 
0.14.0", + "finality-relay", "frame-support", - "futures 0.3.12", + "futures 0.3.13", "headers-relay", "hex", + "hex-literal 0.3.1", "log", "messages-relay", "millau-runtime", + "num-format", "num-traits", - "pallet-bridge-call-dispatch", - "pallet-message-lane", - "pallet-substrate-bridge", - "parity-scale-codec", + "pallet-bridge-dispatch", + "pallet-bridge-messages", + "parity-scale-codec 2.0.1", "paste 1.0.4", "relay-kusama-client", "relay-millau-client", "relay-polkadot-client", "relay-rialto-client", + "relay-rococo-client", "relay-substrate-client", "relay-utils", + "relay-westend-client", "rialto-runtime", "sp-core", "sp-finality-grandpa", + "sp-keyring", "sp-runtime", "sp-trie", + "sp-version", "structopt", ] @@ -8022,9 +8495,9 @@ checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" [[package]] name = "syn" -version = "1.0.60" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" +checksum = "3ce15dd3ed8aa2f8eeac4716d6ef5ab58b6b9256db41d7e1a0224c2788e8fd87" dependencies = [ "proc-macro2", "quote", @@ -8045,11 +8518,13 @@ dependencies = [ [[package]] name = "sysinfo" -version = "0.15.3" +version = "0.15.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67330cbee3b2a819e3365a773f05e884a136603687f812bf24db5b6c3d76b696" +checksum = "de94457a09609f33fec5e7fceaf907488967c6c7c75d64da6a7ce6ffdb8b5abd" dependencies = [ - "cfg-if 0.1.10", + "cc", + "cfg-if 1.0.0", + "core-foundation-sys 0.8.2", "doc-comment", "libc", "ntapi", @@ -8066,26 +8541,26 @@ checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" [[package]] name = "tap" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36474e732d1affd3a6ed582781b3683df3d0563714c59c39591e8ff707cf078e" +checksum = 
"55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee5a98e506fb7231a304c3a1bd7c132a55016cf65001e0282480665870dfcb9" +checksum = "422045212ea98508ae3d28025bc5aaa2bd4a9cdaecd442a08da2ee620ee9ea95" [[package]] name = "tempfile" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "libc", - "rand 0.7.3", - "redox_syscall 0.1.57", + "rand 0.8.3", + "redox_syscall 0.2.5", "remove_dir_all", "winapi 0.3.9", ] @@ -8110,18 +8585,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76cc616c6abf8c8928e2fdcc0dbfab37175edd8fb49a4641066ad1364fdab146" +checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9be73a2caec27583d0046ef3796c3794f868a5bc813db689eed00c7631275cd1" +checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0" dependencies = [ "proc-macro2", "quote", @@ -8130,11 +8605,11 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.0.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] @@ -8207,7 +8682,7 @@ dependencies = [ "pbkdf2 
0.4.0", "rand 0.7.3", "rustc-hash", - "sha2 0.9.2", + "sha2 0.9.3", "thiserror", "unicode-normalization", "zeroize", @@ -8224,9 +8699,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" +checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" dependencies = [ "tinyvec_macros", ] @@ -8244,7 +8719,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "mio", "num_cpus", "tokio-codec", @@ -8263,9 +8738,9 @@ dependencies = [ [[package]] name = "tokio" -version = "0.2.24" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" +checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" dependencies = [ "bytes 0.5.6", "fnv", @@ -8291,7 +8766,7 @@ checksum = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" dependencies = [ "bytes 0.4.12", "either", - "futures 0.1.30", + "futures 0.1.31", ] [[package]] @@ -8301,7 +8776,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "tokio-io", ] @@ -8311,7 +8786,7 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" dependencies = [ - "futures 0.1.30", + "futures 0.1.31", "tokio-executor", ] @@ -8322,7 +8797,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671" 
dependencies = [ "crossbeam-utils 0.7.2", - "futures 0.1.30", + "futures 0.1.31", ] [[package]] @@ -8331,7 +8806,7 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4" dependencies = [ - "futures 0.1.30", + "futures 0.1.31", "tokio-io", "tokio-threadpool", ] @@ -8343,7 +8818,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "log", ] @@ -8354,7 +8829,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d282d483052288b2308ba5ee795f5673b159c9bdf63c385a05609da782a5eae" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "mio", "mio-named-pipes", "tokio 0.1.22", @@ -8367,7 +8842,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351" dependencies = [ "crossbeam-utils 0.7.2", - "futures 0.1.30", + "futures 0.1.31", "lazy_static", "log", "mio", @@ -8387,8 +8862,8 @@ checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" dependencies = [ "futures-core", "rustls 0.18.1", - "tokio 0.2.24", - "webpki", + "tokio 0.2.25", + "webpki 0.21.4", ] [[package]] @@ -8397,7 +8872,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" dependencies = [ - "futures 0.1.30", + "futures 0.1.31", ] [[package]] @@ -8407,7 +8882,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee" dependencies = [ "fnv", - "futures 0.1.30", + "futures 0.1.31", ] [[package]] @@ -8417,7 +8892,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "iovec", "mio", "tokio-io", @@ -8433,7 +8908,7 @@ dependencies = [ "crossbeam-deque 0.7.3", "crossbeam-queue", "crossbeam-utils 0.7.2", - "futures 0.1.30", + "futures 0.1.31", "lazy_static", "log", "num_cpus", @@ -8448,7 +8923,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296" dependencies = [ "crossbeam-utils 0.7.2", - "futures 0.1.30", + "futures 0.1.31", "slab", "tokio-executor", ] @@ -8460,7 +8935,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "log", "mio", "tokio-codec", @@ -8475,7 +8950,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0" dependencies = [ "bytes 0.4.12", - "futures 0.1.30", + "futures 0.1.31", "iovec", "libc", "log", @@ -8497,7 +8972,7 @@ dependencies = [ "futures-sink", "log", "pin-project-lite 0.1.11", - "tokio 0.2.24", + "tokio 0.2.25", ] [[package]] @@ -8511,15 +8986,15 @@ dependencies = [ [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.22" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" +checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" dependencies = [ "cfg-if 1.0.0", "log", @@ -8530,9 +9005,9 @@ dependencies = 
[ [[package]] name = "tracing-attributes" -version = "0.1.11" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" +checksum = "a8a9bd1db7706f2373a190b0d067146caa39350c486f3d455b0e33b431f94c07" dependencies = [ "proc-macro2", "quote", @@ -8550,19 +9025,19 @@ dependencies = [ [[package]] name = "tracing-futures" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 0.4.27", + "pin-project 1.0.5", "tracing", ] [[package]] name = "tracing-log" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9" +checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" dependencies = [ "lazy_static", "log", @@ -8581,9 +9056,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1fa8f0c8f4c594e4fc9debc1990deab13238077271ba84dd853d54902ee3401" +checksum = "8ab8966ac3ca27126141f7999361cc97dd6fb4b71da04c02044fa9045d98bb96" dependencies = [ "ansi_term 0.12.1", "chrono", @@ -8603,12 +9078,12 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.22.2" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc176c377eb24d652c9c69c832c832019011b6106182bf84276c66b66d5c9a6" +checksum = "ec051edf7f0fc9499a2cb0947652cab2148b9d7f61cee7605e312e9f970dacaf" dependencies = [ "hash-db", - "hashbrown 0.9.1", + "hashbrown", "log", "rustc-hex", "smallvec 1.6.1", @@ -8625,12 +9100,55 @@ dependencies = [ [[package]] name = "triehash" -version 
= "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f490aa7aa4e4d07edeba442c007e42e3e7f43aafb5112c5b047fff0b1aa5449c" +checksum = "a1631b201eb031b563d2e85ca18ec8092508e262a3196ce9bd10a67ec87b9f5c" dependencies = [ "hash-db", - "rlp 0.4.6", + "rlp", +] + +[[package]] +name = "trust-dns-proto" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d57e219ba600dd96c2f6d82eb79645068e14edbc5c7e27514af40436b88150c" +dependencies = [ + "async-trait", + "cfg-if 1.0.0", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.2.2", + "ipnet", + "lazy_static", + "log", + "rand 0.8.3", + "smallvec 1.6.1", + "thiserror", + "tinyvec", + "url 2.2.1", +] + +[[package]] +name = "trust-dns-resolver" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0437eea3a6da51acc1e946545ff53d5b8fb2611ff1c3bed58522dde100536ae" +dependencies = [ + "cfg-if 1.0.0", + "futures-util", + "ipconfig", + "lazy_static", + "log", + "lru-cache", + "parking_lot 0.11.1", + "resolv-conf", + "smallvec 1.6.1", + "thiserror", + "trust-dns-proto", ] [[package]] @@ -8694,9 +9212,9 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" +checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" dependencies = [ "tinyvec", ] @@ -8742,7 +9260,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35581ff83d4101e58b582e607120c7f5ffb17e632a980b1f38334d76b36908b2" dependencies = [ "asynchronous-codec 0.5.0", - "bytes 1.0.0", + "bytes 1.0.1", "futures-io", "futures-util", ] @@ -8754,7 +9272,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5f8d425fafb8cd76bc3f22aace4af471d3156301d7508f2107e98fbeae10bc7f" dependencies = [ "asynchronous-codec 0.6.0", - "bytes 1.0.0", + "bytes 1.0.1", "futures-io", "futures-util", ] @@ -8778,12 +9296,12 @@ dependencies = [ [[package]] name = "url" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5909f2b0817350449ed73e8bcd81c8c3c8d9a7a5d8acba4b27db277f1868976e" +checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" dependencies = [ "form_urlencoded", - "idna 0.2.0", + "idna 0.2.2", "matches", "percent-encoding 2.1.0", ] @@ -8815,16 +9333,6 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" -[[package]] -name = "vergen" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ce50d8996df1f85af15f2cd8d33daae6e479575123ef4314a51a70a230739cb" -dependencies = [ - "bitflags", - "chrono", -] - [[package]] name = "version_check" version = "0.9.2" @@ -8849,7 +9357,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" dependencies = [ - "futures 0.1.30", + "futures 0.1.31", "log", "try-lock", ] @@ -8878,9 +9386,9 @@ checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasm-bindgen" -version = "0.2.69" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cd364751395ca0f68cafb17666eee36b63077fb5ecd972bbcd74c90c4bf736e" +checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -8888,9 +9396,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.69" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "1114f89ab1f4106e5b55e688b828c0ab0ea593a1ea7c094b141b14cbaaec2d62" +checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" dependencies = [ "bumpalo", "lazy_static", @@ -8903,9 +9411,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fe9756085a84584ee9457a002b7cdfe0bfff169f45d2591d8be1345a6780e35" +checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -8915,9 +9423,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.69" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6ac8995ead1f084a8dea1e65f194d0973800c7f571f6edd70adf06ecf77084" +checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8925,9 +9433,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.69" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a48c72f299d80557c7c62e37e7225369ecc0c963964059509fbafe917c7549" +checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" dependencies = [ "proc-macro2", "quote", @@ -8938,9 +9446,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.69" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" +checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" [[package]] name = "wasm-timer" @@ -8948,7 +9456,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", 
"js-sys", "parking_lot 0.11.1", "pin-utils", @@ -8982,15 +9490,15 @@ dependencies = [ [[package]] name = "wasmparser" -version = "0.71.0" +version = "0.76.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89a30c99437829ede826802bfcf28500cf58df00e66cb9114df98813bc145ff1" +checksum = "755a9a4afe3f6cccbbe6d7e965eef44cf260b001f93e547eba84255c1d0187d8" [[package]] name = "wasmtime" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7426055cb92bd9a1e9469b48154d8d6119cd8c498c8b70284e420342c05dc45d" +checksum = "718cb52a9fdb7ab12471e9b9d051c9adfa6b5c504e0a1fea045e5eabc81eedd9" dependencies = [ "anyhow", "backtrace", @@ -9000,6 +9508,7 @@ dependencies = [ "indexmap", "libc", "log", + "paste 1.0.4", "region", "rustc-demangle", "serde", @@ -9008,6 +9517,7 @@ dependencies = [ "wasmparser", "wasmtime-cache", "wasmtime-environ", + "wasmtime-fiber", "wasmtime-jit", "wasmtime-profiling", "wasmtime-runtime", @@ -9017,9 +9527,9 @@ dependencies = [ [[package]] name = "wasmtime-cache" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c01d9287e36921e46f5887a47007824ae5dbb9b7517a2d565660ab4471478709" +checksum = "1f984df56c4adeba91540f9052db9f7a8b3b00cfaac1a023bee50a972f588b0c" dependencies = [ "anyhow", "base64 0.13.0", @@ -9030,7 +9540,7 @@ dependencies = [ "libc", "log", "serde", - "sha2 0.9.2", + "sha2 0.9.3", "toml", "winapi 0.3.9", "zstd", @@ -9038,22 +9548,23 @@ dependencies = [ [[package]] name = "wasmtime-cranelift" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4134ed3a4316cd0de0e546c6004850afe472b0fa3fcdc2f2c15f8d449562d962" +checksum = "2a05abbf94e03c2c8ee02254b1949320c4d45093de5d9d6ed4d9351d536075c9" dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", "cranelift-wasm", + "wasmparser", "wasmtime-environ", ] 
[[package]] name = "wasmtime-debug" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91fa931df6dd8af2b02606307674d3bad23f55473d5f4c809dddf7e4c4dc411" +checksum = "382eecd6281c6c1d1f3c904c3c143e671fc1a9573820cbfa777fba45ce2eda9c" dependencies = [ "anyhow", "gimli", @@ -9067,9 +9578,9 @@ dependencies = [ [[package]] name = "wasmtime-environ" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1098871dc3120aaf8190d79153e470658bb79f63ee9ca31716711e123c28220" +checksum = "81011b2b833663d7e0ce34639459a0e301e000fc7331e0298b3a27c78d0cec60" dependencies = [ "anyhow", "cfg-if 1.0.0", @@ -9086,10 +9597,21 @@ dependencies = [ ] [[package]] -name = "wasmtime-jit" -version = "0.22.0" +name = "wasmtime-fiber" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "738bfcd1561ede8bb174215776fd7d9a95d5f0a47ca3deabe0282c55f9a89f68" +checksum = "d92da32e31af2e3d828f485f5f24651ed4d3b7f03a46ea6555eae6940d1402cd" +dependencies = [ + "cc", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "wasmtime-jit" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b5f649623859a12d361fe4cc4793de44f7c3ff34c322c5714289787e89650bb" dependencies = [ "addr2line", "anyhow", @@ -9120,9 +9642,9 @@ dependencies = [ [[package]] name = "wasmtime-obj" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e96d77f1801131c5e86d93e42a3cf8a35402107332c202c245c83f34888a906" +checksum = "ef2e99cd9858f57fd062e9351e07881cedfc8597928385e02a48d9333b9e15a1" dependencies = [ "anyhow", "more-asserts", @@ -9134,9 +9656,9 @@ dependencies = [ [[package]] name = "wasmtime-profiling" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"60bb672c9d894776d7b9250dd9b4fe890f8760201ee4f53e5f2da772b6c4debb" +checksum = "e46c0a590e49278ba7f79ef217af9db4ecc671b50042c185093e22d73524abb2" dependencies = [ "anyhow", "cfg-if 1.0.0", @@ -9153,9 +9675,9 @@ dependencies = [ [[package]] name = "wasmtime-runtime" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a978086740949eeedfefcee667b57a9e98d9a7fc0de382fcfa0da30369e3530d" +checksum = "1438a09185fc7ca067caf1a80d7e5b398eefd4fb7630d94841448ade60feb3d0" dependencies = [ "backtrace", "cc", @@ -9175,27 +9697,27 @@ dependencies = [ [[package]] name = "wast" -version = "30.0.0" +version = "35.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b79907b22f740634810e882d8d1d9d0f9563095a8ab94e786e370242bff5cd2" +checksum = "1a5800e9f86a1eae935e38bea11e60fd253f6d514d153fb39b3e5535a7b37b56" dependencies = [ "leb128", ] [[package]] name = "wat" -version = "1.0.31" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8279a02835bf12e61ed2b3c3cbc6ecf9918762fd97e036917c11a09ec20ca44" +checksum = "8ec280a739b69173e0ffd12c1658507996836ba4e992ed9bc1e5385a0bd72a02" dependencies = [ "wast", ] [[package]] name = "web-sys" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222b1ef9334f92a21d3fb53dc3fd80f30836959a90f9274a626d7e06315ba3c3" +checksum = "c40dc691fc48003eba817c38da7113c15698142da971298003cac3ef175680b3" dependencies = [ "js-sys", "wasm-bindgen", @@ -9210,14 +9732,14 @@ dependencies = [ "derive_more", "ethabi", "ethereum-types", - "futures 0.3.12", + "futures 0.3.13", "futures-timer 3.0.2", "hex", "jsonrpc-core 17.0.0", "log", "parking_lot 0.11.1", "pin-project 1.0.5", - "rlp 0.5.0", + "rlp", "serde", "serde_json", "tiny-keccak", @@ -9234,12 +9756,13 @@ dependencies = [ ] [[package]] -name = "webpki-roots" -version = "0.17.0" +name = "webpki" +version = "0.22.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a262ae37dd9d60f60dd473d1158f9fbebf110ba7b6a5051c8160460f6043718b" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ - "webpki", + "ring", + "untrusted", ] [[package]] @@ -9248,7 +9771,7 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82015b7e0b8bad8185994674a13a93306bea76cf5a16c5a181382fd3a5ec2376" dependencies = [ - "webpki", + "webpki 0.21.4", ] [[package]] @@ -9279,6 +9802,12 @@ dependencies = [ "thiserror", ] +[[package]] +name = "widestring" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" + [[package]] name = "winapi" version = "0.2.8" @@ -9322,6 +9851,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "winreg" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "ws2_32-sys" version = "0.2.1" @@ -9344,7 +9882,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc614d95359fd7afc321b66d2107ede58b246b844cf5d8a0adcca413e439f088" dependencies = [ - "curve25519-dalek 3.0.0", + "curve25519-dalek 3.0.2", "rand_core 0.5.1", "zeroize", ] @@ -9361,7 +9899,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cc7bd8c983209ed5d527f44b01c41b7dc146fd960c61cf9e1d25399841dc271" dependencies = [ - "futures 0.3.12", + "futures 0.3.13", "log", "nohash-hasher", "parking_lot 0.11.1", @@ -9392,18 +9930,18 @@ dependencies = [ [[package]] name = "zstd" -version = "0.5.4+zstd.1.4.7" +version = 
"0.6.1+zstd.1.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910" +checksum = "5de55e77f798f205d8561b8fe2ef57abfb6e0ff2abe7fd3c089e119cdb5631a3" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "2.0.6+zstd.1.4.7" +version = "3.0.1+zstd.1.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e" +checksum = "1387cabcd938127b30ce78c4bf00b30387dddf704e3f0881dbc4ff62b5566f8c" dependencies = [ "libc", "zstd-sys", @@ -9411,12 +9949,10 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.4.18+zstd.1.4.7" +version = "1.4.20+zstd.1.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81" +checksum = "ebd5b733d7cf2d9447e2c3e76a5589b4f5e5ae065c22a2bc0b023cbc331b6c8e" dependencies = [ "cc", - "glob", - "itertools", "libc", ] diff --git a/polkadot/bridges/Dockerfile b/polkadot/bridges/Dockerfile index a1ff908974..b3c4a7b4ba 100644 --- a/polkadot/bridges/Dockerfile +++ b/polkadot/bridges/Dockerfile @@ -8,33 +8,10 @@ # # See the `deployments/README.md` for all the available `PROJECT` values. -# This first stage prepares our dependencies to be built by `cargo-chef`. -FROM rust as planner -WORKDIR /parity-bridges-common -RUN cargo install cargo-chef --version 0.1.13 -COPY . . -RUN cargo chef prepare --recipe-path recipe.json - -# This second stage is where the dependencies actually get built. -# The reason we split it from the first stage is so that the `COPY . .` -# step doesn't blow our cache. 
-FROM paritytech/bridge-dependencies AS cacher -WORKDIR /parity-bridges-common -RUN cargo install cargo-chef --version 0.1.13 - -COPY --from=planner /parity-bridges-common/recipe.json recipe.json -RUN cargo chef cook --release --recipe-path recipe.json - -# In this third stage we go ahead and build the actual binary we want. -# This should be fairly quick since the dependencies are being built and -# cached in the previous stage. FROM paritytech/bridge-dependencies as builder WORKDIR /parity-bridges-common -RUN cargo install cargo-chef --version 0.1.13 COPY . . -COPY --from=cacher /parity-bridges-common/target target -COPY --from=cacher $CARGO_HOME $CARGO_HOME ARG PROJECT=ethereum-poa-relay RUN cargo build --release --verbose -p ${PROJECT} @@ -42,17 +19,23 @@ RUN strip ./target/release/${PROJECT} # In this final stage we copy over the final binary and do some checks # to make sure that everything looks good. -FROM ubuntu:xenial as runtime +FROM ubuntu:20.04 as runtime # show backtraces ENV RUST_BACKTRACE 1 +ENV DEBIAN_FRONTEND=noninteractive RUN set -eux; \ apt-get update && \ - apt-get install -y libssl-dev curl - -RUN groupadd -g 1000 user \ - && useradd -u 1000 -g user -s /bin/sh -m user + apt-get install -y curl ca-certificates && \ + apt-get install -y --no-install-recommends libssl-dev && \ + update-ca-certificates && \ + groupadd -g 1000 user && \ + useradd -u 1000 -g user -s /bin/sh -m user && \ + # apt clean up + apt-get autoremove -y && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* # switch to non-root user USER user @@ -69,3 +52,20 @@ RUN ./${PROJECT} --version ENV PROJECT=$PROJECT ENTRYPOINT ["/home/user/bridge-entrypoint.sh"] + +# metadata +ARG VCS_REF=master +ARG BUILD_DATE="" +ARG VERSION="" + +LABEL org.opencontainers.image.title="${PROJECT}" \ + org.opencontainers.image.description="${PROJECT} - component of Parity Bridges Common" \ + 
org.opencontainers.image.source="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/Dockerfile" \ + org.opencontainers.image.url="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/Dockerfile" \ + org.opencontainers.image.documentation="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/README.md" \ + org.opencontainers.image.created="${BUILD_DATE}" \ + org.opencontainers.image.version="${VERSION}" \ + org.opencontainers.image.revision="${VCS_REF}" \ + org.opencontainers.image.authors="devops-team@parity.io" \ + org.opencontainers.image.vendor="Parity Technologies" \ + org.opencontainers.image.licenses="GPL-3.0 License" diff --git a/polkadot/bridges/README.md b/polkadot/bridges/README.md index eaa63ad14c..8f6446c887 100644 --- a/polkadot/bridges/README.md +++ b/polkadot/bridges/README.md @@ -11,6 +11,7 @@ Substrate chains or Ethereum Proof-of-Authority chains. 🚧 The bridges are currently under construction - a hardhat is recommended beyond this point 🚧 ## Contents + - [Installation](#installation) - [High-Level Architecture](#high-level-architecture) - [Project Layout](#project-layout) @@ -19,6 +20,7 @@ Substrate chains or Ethereum Proof-of-Authority chains. - [Community](#community) ## Installation + To get up and running you need both stable and nightly Rust. Rust nightly is used to build the Web Assembly (WASM) runtime for the node. You can configure the WASM support as so: @@ -70,6 +72,7 @@ Take a look at [Bridge High Level Documentation](./docs/high-level-overview.md) description of the bridge interaction. ## Project Layout + Here's an overview of how the project is laid out. The main bits are the `node`, which is the actual "blockchain", the `modules` which are used to build the blockchain's logic (a.k.a the runtime) and the `relays` which are used to pass messages between chains. @@ -83,15 +86,16 @@ the `relays` which are used to pass messages between chains. │ └── ... 
├── modules // Substrate Runtime Modules (a.k.a Pallets) │ ├── ethereum // Ethereum PoA Header Sync Module -│ ├── substrate // Substrate Based Chain Header Sync Module -│ ├── message-lane // Cross Chain Message Passing +│ ├── grandpa // On-Chain GRANDPA Light Client +│ ├── messages // Cross Chain Message Passing +│ ├── dispatch // Target Chain Message Execution │ └── ... ├── primitives // Code shared between modules, runtimes, and relays │ └── ... ├── relays // Application for sending headers and messages between chains │ └── ... └── scripts // Useful development and maintenence scripts - ``` +``` ## Running the Bridge @@ -99,9 +103,9 @@ To run the Bridge you need to be able to connect the bridge relay node to the RP on each side of the bridge (source and target chain). There are 3 ways to run the bridge, described below: - - building & running from source, - - building or using Docker images for each individual component, - - running a Docker Compose setup (recommended). +- building & running from source, +- building or using Docker images for each individual component, +- running a Docker Compose setup (recommended). 
### Using the Source @@ -154,20 +158,20 @@ Then we need to initialize and run the relayer: ```bash docker run --network=host -it \ - paritytech/substrate-relay initialize-rialto-headers-bridge-in-millau \ - --millau-host localhost \ - --millau-port 9945 \ - --rialto-host localhost \ - --rialto-port 9944 \ - --millau-signer //Alice + paritytech/substrate-relay init-bridge RialtoToMillau \ + --target-host localhost \ + --target-port 9945 \ + --source-host localhost \ + --source-port 9944 \ + --target-signer //Alice docker run --network=host -it \ - paritytech/substrate-relay rialto-headers-to-millau \ - --millau-host localhost \ - --millau-port 9945 \ - --rialto-host localhost \ - --rialto-port 9944 \ - --millau-signer //Bob \ + paritytech/substrate-relay relay-headers RialtoToMillau \ + --target-host localhost \ + --target-port 9945 \ + --source-host localhost \ + --source-port 9944 \ + --target-signer //Bob \ ``` You should now see the relayer submitting headers from the Millau chain to the Rialto chain. @@ -196,6 +200,7 @@ monitoring dashboards, etc. see the [Deployments README](./deployments/README.md A straightforward way to interact with and test the bridge is sending messages. This is explained in the [send message](./docs/send-message.md) document. + ## Community Main hangout for the community is [Element](https://element.io/) (formerly Riot). Element is a chat @@ -208,4 +213,3 @@ Element channel. The [Substrate Technical](https://app.element.io/#/room/#substrate-technical:matrix.org) Element channel is most suited for discussions regarding Substrate itself. 
- diff --git a/polkadot/bridges/bin/millau/node/Cargo.toml b/polkadot/bridges/bin/millau/node/Cargo.toml index 6f7aa94e6f..e31e2c871a 100644 --- a/polkadot/bridges/bin/millau/node/Cargo.toml +++ b/polkadot/bridges/bin/millau/node/Cargo.toml @@ -15,17 +15,18 @@ structopt = "0.3.21" # Bridge dependencies -bp-message-lane = { path = "../../../primitives/message-lane" } -bp-millau= { path = "../../../primitives/millau" } +bp-messages = { path = "../../../primitives/messages" } +bp-millau= { path = "../../../primitives/chain-millau" } bp-runtime = { path = "../../../primitives/runtime" } millau-runtime = { path = "../runtime" } -pallet-message-lane = { path = "../../../modules/message-lane" } -pallet-message-lane-rpc = { path = "../../../modules/message-lane/rpc" } +pallet-bridge-messages = { path = "../../../modules/messages" } # Substrate Dependencies frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } +node-inspect = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["wasmtime"] } sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -35,21 +36,21 @@ sc-executor = { git = "https://github.com/paritytech/substrate", branch = "maste sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-finality-grandpa-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-rpc = { 
git = "https://github.com/paritytech/substrate", branch = "master" } +sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" } [build-dependencies] -build-script-utils = { package = "substrate-build-script-utils", version = "2.0" } +substrate-build-script-utils = "3.0.0" frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } -vergen = "3.1.0" [features] default = [] diff --git a/polkadot/bridges/bin/millau/node/build.rs b/polkadot/bridges/bin/millau/node/build.rs index e9a10ff8ad..d9b50049e2 100644 --- a/polkadot/bridges/bin/millau/node/build.rs +++ b/polkadot/bridges/bin/millau/node/build.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,12 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . 
-use vergen::{generate_cargo_keys, ConstantsFlags}; - -const ERROR_MSG: &str = "Failed to generate metadata files"; +use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; fn main() { - generate_cargo_keys(ConstantsFlags::SHA_SHORT).expect(ERROR_MSG); + generate_cargo_keys(); - build_script_utils::rerun_if_git_head_changed(); + rerun_if_git_head_changed(); } diff --git a/polkadot/bridges/bin/millau/node/src/chain_spec.rs b/polkadot/bridges/bin/millau/node/src/chain_spec.rs index 8e9aded9f1..f9e9502da7 100644 --- a/polkadot/bridges/bin/millau/node/src/chain_spec.rs +++ b/polkadot/bridges/bin/millau/node/src/chain_spec.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -16,7 +16,7 @@ use bp_millau::derive_account_from_rialto_id; use millau_runtime::{ - AccountId, AuraConfig, BalancesConfig, BridgeRialtoConfig, GenesisConfig, GrandpaConfig, SessionConfig, + AccountId, AuraConfig, BalancesConfig, BridgeWestendGrandpaConfig, GenesisConfig, GrandpaConfig, SessionConfig, SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY, }; use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -122,7 +122,10 @@ impl Alternative { get_account_id_from_seed::("Ferdie//stash"), get_account_id_from_seed::("George//stash"), get_account_id_from_seed::("Harry//stash"), - pallet_message_lane::Module::::relayer_fund_account_id(), + pallet_bridge_messages::Pallet::< + millau_runtime::Runtime, + pallet_bridge_messages::DefaultInstance, + >::relayer_fund_account_id(), derive_account_from_rialto_id(bp_runtime::SourceAccount::Account( get_account_id_from_seed::("Dave"), )), @@ -151,31 +154,33 @@ fn testnet_genesis( _enable_println: bool, ) -> GenesisConfig { GenesisConfig { - frame_system: Some(SystemConfig { + frame_system: SystemConfig { code: 
WASM_BINARY.to_vec(), changes_trie_config: Default::default(), - }), - pallet_balances: Some(BalancesConfig { + }, + pallet_balances: BalancesConfig { balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(), - }), - pallet_aura: Some(AuraConfig { + }, + pallet_aura: AuraConfig { authorities: Vec::new(), - }), - pallet_grandpa: Some(GrandpaConfig { + }, + pallet_grandpa: GrandpaConfig { authorities: Vec::new(), - }), - pallet_substrate_bridge: Some(BridgeRialtoConfig { - // We'll initialize the pallet with a dispatchable instead. - init_data: None, - owner: Some(root_key.clone()), - }), - pallet_sudo: Some(SudoConfig { key: root_key }), - pallet_session: Some(SessionConfig { + }, + pallet_sudo: SudoConfig { key: root_key }, + pallet_session: SessionConfig { keys: initial_authorities .iter() .map(|x| (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone()))) .collect::>(), - }), + }, + pallet_bridge_grandpa_Instance1: BridgeWestendGrandpaConfig { + // for our deployments to avoid multiple same-nonces transactions: + // //Alice is already used to initialize Rialto<->Millau bridge + // => let's use //George to initialize Westend->Millau bridge + owner: Some(get_account_id_from_seed::("George")), + ..Default::default() + }, } } diff --git a/polkadot/bridges/bin/millau/node/src/cli.rs b/polkadot/bridges/bin/millau/node/src/cli.rs index 1149c4f910..46323ed25c 100644 --- a/polkadot/bridges/bin/millau/node/src/cli.rs +++ b/polkadot/bridges/bin/millau/node/src/cli.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -31,6 +31,7 @@ pub struct Cli { pub enum Subcommand { /// Key management cli utilities Key(sc_cli::KeySubcommand), + /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. 
Verify(sc_cli::VerifyCmd), @@ -61,7 +62,9 @@ pub enum Subcommand { /// Revert the chain to a previous state. Revert(sc_cli::RevertCmd), - /// The custom benchmark subcommmand benchmarking runtime pallets. - #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] + /// Inspect blocks or extrinsics. + Inspect(node_inspect::cli::InspectCmd), + + /// Benchmark runtime pallets. Benchmark(frame_benchmarking_cli::BenchmarkCmd), } diff --git a/polkadot/bridges/bin/millau/node/src/command.rs b/polkadot/bridges/bin/millau/node/src/command.rs index 8751a4516d..d73f9b1ac9 100644 --- a/polkadot/bridges/bin/millau/node/src/command.rs +++ b/polkadot/bridges/bin/millau/node/src/command.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -17,7 +17,7 @@ use crate::cli::{Cli, Subcommand}; use crate::service; use crate::service::new_partial; -use millau_runtime::Block; +use millau_runtime::{Block, RuntimeApi}; use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; @@ -154,6 +154,10 @@ pub fn run() -> sc_cli::Result<()> { Ok((cmd.run(client, backend), task_manager)) }) } + Some(Subcommand::Inspect(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run::(config)) + } None => { let runner = cli.create_runner(&cli.run)?; runner.run_node_until_exit(|config| async move { diff --git a/polkadot/bridges/bin/millau/node/src/lib.rs b/polkadot/bridges/bin/millau/node/src/lib.rs index fdecc0b45f..382d1c2d7f 100644 --- a/polkadot/bridges/bin/millau/node/src/lib.rs +++ b/polkadot/bridges/bin/millau/node/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/bin/millau/node/src/main.rs b/polkadot/bridges/bin/millau/node/src/main.rs index 07ec88727d..cf6dd9f733 100644 --- a/polkadot/bridges/bin/millau/node/src/main.rs +++ b/polkadot/bridges/bin/millau/node/src/main.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/bin/millau/node/src/service.rs b/polkadot/bridges/bin/millau/node/src/service.rs index 2f72e5717f..8677ec2e70 100644 --- a/polkadot/bridges/bin/millau/node/src/service.rs +++ b/polkadot/bridges/bin/millau/node/src/service.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify @@ -30,11 +30,13 @@ use millau_runtime::{self, opaque::Block, RuntimeApi}; use sc_client_api::{ExecutorProvider, RemoteBackend}; +use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; use sc_finality_grandpa::SharedVoterState; use sc_keystore::LocalKeystore; use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; +use sc_telemetry::{Telemetry, TelemetryWorker}; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; use sp_inherents::InherentDataProviders; use std::sync::Arc; @@ -70,6 +72,7 @@ pub fn new_partial( AuraPair, >, sc_finality_grandpa::LinkHalf, + Option, ), >, ServiceError, @@ -77,12 +80,30 @@ pub fn new_partial( if config.keystore_remote.is_some() { return Err(ServiceError::Other("Remote Keystores are not supported.".to_string())); } - let inherent_data_providers = sp_inherents::InherentDataProviders::new(); + let inherent_data_providers = InherentDataProviders::new(); - let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::(&config)?; + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( + &config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + )?; let client = Arc::new(client); + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", worker.run()); + telemetry + }); + let select_chain = sc_consensus::LongestChain::new(backend.clone()); let transaction_pool = sc_transaction_pool::BasicPool::new_full( @@ -93,22 +114,28 @@ pub 
fn new_partial( client.clone(), ); - let (grandpa_block_import, grandpa_link) = - sc_finality_grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain.clone())?; + let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), + telemetry.as_ref().map(|x| x.handle()), + )?; let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone()); - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( - sc_consensus_aura::slot_duration(&*client)?, - aura_block_import.clone(), - Some(Box::new(grandpa_block_import)), - client.clone(), - inherent_data_providers.clone(), - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), - )?; + let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { + block_import: aura_block_import.clone(), + justification_import: Some(Box::new(grandpa_block_import)), + client: client.clone(), + inherent_data_providers: inherent_data_providers.clone(), + spawner: &task_manager.spawn_essential_handle(), + can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), + slot_duration: sc_consensus_aura::slot_duration(&*client)?, + registry: config.prometheus_registry(), + check_for_equivocation: Default::default(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + })?; Ok(sc_service::PartialComponents { client, @@ -119,7 +146,7 @@ pub fn new_partial( select_chain, transaction_pool, inherent_data_providers, - other: (aura_block_import, grandpa_link), + other: (aura_block_import, grandpa_link, telemetry), }) } @@ -141,7 +168,7 @@ pub fn new_full(mut config: Configuration) -> Result select_chain, transaction_pool, inherent_data_providers, - other: (block_import, grandpa_link), + other: (block_import, grandpa_link, mut 
telemetry), } = new_partial(&config)?; if let Some(url) = &config.keystore_remote { @@ -173,13 +200,7 @@ pub fn new_full(mut config: Configuration) -> Result })?; if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - backend.clone(), - task_manager.spawn_handle(), - client.clone(), - network.clone(), - ); + sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone()); } let role = config.role.clone(); @@ -190,40 +211,9 @@ pub fn new_full(mut config: Configuration) -> Result let prometheus_registry = config.prometheus_registry().cloned(); let rpc_extensions_builder = { - use bp_message_lane::{LaneId, MessageNonce}; - use bp_runtime::{InstanceId, RIALTO_BRIDGE_INSTANCE}; use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider; - use sp_core::storage::StorageKey; - // This struct is here to ease update process. - - /// Millau runtime from message-lane RPC point of view. - struct MillauMessageLaneKeys; - - impl pallet_message_lane_rpc::Runtime for MillauMessageLaneKeys { - fn message_key(&self, instance: &InstanceId, lane: &LaneId, nonce: MessageNonce) -> Option { - match *instance { - RIALTO_BRIDGE_INSTANCE => Some(millau_runtime::rialto_messages::message_key(lane, nonce)), - _ => None, - } - } - - fn outbound_lane_data_key(&self, instance: &InstanceId, lane: &LaneId) -> Option { - match *instance { - RIALTO_BRIDGE_INSTANCE => Some(millau_runtime::rialto_messages::outbound_lane_data_key(lane)), - _ => None, - } - } - - fn inbound_lane_data_key(&self, instance: &InstanceId, lane: &LaneId) -> Option { - match *instance { - RIALTO_BRIDGE_INSTANCE => Some(millau_runtime::rialto_messages::inbound_lane_data_key(lane)), - _ => None, - } - } - } - - use pallet_message_lane_rpc::{MessageLaneApi, MessageLaneRpcHandler}; + use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; use sc_finality_grandpa_rpc::{GrandpaApi, GrandpaRpcHandler}; use 
sc_rpc::DenyUnsafe; use substrate_frame_rpc_system::{FullSystem, SystemApi}; @@ -237,7 +227,7 @@ pub fn new_full(mut config: Configuration) -> Result let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty(); let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend.clone(), Some(shared_authority_set.clone())); + GrandpaFinalityProofProvider::new_for_service(backend, Some(shared_authority_set.clone())); Box::new(move |_, subscription_executor| { let mut io = jsonrpc_core::IoHandler::default(); @@ -246,6 +236,9 @@ pub fn new_full(mut config: Configuration) -> Result pool.clone(), DenyUnsafe::No, ))); + io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new( + client.clone(), + ))); io.extend_with(GrandpaApi::to_delegate(GrandpaRpcHandler::new( shared_authority_set.clone(), shared_voter_state.clone(), @@ -253,16 +246,11 @@ pub fn new_full(mut config: Configuration) -> Result subscription_executor, finality_proof_provider.clone(), ))); - io.extend_with(MessageLaneApi::to_delegate(MessageLaneRpcHandler::new( - backend.clone(), - Arc::new(MillauMessageLaneKeys), - ))); - io }) }; - let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks(sc_service::SpawnTasksParams { + let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { network: network.clone(), client: client.clone(), keystore: keystore_container.sync_keystore(), @@ -275,32 +263,35 @@ pub fn new_full(mut config: Configuration) -> Result network_status_sinks, system_rpc_tx, config, - telemetry_span: None, + telemetry: telemetry.as_mut(), })?; if role.is_authority() { - let proposer = sc_basic_authorship::ProposerFactory::new( + let proposer_factory = sc_basic_authorship::ProposerFactory::new( task_manager.spawn_handle(), client.clone(), transaction_pool, prometheus_registry.as_ref(), + telemetry.as_ref().map(|x| x.handle()), ); let can_author_with = 
sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _, _>( - sc_consensus_aura::slot_duration(&*client)?, - client.clone(), + let aura = sc_consensus_aura::start_aura::(StartAuraParams { + slot_duration: sc_consensus_aura::slot_duration(&*client)?, + client: client.clone(), select_chain, block_import, - proposer, - network.clone(), + proposer_factory, inherent_data_providers, force_authoring, backoff_authoring_blocks, - keystore_container.sync_keystore(), + keystore: keystore_container.sync_keystore(), can_author_with, - )?; + sync_oracle: network.clone(), + block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), + telemetry: telemetry.as_ref().map(|x| x.handle()), + })?; // the AURA authoring task is considered essential, i.e. if it // fails we take down the service with it. @@ -323,6 +314,7 @@ pub fn new_full(mut config: Configuration) -> Result observer_enabled: false, keystore, is_authority: role.is_authority(), + telemetry: telemetry.as_ref().map(|x| x.handle()), }; if enable_grandpa { @@ -336,10 +328,10 @@ pub fn new_full(mut config: Configuration) -> Result config: grandpa_config, link: grandpa_link, network, - telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()), voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), prometheus_registry, shared_voter_state: SharedVoterState::empty(), + telemetry: telemetry.as_ref().map(|x| x.handle()), }; // the GRANDPA voter task is considered infallible, i.e. @@ -355,8 +347,27 @@ pub fn new_full(mut config: Configuration) -> Result /// Builds a new service for a light client. 
pub fn new_light(mut config: Configuration) -> Result { + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + let (client, backend, keystore_container, mut task_manager, on_demand) = - sc_service::new_light_parts::(&config)?; + sc_service::new_light_parts::( + &config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + )?; + + let mut telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", worker.run()); + telemetry + }); config .network @@ -373,22 +384,28 @@ pub fn new_light(mut config: Configuration) -> Result on_demand.clone(), )); - let (grandpa_block_import, _) = - sc_finality_grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain)?; + let (grandpa_block_import, _) = sc_finality_grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain, + telemetry.as_ref().map(|x| x.handle()), + )?; let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone()); - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( - sc_consensus_aura::slot_duration(&*client)?, - aura_block_import, - Some(Box::new(grandpa_block_import)), - client.clone(), - InherentDataProviders::new(), - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - sp_consensus::NeverCanAuthor, - )?; + let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { + block_import: aura_block_import, + justification_import: Some(Box::new(grandpa_block_import)), + client: client.clone(), + inherent_data_providers: InherentDataProviders::new(), + spawner: &task_manager.spawn_essential_handle(), + can_author_with: sp_consensus::NeverCanAuthor, + 
slot_duration: sc_consensus_aura::slot_duration(&*client)?, + registry: config.prometheus_registry(), + check_for_equivocation: Default::default(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + })?; let (network, network_status_sinks, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { @@ -402,13 +419,7 @@ pub fn new_light(mut config: Configuration) -> Result })?; if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - backend.clone(), - task_manager.spawn_handle(), - client.clone(), - network.clone(), - ); + sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone()); } sc_service::spawn_tasks(sc_service::SpawnTasksParams { @@ -424,7 +435,7 @@ pub fn new_light(mut config: Configuration) -> Result network, network_status_sinks, system_rpc_tx, - telemetry_span: None, + telemetry: telemetry.as_mut(), })?; network_starter.start_network(); diff --git a/polkadot/bridges/bin/millau/runtime/Cargo.toml b/polkadot/bridges/bin/millau/runtime/Cargo.toml index d163661284..e1f7ed10c6 100644 --- a/polkadot/bridges/bin/millau/runtime/Cargo.toml +++ b/polkadot/bridges/bin/millau/runtime/Cargo.toml @@ -10,21 +10,21 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } hex-literal = "0.3" -serde = { version = "1.0.123", optional = true, features = ["derive"] } +serde = { version = "1.0.124", optional = true, features = ["derive"] } # Bridge dependencies bp-header-chain = { path = "../../../primitives/header-chain", default-features = false } -bp-message-lane = { path = "../../../primitives/message-lane", default-features = false } -bp-millau = { path = "../../../primitives/millau", default-features = false } -bp-rialto = { path = "../../../primitives/rialto", default-features = false } +bp-messages = { path = 
"../../../primitives/messages", default-features = false } +bp-millau = { path = "../../../primitives/chain-millau", default-features = false } +bp-rialto = { path = "../../../primitives/chain-rialto", default-features = false } bp-runtime = { path = "../../../primitives/runtime", default-features = false } +bp-westend = { path = "../../../primitives/chain-westend", default-features = false } bridge-runtime-common = { path = "../../runtime-common", default-features = false } -pallet-bridge-call-dispatch = { path = "../../../modules/call-dispatch", default-features = false } -pallet-finality-verifier = { path = "../../../modules/finality-verifier", default-features = false } -pallet-message-lane = { path = "../../../modules/message-lane", default-features = false } +pallet-bridge-dispatch = { path = "../../../modules/dispatch", default-features = false } +pallet-bridge-grandpa = { path = "../../../modules/grandpa", default-features = false } +pallet-bridge-messages = { path = "../../../modules/messages", default-features = false } pallet-shift-session-manager = { path = "../../../modules/shift-session-manager", default-features = false } -pallet-substrate-bridge = { path = "../../../modules/substrate", default-features = false } # Substrate Dependencies @@ -40,12 +40,13 @@ pallet-session = { git = "https://github.com/paritytech/substrate", branch = "ma pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-block-builder = 
{ git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } @@ -61,36 +62,37 @@ wasm-builder-runner = { package = "substrate-wasm-builder-runner", version = "2. 
default = ["std"] std = [ "bp-header-chain/std", - "bp-message-lane/std", + "bp-messages/std", "bp-millau/std", "bp-rialto/std", "bp-runtime/std", + "bp-westend/std", "bridge-runtime-common/std", "codec/std", "frame-executive/std", "frame-support/std", - "frame-system/std", "frame-system-rpc-runtime-api/std", + "frame-system/std", "pallet-aura/std", "pallet-balances/std", - "pallet-bridge-call-dispatch/std", - "pallet-finality-verifier/std", + "pallet-bridge-dispatch/std", + "pallet-bridge-grandpa/std", + "pallet-bridge-messages/std", "pallet-grandpa/std", - "pallet-message-lane/std", "pallet-randomness-collective-flip/std", - "pallet-shift-session-manager/std", "pallet-session/std", - "pallet-substrate-bridge/std", + "pallet-shift-session-manager/std", "pallet-sudo/std", "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", "serde", "sp-api/std", "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", - "sp-inherents/std", "sp-finality-grandpa/std", + "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", "sp-session/std", @@ -99,3 +101,6 @@ std = [ "sp-trie/std", "sp-version/std", ] +# TODO: https://github.com/paritytech/parity-bridges-common/issues/390 +# I've left the feature flag here to test our CI configuration +runtime-benchmarks = [] diff --git a/polkadot/bridges/bin/millau/runtime/build.rs b/polkadot/bridges/bin/millau/runtime/build.rs index 4fda040c9b..dcb5cb0621 100644 --- a/polkadot/bridges/bin/millau/runtime/build.rs +++ b/polkadot/bridges/bin/millau/runtime/build.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/bin/millau/runtime/src/lib.rs b/polkadot/bridges/bin/millau/runtime/src/lib.rs index 491359fc97..30cf1bd87c 100644 --- a/polkadot/bridges/bin/millau/runtime/src/lib.rs +++ b/polkadot/bridges/bin/millau/runtime/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -37,6 +37,7 @@ use crate::rialto_messages::{ToRialtoMessagePayload, WithRialtoMessageBridge}; use bridge_runtime_common::messages::{source::estimate_message_dispatch_and_delivery_fee, MessageBridge}; use codec::Decode; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList}; +use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; use sp_api::impl_runtime_apis; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; @@ -61,8 +62,9 @@ pub use frame_support::{ pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; -pub use pallet_message_lane::Call as MessageLaneCall; -pub use pallet_substrate_bridge::Call as BridgeRialtoCall; +pub use pallet_bridge_grandpa::Call as BridgeGrandpaRialtoCall; +pub use pallet_bridge_grandpa::Call as BridgeGrandpaWestendCall; +pub use pallet_bridge_messages::Call as MessagesCall; pub use pallet_sudo::Call as SudoCall; pub use pallet_timestamp::Call as TimestampCall; @@ -199,15 +201,16 @@ impl frame_system::Config for Runtime { type DbWeight = DbWeight; /// The designated SS58 prefix of this chain. type SS58Prefix = SS58Prefix; + /// The set code logic, just the default since we're not a parachain. 
type OnSetCode = (); } impl pallet_aura::Config for Runtime { type AuthorityId = AuraId; } -impl pallet_bridge_call_dispatch::Config for Runtime { +impl pallet_bridge_dispatch::Config for Runtime { type Event = Event; - type MessageId = (bp_message_lane::LaneId, bp_message_lane::MessageNonce); + type MessageId = (bp_messages::LaneId, bp_messages::MessageNonce); type Call = Call; type CallFilter = (); type EncodedCall = crate::rialto_messages::FromRialtoEncodedCall; @@ -291,7 +294,7 @@ impl pallet_session::Config for Runtime { type ValidatorIdOf = (); type ShouldEndSession = pallet_session::PeriodicSessions; type NextSessionRotation = pallet_session::PeriodicSessions; - type SessionManager = pallet_shift_session_manager::Module; + type SessionManager = pallet_shift_session_manager::Pallet; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; type DisabledValidatorsThreshold = (); @@ -299,33 +302,48 @@ impl pallet_session::Config for Runtime { type WeightInfo = (); } -impl pallet_substrate_bridge::Config for Runtime { - type BridgedChain = bp_rialto::Rialto; -} - parameter_types! { // This is a pretty unscientific cap. // // Note that once this is hit the pallet will essentially throttle incoming requests down to one // call per block. pub const MaxRequests: u32 = 50; + pub const WestendValidatorCount: u32 = 255; + + // Number of headers to keep. + // + // Assuming the worst case of every header being finalized, we will keep headers for at least a + // week. 
+ pub const HeadersToKeep: u32 = 7 * bp_millau::DAYS as u32; } -impl pallet_finality_verifier::Config for Runtime { +pub type RialtoGrandpaInstance = (); +impl pallet_bridge_grandpa::Config for Runtime { type BridgedChain = bp_rialto::Rialto; - type HeaderChain = pallet_substrate_bridge::Module; - type AncestryProof = Vec; - type AncestryChecker = bp_header_chain::LinearAncestryChecker; type MaxRequests = MaxRequests; + type HeadersToKeep = HeadersToKeep; + + // TODO [#391]: Use weights generated for the Millau runtime instead of Rialto ones. + type WeightInfo = pallet_bridge_grandpa::weights::RialtoWeight; +} + +pub type WestendGrandpaInstance = pallet_bridge_grandpa::Instance1; +impl pallet_bridge_grandpa::Config for Runtime { + type BridgedChain = bp_westend::Westend; + type MaxRequests = MaxRequests; + type HeadersToKeep = HeadersToKeep; + + // TODO [#391]: Use weights generated for the Millau runtime instead of Rialto ones. + type WeightInfo = pallet_bridge_grandpa::weights::RialtoWeight; } impl pallet_shift_session_manager::Config for Runtime {} parameter_types! { - pub const MaxMessagesToPruneAtOnce: bp_message_lane::MessageNonce = 8; - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_message_lane::MessageNonce = + pub const MaxMessagesToPruneAtOnce: bp_messages::MessageNonce = 8; + pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_message_lane::MessageNonce = + pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE; // `IdentityFee` is used by Millau => we may use weight directly pub const GetDeliveryConfirmationTransactionFee: Balance = @@ -333,11 +351,14 @@ parameter_types! 
{ pub const RootAccountForPayments: Option = None; } -impl pallet_message_lane::Config for Runtime { +/// Instance of the messages pallet used to relay messages to/from Rialto chain. +pub type WithRialtoMessagesInstance = pallet_bridge_messages::DefaultInstance; + +impl pallet_bridge_messages::Config for Runtime { type Event = Event; // TODO: https://github.com/paritytech/parity-bridges-common/issues/390 - type WeightInfo = pallet_message_lane::weights::RialtoWeight; - type Parameter = rialto_messages::MillauToRialtoMessageLaneParameter; + type WeightInfo = pallet_bridge_messages::weights::RialtoWeight; + type Parameter = rialto_messages::MillauToRialtoMessagesParameter; type MaxMessagesToPruneAtOnce = MaxMessagesToPruneAtOnce; type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; @@ -353,7 +374,7 @@ impl pallet_message_lane::Config for Runtime { type TargetHeaderChain = crate::rialto_messages::Rialto; type LaneMessageVerifier = crate::rialto_messages::ToRialtoMessageVerifier; - type MessageDeliveryAndDispatchPayment = pallet_message_lane::instant_payments::InstantCurrencyPayments< + type MessageDeliveryAndDispatchPayment = pallet_bridge_messages::instant_payments::InstantCurrencyPayments< Runtime, pallet_balances::Pallet, GetDeliveryConfirmationTransactionFee, @@ -370,10 +391,10 @@ construct_runtime!( NodeBlock = opaque::Block, UncheckedExtrinsic = UncheckedExtrinsic { - BridgeRialto: pallet_substrate_bridge::{Pallet, Call, Storage, Config}, - BridgeRialtoMessageLane: pallet_message_lane::{Pallet, Call, Storage, Event}, - BridgeCallDispatch: pallet_bridge_call_dispatch::{Pallet, Event}, - BridgeFinalityVerifier: pallet_finality_verifier::{Pallet, Call}, + BridgeRialtoMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event}, + BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event}, + BridgeRialtoGrandpa: pallet_bridge_grandpa::{Pallet, Call, 
Storage}, + BridgeWestendGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Config, Storage}, System: frame_system::{Pallet, Call, Config, Storage, Event}, RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Call, Storage}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, @@ -411,6 +432,8 @@ pub type SignedExtra = ( pub type SignedPayload = generic::SignedPayload; /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +/// Extrinsic type that has already been checked. +pub type CheckedExtrinsic = generic::CheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive, Runtime, AllPallets>; @@ -422,7 +445,7 @@ impl_runtime_apis! { } fn execute_block(block: Block) { - Executive::execute_block(block) + Executive::execute_block(block); } fn initialize_block(header: &::Header) { @@ -483,8 +506,8 @@ impl_runtime_apis! { } impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> u64 { - Aura::slot_duration() + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) } fn authorities() -> Vec { @@ -492,6 +515,18 @@ impl_runtime_apis! { } } + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< + Block, + Balance, + > for Runtime { + fn query_info(uxt: ::Extrinsic, len: u32) -> RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details(uxt: ::Extrinsic, len: u32) -> FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + } + impl sp_session::SessionKeys for Runtime { fn generate_session_keys(seed: Option>) -> Vec { SessionKeys::generate(seed) @@ -535,32 +570,31 @@ impl_runtime_apis! 
{ } } - impl bp_rialto::RialtoHeaderApi for Runtime { - fn best_blocks() -> Vec<(bp_rialto::BlockNumber, bp_rialto::Hash)> { - BridgeRialto::best_headers() - } - - fn finalized_block() -> (bp_rialto::BlockNumber, bp_rialto::Hash) { - let header = BridgeRialto::best_finalized(); + impl bp_rialto::RialtoFinalityApi for Runtime { + fn best_finalized() -> (bp_rialto::BlockNumber, bp_rialto::Hash) { + let header = BridgeRialtoGrandpa::best_finalized(); (header.number, header.hash()) } - fn incomplete_headers() -> Vec<(bp_rialto::BlockNumber, bp_rialto::Hash)> { - BridgeRialto::require_justifications() + fn is_known_header(hash: bp_rialto::Hash) -> bool { + BridgeRialtoGrandpa::is_known_header(hash) + } + } + + impl bp_westend::WestendFinalityApi for Runtime { + fn best_finalized() -> (bp_westend::BlockNumber, bp_westend::Hash) { + let header = BridgeWestendGrandpa::best_finalized(); + (header.number, header.hash()) } - fn is_known_block(hash: bp_rialto::Hash) -> bool { - BridgeRialto::is_known_header(hash) - } - - fn is_finalized_block(hash: bp_rialto::Hash) -> bool { - BridgeRialto::is_finalized_header(hash) + fn is_known_header(hash: bp_westend::Hash) -> bool { + BridgeWestendGrandpa::is_known_header(hash) } } impl bp_rialto::ToRialtoOutboundLaneApi for Runtime { fn estimate_message_delivery_and_dispatch_fee( - _lane_id: bp_message_lane::LaneId, + _lane_id: bp_messages::LaneId, payload: ToRialtoMessagePayload, ) -> Option { estimate_message_dispatch_and_delivery_fee::( @@ -570,12 +604,12 @@ impl_runtime_apis! 
{ } fn messages_dispatch_weight( - lane: bp_message_lane::LaneId, - begin: bp_message_lane::MessageNonce, - end: bp_message_lane::MessageNonce, - ) -> Vec<(bp_message_lane::MessageNonce, Weight, u32)> { + lane: bp_messages::LaneId, + begin: bp_messages::MessageNonce, + end: bp_messages::MessageNonce, + ) -> Vec<(bp_messages::MessageNonce, Weight, u32)> { (begin..=end).filter_map(|nonce| { - let encoded_payload = BridgeRialtoMessageLane::outbound_message_payload(lane, nonce)?; + let encoded_payload = BridgeRialtoMessages::outbound_message_payload(lane, nonce)?; let decoded_payload = rialto_messages::ToRialtoMessagePayload::decode( &mut &encoded_payload[..] ).ok()?; @@ -584,26 +618,26 @@ impl_runtime_apis! { .collect() } - fn latest_received_nonce(lane: bp_message_lane::LaneId) -> bp_message_lane::MessageNonce { - BridgeRialtoMessageLane::outbound_latest_received_nonce(lane) + fn latest_received_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { + BridgeRialtoMessages::outbound_latest_received_nonce(lane) } - fn latest_generated_nonce(lane: bp_message_lane::LaneId) -> bp_message_lane::MessageNonce { - BridgeRialtoMessageLane::outbound_latest_generated_nonce(lane) + fn latest_generated_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { + BridgeRialtoMessages::outbound_latest_generated_nonce(lane) } } impl bp_rialto::FromRialtoInboundLaneApi for Runtime { - fn latest_received_nonce(lane: bp_message_lane::LaneId) -> bp_message_lane::MessageNonce { - BridgeRialtoMessageLane::inbound_latest_received_nonce(lane) + fn latest_received_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { + BridgeRialtoMessages::inbound_latest_received_nonce(lane) } - fn latest_confirmed_nonce(lane: bp_message_lane::LaneId) -> bp_message_lane::MessageNonce { - BridgeRialtoMessageLane::inbound_latest_confirmed_nonce(lane) + fn latest_confirmed_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { + 
BridgeRialtoMessages::inbound_latest_confirmed_nonce(lane) } - fn unrewarded_relayers_state(lane: bp_message_lane::LaneId) -> bp_message_lane::UnrewardedRelayersState { - BridgeRialtoMessageLane::inbound_unrewarded_relayers_state(lane) + fn unrewarded_relayers_state(lane: bp_messages::LaneId) -> bp_messages::UnrewardedRelayersState { + BridgeRialtoMessages::inbound_unrewarded_relayers_state(lane) } } } @@ -623,7 +657,7 @@ where AccountId: codec::Encode, SpecVersion: codec::Encode, { - pallet_bridge_call_dispatch::account_ownership_digest( + pallet_bridge_dispatch::account_ownership_digest( rialto_call, millau_account_id, rialto_spec_version, @@ -639,9 +673,9 @@ mod tests { #[test] fn ensure_millau_message_lane_weights_are_correct() { // TODO: https://github.com/paritytech/parity-bridges-common/issues/390 - type Weights = pallet_message_lane::weights::RialtoWeight; + type Weights = pallet_bridge_messages::weights::RialtoWeight; - pallet_message_lane::ensure_weights_are_correct::( + pallet_bridge_messages::ensure_weights_are_correct::( bp_millau::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT, bp_millau::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT, bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, @@ -650,34 +684,24 @@ mod tests { let max_incoming_message_proof_size = bp_rialto::EXTRA_STORAGE_PROOF_SIZE.saturating_add( messages::target::maximal_incoming_message_size(bp_millau::max_extrinsic_size()), ); - pallet_message_lane::ensure_able_to_receive_message::( + pallet_bridge_messages::ensure_able_to_receive_message::( bp_millau::max_extrinsic_size(), bp_millau::max_extrinsic_weight(), max_incoming_message_proof_size, - bridge_runtime_common::messages::transaction_weight_without_multiplier( - bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - max_incoming_message_proof_size as _, - 0, - ), messages::target::maximal_incoming_message_dispatch_weight(bp_millau::max_extrinsic_weight()), ); - let max_incoming_inbound_lane_data_proof_size = 
bp_message_lane::InboundLaneData::<()>::encoded_size_hint( + let max_incoming_inbound_lane_data_proof_size = bp_messages::InboundLaneData::<()>::encoded_size_hint( bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _, ) .unwrap_or(u32::MAX); - pallet_message_lane::ensure_able_to_receive_confirmation::( + pallet_bridge_messages::ensure_able_to_receive_confirmation::( bp_millau::max_extrinsic_size(), bp_millau::max_extrinsic_weight(), max_incoming_inbound_lane_data_proof_size, bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, - bridge_runtime_common::messages::transaction_weight_without_multiplier( - bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - max_incoming_inbound_lane_data_proof_size as _, - 0, - ), ); } } diff --git a/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs b/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs index 9775c93d2d..a800117dc5 100644 --- a/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs +++ b/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,52 +14,32 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Everything required to serve Millau <-> Rialto message lanes. +//! Everything required to serve Millau <-> Rialto messages. 
use crate::Runtime; -use bp_message_lane::{ +use bp_messages::{ source_chain::TargetHeaderChain, target_chain::{ProvedMessages, SourceHeaderChain}, - InboundLaneData, LaneId, Message, MessageNonce, Parameter as MessageLaneParameter, + InboundLaneData, LaneId, Message, MessageNonce, Parameter as MessagesParameter, }; use bp_runtime::{InstanceId, RIALTO_BRIDGE_INSTANCE}; -use bridge_runtime_common::messages::{self, ChainWithMessageLanes, MessageBridge}; +use bridge_runtime_common::messages::{self, MessageBridge, MessageTransaction}; use codec::{Decode, Encode}; use frame_support::{ parameter_types, - weights::{DispatchClass, Weight, WeightToFeePolynomial}, + weights::{DispatchClass, Weight}, RuntimeDebug, }; -use sp_core::storage::StorageKey; use sp_runtime::{FixedPointNumber, FixedU128}; use sp_std::{convert::TryFrom, ops::RangeInclusive}; +/// Initial value of `RialtoToMillauConversionRate` parameter. +pub const INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE: FixedU128 = FixedU128::from_inner(FixedU128::DIV); + parameter_types! { /// Rialto to Millau conversion rate. Initially we treat both tokens as equal. - storage RialtoToMillauConversionRate: FixedU128 = 1.into(); -} - -/// Storage key of the Millau -> Rialto message in the runtime storage. -pub fn message_key(lane: &LaneId, nonce: MessageNonce) -> StorageKey { - pallet_message_lane::storage_keys::message_key::::MessageLaneInstance>( - lane, nonce, - ) -} - -/// Storage key of the Millau -> Rialto message lane state in the runtime storage. -pub fn outbound_lane_data_key(lane: &LaneId) -> StorageKey { - pallet_message_lane::storage_keys::outbound_lane_data_key::<::MessageLaneInstance>( - lane, - ) -} - -/// Storage key of the Rialto -> Millau message lane state in the runtime storage. 
-pub fn inbound_lane_data_key(lane: &LaneId) -> StorageKey { - pallet_message_lane::storage_keys::inbound_lane_data_key::< - Runtime, - ::MessageLaneInstance, - >(lane) + pub storage RialtoToMillauConversionRate: FixedU128 = INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE; } /// Message payload for Millau -> Rialto messages. @@ -84,7 +64,7 @@ type ToRialtoMessagesDeliveryProof = messages::source::FromBridgedChainMessagesD pub type FromRialtoMessageDispatch = messages::target::FromBridgedChainMessageDispatch< WithRialtoMessageBridge, crate::Runtime, - pallet_bridge_call_dispatch::DefaultInstance, + pallet_bridge_dispatch::DefaultInstance, >; /// Millau <-> Rialto message bridge. @@ -99,59 +79,6 @@ impl MessageBridge for WithRialtoMessageBridge { type ThisChain = Millau; type BridgedChain = Rialto; - fn maximal_extrinsic_size_on_target_chain() -> u32 { - bp_rialto::max_extrinsic_size() - } - - fn weight_limits_of_message_on_bridged_chain(_message_payload: &[u8]) -> RangeInclusive { - // we don't want to relay too large messages + keep reserve for future upgrades - let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(bp_rialto::max_extrinsic_weight()); - - // we're charging for payload bytes in `WithRialtoMessageBridge::weight_of_delivery_transaction` function - // - // this bridge may be used to deliver all kind of messages, so we're not making any assumptions about - // minimal dispatch weight here - - 0..=upper_limit - } - - fn weight_of_delivery_transaction(message_payload: &[u8]) -> Weight { - let message_payload_len = u32::try_from(message_payload.len()) - .map(Into::into) - .unwrap_or(Weight::MAX); - let extra_bytes_in_payload = - message_payload_len.saturating_sub(pallet_message_lane::EXPECTED_DEFAULT_MESSAGE_LENGTH.into()); - messages::transaction_weight_without_multiplier( - bp_rialto::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - message_payload_len.saturating_add(bp_millau::EXTRA_STORAGE_PROOF_SIZE as _), - 
extra_bytes_in_payload - .saturating_mul(bp_rialto::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT) - .saturating_add(bp_rialto::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT), - ) - } - - fn weight_of_delivery_confirmation_transaction_on_this_chain() -> Weight { - let inbounded_data_size: Weight = - InboundLaneData::::encoded_size_hint(bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, 1) - .map(Into::into) - .unwrap_or(Weight::MAX); - - messages::transaction_weight_without_multiplier( - bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - inbounded_data_size.saturating_add(bp_rialto::EXTRA_STORAGE_PROOF_SIZE as _), - bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, - ) - } - - fn this_weight_to_this_balance(weight: Weight) -> bp_millau::Balance { - ::WeightToFee::calc(&weight) - } - - fn bridged_weight_to_bridged_balance(weight: Weight) -> bp_rialto::Balance { - // we're using the same weights in both chains now - ::WeightToFee::calc(&weight) as _ - } - fn bridged_balance_to_this_balance(bridged_balance: bp_rialto::Balance) -> bp_millau::Balance { bp_millau::Balance::try_from(RialtoToMillauConversionRate::get().saturating_mul_int(bridged_balance)) .unwrap_or(bp_millau::Balance::MAX) @@ -162,42 +89,114 @@ impl MessageBridge for WithRialtoMessageBridge { #[derive(RuntimeDebug, Clone, Copy)] pub struct Millau; -impl messages::ChainWithMessageLanes for Millau { +impl messages::ChainWithMessages for Millau { type Hash = bp_millau::Hash; type AccountId = bp_millau::AccountId; type Signer = bp_millau::AccountSigner; type Signature = bp_millau::Signature; - type Call = crate::Call; type Weight = Weight; type Balance = bp_millau::Balance; - type MessageLaneInstance = pallet_message_lane::DefaultInstance; + type MessagesInstance = crate::WithRialtoMessagesInstance; } -impl messages::ThisChainWithMessageLanes for Millau { +impl messages::ThisChainWithMessages for Millau { + type Call = crate::Call; + fn is_outbound_lane_enabled(lane: &LaneId) -> bool { - *lane 
== LaneId::default() + *lane == [0, 0, 0, 0] || *lane == [0, 0, 0, 1] } fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { MessageNonce::MAX } + + fn estimate_delivery_confirmation_transaction() -> MessageTransaction { + let inbound_data_size = + InboundLaneData::::encoded_size_hint(bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, 1) + .unwrap_or(u32::MAX); + + MessageTransaction { + dispatch_weight: bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, + size: inbound_data_size + .saturating_add(bp_rialto::EXTRA_STORAGE_PROOF_SIZE) + .saturating_add(bp_millau::TX_EXTRA_BYTES), + } + } + + fn transaction_payment(transaction: MessageTransaction) -> bp_millau::Balance { + // in our testnets, both per-byte fee and weight-to-fee are 1:1 + messages::transaction_payment( + bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, + 1, + FixedU128::zero(), + |weight| weight as _, + transaction, + ) + } } /// Rialto chain from message lane point of view. #[derive(RuntimeDebug, Clone, Copy)] pub struct Rialto; -impl messages::ChainWithMessageLanes for Rialto { +impl messages::ChainWithMessages for Rialto { type Hash = bp_rialto::Hash; type AccountId = bp_rialto::AccountId; type Signer = bp_rialto::AccountSigner; type Signature = bp_rialto::Signature; - type Call = (); // unknown to us type Weight = Weight; type Balance = bp_rialto::Balance; - type MessageLaneInstance = pallet_message_lane::DefaultInstance; + type MessagesInstance = pallet_bridge_messages::DefaultInstance; +} + +impl messages::BridgedChainWithMessages for Rialto { + fn maximal_extrinsic_size() -> u32 { + bp_rialto::max_extrinsic_size() + } + + fn message_weight_limits(_message_payload: &[u8]) -> RangeInclusive { + // we don't want to relay too large messages + keep reserve for future upgrades + let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(bp_rialto::max_extrinsic_weight()); + + // we're charging for payload bytes in 
`WithRialtoMessageBridge::transaction_payment` function + // + // this bridge may be used to deliver all kind of messages, so we're not making any assumptions about + // minimal dispatch weight here + + 0..=upper_limit + } + + fn estimate_delivery_transaction( + message_payload: &[u8], + message_dispatch_weight: Weight, + ) -> MessageTransaction { + let message_payload_len = u32::try_from(message_payload.len()).unwrap_or(u32::MAX); + let extra_bytes_in_payload = Weight::from(message_payload_len) + .saturating_sub(pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH.into()); + + MessageTransaction { + dispatch_weight: extra_bytes_in_payload + .saturating_mul(bp_rialto::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT) + .saturating_add(bp_rialto::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT) + .saturating_add(message_dispatch_weight), + size: message_payload_len + .saturating_add(bp_millau::EXTRA_STORAGE_PROOF_SIZE) + .saturating_add(bp_rialto::TX_EXTRA_BYTES), + } + } + + fn transaction_payment(transaction: MessageTransaction) -> bp_rialto::Balance { + // in our testnets, both per-byte fee and weight-to-fee are 1:1 + messages::transaction_payment( + bp_rialto::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, + 1, + FixedU128::zero(), + |weight| weight as _, + transaction, + ) + } } impl TargetHeaderChain for Rialto { @@ -238,15 +237,15 @@ impl SourceHeaderChain for Rialto { /// Millau -> Rialto message lane pallet parameters. #[derive(RuntimeDebug, Clone, Encode, Decode, PartialEq, Eq)] -pub enum MillauToRialtoMessageLaneParameter { +pub enum MillauToRialtoMessagesParameter { /// The conversion formula we use is: `MillauTokens = RialtoTokens * conversion_rate`. 
RialtoToMillauConversionRate(FixedU128), } -impl MessageLaneParameter for MillauToRialtoMessageLaneParameter { +impl MessagesParameter for MillauToRialtoMessagesParameter { fn save(&self) { match *self { - MillauToRialtoMessageLaneParameter::RialtoToMillauConversionRate(ref conversion_rate) => { + MillauToRialtoMessagesParameter::RialtoToMillauConversionRate(ref conversion_rate) => { RialtoToMillauConversionRate::set(conversion_rate) } } diff --git a/polkadot/bridges/bin/rialto/node/Cargo.toml b/polkadot/bridges/bin/rialto/node/Cargo.toml index f99178c77a..a51ee7a5ab 100644 --- a/polkadot/bridges/bin/rialto/node/Cargo.toml +++ b/polkadot/bridges/bin/rialto/node/Cargo.toml @@ -15,17 +15,19 @@ structopt = "0.3.21" # Bridge dependencies -bp-message-lane = { path = "../../../primitives/message-lane" } +bp-messages = { path = "../../../primitives/messages" } bp-runtime = { path = "../../../primitives/runtime" } -bp-rialto = { path = "../../../primitives/rialto" } -pallet-message-lane = { path = "../../../modules/message-lane" } -pallet-message-lane-rpc = { path = "../../../modules/message-lane/rpc" } +bp-rialto = { path = "../../../primitives/chain-rialto" } +pallet-bridge-messages = { path = "../../../modules/messages" } rialto-runtime = { path = "../runtime" } # Substrate Dependencies + frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } +node-inspect = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["wasmtime"] } sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -35,22 +37,21 @@ 
sc-executor = { git = "https://github.com/paritytech/substrate", branch = "maste sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-finality-grandpa-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" } [build-dependencies] -build-script-utils = { package = "substrate-build-script-utils", version = "2.0" } +substrate-build-script-utils = "3.0.0" frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } -vergen = "3.1.0" [features] default = [] diff --git a/polkadot/bridges/bin/rialto/node/build.rs b/polkadot/bridges/bin/rialto/node/build.rs index e9a10ff8ad..d9b50049e2 
100644 --- a/polkadot/bridges/bin/rialto/node/build.rs +++ b/polkadot/bridges/bin/rialto/node/build.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,12 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use vergen::{generate_cargo_keys, ConstantsFlags}; - -const ERROR_MSG: &str = "Failed to generate metadata files"; +use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; fn main() { - generate_cargo_keys(ConstantsFlags::SHA_SHORT).expect(ERROR_MSG); + generate_cargo_keys(); - build_script_utils::rerun_if_git_head_changed(); + rerun_if_git_head_changed(); } diff --git a/polkadot/bridges/bin/rialto/node/src/chain_spec.rs b/polkadot/bridges/bin/rialto/node/src/chain_spec.rs index 00a73cd445..732cf1a4b1 100644 --- a/polkadot/bridges/bin/rialto/node/src/chain_spec.rs +++ b/polkadot/bridges/bin/rialto/node/src/chain_spec.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify @@ -16,8 +16,8 @@ use bp_rialto::derive_account_from_millau_id; use rialto_runtime::{ - AccountId, AuraConfig, BalancesConfig, BridgeKovanConfig, BridgeMillauConfig, BridgeRialtoPoAConfig, GenesisConfig, - GrandpaConfig, SessionConfig, SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY, + AccountId, AuraConfig, BalancesConfig, BridgeKovanConfig, BridgeRialtoPoAConfig, GenesisConfig, GrandpaConfig, + SessionConfig, SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY, }; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{sr25519, Pair, Public}; @@ -122,7 +122,10 @@ impl Alternative { get_account_id_from_seed::("Ferdie//stash"), get_account_id_from_seed::("George//stash"), get_account_id_from_seed::("Harry//stash"), - pallet_message_lane::Module::::relayer_fund_account_id(), + pallet_bridge_messages::Pallet::< + rialto_runtime::Runtime, + pallet_bridge_messages::DefaultInstance, + >::relayer_fund_account_id(), derive_account_from_millau_id(bp_runtime::SourceAccount::Account( get_account_id_from_seed::("Dave"), )), @@ -151,33 +154,28 @@ fn testnet_genesis( _enable_println: bool, ) -> GenesisConfig { GenesisConfig { - frame_system: Some(SystemConfig { + frame_system: SystemConfig { code: WASM_BINARY.to_vec(), changes_trie_config: Default::default(), - }), - pallet_balances: Some(BalancesConfig { + }, + pallet_balances: BalancesConfig { balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(), - }), - pallet_aura: Some(AuraConfig { + }, + pallet_aura: AuraConfig { authorities: Vec::new(), - }), - pallet_bridge_eth_poa_Instance1: Some(load_rialto_poa_bridge_config()), - pallet_bridge_eth_poa_Instance2: Some(load_kovan_bridge_config()), - pallet_grandpa: Some(GrandpaConfig { + }, + pallet_bridge_eth_poa_Instance1: load_rialto_poa_bridge_config(), + pallet_bridge_eth_poa_Instance2: load_kovan_bridge_config(), + pallet_grandpa: 
GrandpaConfig { authorities: Vec::new(), - }), - pallet_substrate_bridge: Some(BridgeMillauConfig { - // We'll initialize the pallet with a dispatchable instead. - init_data: None, - owner: Some(root_key.clone()), - }), - pallet_sudo: Some(SudoConfig { key: root_key }), - pallet_session: Some(SessionConfig { + }, + pallet_sudo: SudoConfig { key: root_key }, + pallet_session: SessionConfig { keys: initial_authorities .iter() .map(|x| (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone()))) .collect::>(), - }), + }, } } diff --git a/polkadot/bridges/bin/rialto/node/src/cli.rs b/polkadot/bridges/bin/rialto/node/src/cli.rs index 1149c4f910..46323ed25c 100644 --- a/polkadot/bridges/bin/rialto/node/src/cli.rs +++ b/polkadot/bridges/bin/rialto/node/src/cli.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -31,6 +31,7 @@ pub struct Cli { pub enum Subcommand { /// Key management cli utilities Key(sc_cli::KeySubcommand), + /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. Verify(sc_cli::VerifyCmd), @@ -61,7 +62,9 @@ pub enum Subcommand { /// Revert the chain to a previous state. Revert(sc_cli::RevertCmd), - /// The custom benchmark subcommmand benchmarking runtime pallets. - #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] + /// Inspect blocks or extrinsics. + Inspect(node_inspect::cli::InspectCmd), + + /// Benchmark runtime pallets. 
Benchmark(frame_benchmarking_cli::BenchmarkCmd), } diff --git a/polkadot/bridges/bin/rialto/node/src/command.rs b/polkadot/bridges/bin/rialto/node/src/command.rs index 8242c9eaab..a9930c5741 100644 --- a/polkadot/bridges/bin/rialto/node/src/command.rs +++ b/polkadot/bridges/bin/rialto/node/src/command.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -17,7 +17,7 @@ use crate::cli::{Cli, Subcommand}; use crate::service; use crate::service::new_partial; -use rialto_runtime::Block; +use rialto_runtime::{Block, RuntimeApi}; use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; @@ -153,6 +153,10 @@ pub fn run() -> sc_cli::Result<()> { Ok((cmd.run(client, backend), task_manager)) }) } + Some(Subcommand::Inspect(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run::(config)) + } None => { let runner = cli.create_runner(&cli.run)?; runner diff --git a/polkadot/bridges/bin/rialto/node/src/main.rs b/polkadot/bridges/bin/rialto/node/src/main.rs index 164afae2bb..f319d1437a 100644 --- a/polkadot/bridges/bin/rialto/node/src/main.rs +++ b/polkadot/bridges/bin/rialto/node/src/main.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/bin/rialto/node/src/service.rs b/polkadot/bridges/bin/rialto/node/src/service.rs index 67ca185137..841202ac7b 100644 --- a/polkadot/bridges/bin/rialto/node/src/service.rs +++ b/polkadot/bridges/bin/rialto/node/src/service.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
// This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -30,11 +30,13 @@ use rialto_runtime::{self, opaque::Block, RuntimeApi}; use sc_client_api::{ExecutorProvider, RemoteBackend}; +use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; use sc_finality_grandpa::SharedVoterState; use sc_keystore::LocalKeystore; use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; +use sc_telemetry::{Telemetry, TelemetryWorker}; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; use sp_inherents::InherentDataProviders; use std::sync::Arc; @@ -70,6 +72,7 @@ pub fn new_partial( AuraPair, >, sc_finality_grandpa::LinkHalf, + Option, ), >, ServiceError, @@ -77,12 +80,30 @@ pub fn new_partial( if config.keystore_remote.is_some() { return Err(ServiceError::Other("Remote Keystores are not supported.".to_string())); } - let inherent_data_providers = sp_inherents::InherentDataProviders::new(); + let inherent_data_providers = InherentDataProviders::new(); - let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::(&config)?; + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( + &config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + )?; let client = Arc::new(client); + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", worker.run()); + telemetry + }); + let select_chain = sc_consensus::LongestChain::new(backend.clone()); let transaction_pool = 
sc_transaction_pool::BasicPool::new_full( @@ -93,22 +114,28 @@ pub fn new_partial( client.clone(), ); - let (grandpa_block_import, grandpa_link) = - sc_finality_grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain.clone())?; + let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), + telemetry.as_ref().map(|x| x.handle()), + )?; let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone()); - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( - sc_consensus_aura::slot_duration(&*client)?, - aura_block_import.clone(), - Some(Box::new(grandpa_block_import)), - client.clone(), - inherent_data_providers.clone(), - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), - )?; + let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { + block_import: aura_block_import.clone(), + justification_import: Some(Box::new(grandpa_block_import)), + client: client.clone(), + inherent_data_providers: inherent_data_providers.clone(), + spawner: &task_manager.spawn_essential_handle(), + can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), + slot_duration: sc_consensus_aura::slot_duration(&*client)?, + registry: config.prometheus_registry(), + check_for_equivocation: Default::default(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + })?; Ok(sc_service::PartialComponents { client, @@ -119,7 +146,7 @@ pub fn new_partial( select_chain, transaction_pool, inherent_data_providers, - other: (aura_block_import, grandpa_link), + other: (aura_block_import, grandpa_link, telemetry), }) } @@ -141,7 +168,7 @@ pub fn new_full(mut config: Configuration) -> Result select_chain, transaction_pool, inherent_data_providers, - other: 
(block_import, grandpa_link), + other: (block_import, grandpa_link, mut telemetry), } = new_partial(&config)?; if let Some(url) = &config.keystore_remote { @@ -173,13 +200,7 @@ pub fn new_full(mut config: Configuration) -> Result })?; if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - backend.clone(), - task_manager.spawn_handle(), - client.clone(), - network.clone(), - ); + sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone()); } let role = config.role.clone(); @@ -190,40 +211,9 @@ pub fn new_full(mut config: Configuration) -> Result let prometheus_registry = config.prometheus_registry().cloned(); let rpc_extensions_builder = { - use bp_message_lane::{LaneId, MessageNonce}; - use bp_runtime::{InstanceId, MILLAU_BRIDGE_INSTANCE}; use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider; - use sp_core::storage::StorageKey; - // This struct is here to ease update process. - - /// Rialto runtime from message-lane RPC point of view. 
- struct RialtoMessageLaneKeys; - - impl pallet_message_lane_rpc::Runtime for RialtoMessageLaneKeys { - fn message_key(&self, instance: &InstanceId, lane: &LaneId, nonce: MessageNonce) -> Option { - match *instance { - MILLAU_BRIDGE_INSTANCE => Some(rialto_runtime::millau_messages::message_key(lane, nonce)), - _ => None, - } - } - - fn outbound_lane_data_key(&self, instance: &InstanceId, lane: &LaneId) -> Option { - match *instance { - MILLAU_BRIDGE_INSTANCE => Some(rialto_runtime::millau_messages::outbound_lane_data_key(lane)), - _ => None, - } - } - - fn inbound_lane_data_key(&self, instance: &InstanceId, lane: &LaneId) -> Option { - match *instance { - MILLAU_BRIDGE_INSTANCE => Some(rialto_runtime::millau_messages::inbound_lane_data_key(lane)), - _ => None, - } - } - } - - use pallet_message_lane_rpc::{MessageLaneApi, MessageLaneRpcHandler}; + use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; use sc_finality_grandpa_rpc::{GrandpaApi, GrandpaRpcHandler}; use sc_rpc::DenyUnsafe; use substrate_frame_rpc_system::{FullSystem, SystemApi}; @@ -237,7 +227,7 @@ pub fn new_full(mut config: Configuration) -> Result let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty(); let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend.clone(), Some(shared_authority_set.clone())); + GrandpaFinalityProofProvider::new_for_service(backend, Some(shared_authority_set.clone())); Box::new(move |_, subscription_executor| { let mut io = jsonrpc_core::IoHandler::default(); @@ -246,6 +236,9 @@ pub fn new_full(mut config: Configuration) -> Result pool.clone(), DenyUnsafe::No, ))); + io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new( + client.clone(), + ))); io.extend_with(GrandpaApi::to_delegate(GrandpaRpcHandler::new( shared_authority_set.clone(), shared_voter_state.clone(), @@ -253,16 +246,12 @@ pub fn new_full(mut config: Configuration) -> Result subscription_executor, 
finality_proof_provider.clone(), ))); - io.extend_with(MessageLaneApi::to_delegate(MessageLaneRpcHandler::new( - backend.clone(), - Arc::new(RialtoMessageLaneKeys), - ))); io }) }; - let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks(sc_service::SpawnTasksParams { + let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { network: network.clone(), client: client.clone(), keystore: keystore_container.sync_keystore(), @@ -275,32 +264,35 @@ pub fn new_full(mut config: Configuration) -> Result network_status_sinks, system_rpc_tx, config, - telemetry_span: None, + telemetry: telemetry.as_mut(), })?; if role.is_authority() { - let proposer = sc_basic_authorship::ProposerFactory::new( + let proposer_factory = sc_basic_authorship::ProposerFactory::new( task_manager.spawn_handle(), client.clone(), transaction_pool, prometheus_registry.as_ref(), + telemetry.as_ref().map(|x| x.handle()), ); let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _, _>( - sc_consensus_aura::slot_duration(&*client)?, - client.clone(), + let aura = sc_consensus_aura::start_aura::(StartAuraParams { + slot_duration: sc_consensus_aura::slot_duration(&*client)?, + client: client.clone(), select_chain, block_import, - proposer, - network.clone(), + proposer_factory, inherent_data_providers, force_authoring, backoff_authoring_blocks, - keystore_container.sync_keystore(), + keystore: keystore_container.sync_keystore(), can_author_with, - )?; + sync_oracle: network.clone(), + block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), + telemetry: telemetry.as_ref().map(|x| x.handle()), + })?; // the AURA authoring task is considered essential, i.e. if it // fails we take down the service with it. 
@@ -323,6 +315,7 @@ pub fn new_full(mut config: Configuration) -> Result observer_enabled: false, keystore, is_authority: role.is_authority(), + telemetry: telemetry.as_ref().map(|x| x.handle()), }; if enable_grandpa { @@ -336,10 +329,10 @@ pub fn new_full(mut config: Configuration) -> Result config: grandpa_config, link: grandpa_link, network, - telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()), voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), prometheus_registry, shared_voter_state: SharedVoterState::empty(), + telemetry: telemetry.as_ref().map(|x| x.handle()), }; // the GRANDPA voter task is considered infallible, i.e. @@ -355,8 +348,27 @@ pub fn new_full(mut config: Configuration) -> Result /// Builds a new service for a light client. pub fn new_light(mut config: Configuration) -> Result { + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + let (client, backend, keystore_container, mut task_manager, on_demand) = - sc_service::new_light_parts::(&config)?; + sc_service::new_light_parts::( + &config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + )?; + + let mut telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", worker.run()); + telemetry + }); config .network @@ -373,22 +385,28 @@ pub fn new_light(mut config: Configuration) -> Result on_demand.clone(), )); - let (grandpa_block_import, _) = - sc_finality_grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain)?; + let (grandpa_block_import, _) = sc_finality_grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain, + telemetry.as_ref().map(|x| x.handle()), + )?; let aura_block_import = 
sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone()); - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( - sc_consensus_aura::slot_duration(&*client)?, - aura_block_import, - Some(Box::new(grandpa_block_import)), - client.clone(), - InherentDataProviders::new(), - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - sp_consensus::NeverCanAuthor, - )?; + let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { + block_import: aura_block_import, + justification_import: Some(Box::new(grandpa_block_import)), + client: client.clone(), + inherent_data_providers: InherentDataProviders::new(), + spawner: &task_manager.spawn_essential_handle(), + can_author_with: sp_consensus::NeverCanAuthor, + slot_duration: sc_consensus_aura::slot_duration(&*client)?, + registry: config.prometheus_registry(), + check_for_equivocation: Default::default(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + })?; let (network, network_status_sinks, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { @@ -402,13 +420,7 @@ pub fn new_light(mut config: Configuration) -> Result })?; if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - backend.clone(), - task_manager.spawn_handle(), - client.clone(), - network.clone(), - ); + sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone()); } sc_service::spawn_tasks(sc_service::SpawnTasksParams { @@ -424,7 +436,7 @@ pub fn new_light(mut config: Configuration) -> Result network, network_status_sinks, system_rpc_tx, - telemetry_span: None, + telemetry: telemetry.as_mut(), })?; network_starter.start_network(); diff --git a/polkadot/bridges/bin/rialto/runtime/Cargo.toml b/polkadot/bridges/bin/rialto/runtime/Cargo.toml index 517ddff9cb..ea8c51d0e8 100644 --- a/polkadot/bridges/bin/rialto/runtime/Cargo.toml +++ 
b/polkadot/bridges/bin/rialto/runtime/Cargo.toml @@ -11,7 +11,8 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } hex-literal = "0.3" libsecp256k1 = { version = "0.3.4", optional = true, default-features = false, features = ["hmac"] } -serde = { version = "1.0.123", optional = true, features = ["derive"] } +log = { version = "0.4.14", default-features = false } +serde = { version = "1.0.124", optional = true, features = ["derive"] } # Bridge dependencies @@ -19,17 +20,16 @@ bp-currency-exchange = { path = "../../../primitives/currency-exchange", default bp-eth-poa = { path = "../../../primitives/ethereum-poa", default-features = false } bp-header-chain = { path = "../../../primitives/header-chain", default-features = false } bp-message-dispatch = { path = "../../../primitives/message-dispatch", default-features = false } -bp-message-lane = { path = "../../../primitives/message-lane", default-features = false } -bp-millau = { path = "../../../primitives/millau", default-features = false } -bp-rialto = { path = "../../../primitives/rialto", default-features = false } +bp-messages = { path = "../../../primitives/messages", default-features = false } +bp-millau = { path = "../../../primitives/chain-millau", default-features = false } +bp-rialto = { path = "../../../primitives/chain-rialto", default-features = false } bp-runtime = { path = "../../../primitives/runtime", default-features = false } bridge-runtime-common = { path = "../../runtime-common", default-features = false } -pallet-bridge-eth-poa = { path = "../../../modules/ethereum", default-features = false } -pallet-bridge-call-dispatch = { path = "../../../modules/call-dispatch", default-features = false } pallet-bridge-currency-exchange = { path = "../../../modules/currency-exchange", default-features = false } -pallet-finality-verifier = { path = "../../../modules/finality-verifier", 
default-features = false } -pallet-substrate-bridge = { path = "../../../modules/substrate", default-features = false } -pallet-message-lane = { path = "../../../modules/message-lane", default-features = false } +pallet-bridge-dispatch = { path = "../../../modules/dispatch", default-features = false } +pallet-bridge-eth-poa = { path = "../../../modules/ethereum", default-features = false } +pallet-bridge-grandpa = { path = "../../../modules/grandpa", default-features = false } +pallet-bridge-messages = { path = "../../../modules/messages", default-features = false } pallet-shift-session-manager = { path = "../../../modules/shift-session-manager", default-features = false } # Substrate Dependencies @@ -47,6 +47,7 @@ pallet-session = { git = "https://github.com/paritytech/substrate", branch = "ma pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } @@ -62,6 +63,7 @@ sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + [dev-dependencies] libsecp256k1 
= { version = "0.3.4", features = ["hmac"] } @@ -75,7 +77,7 @@ std = [ "bp-eth-poa/std", "bp-header-chain/std", "bp-message-dispatch/std", - "bp-message-lane/std", + "bp-messages/std", "bp-millau/std", "bp-rialto/std", "bp-runtime/std", @@ -86,19 +88,20 @@ std = [ "frame-support/std", "frame-system-rpc-runtime-api/std", "frame-system/std", + "log/std", "pallet-aura/std", "pallet-balances/std", - "pallet-bridge-eth-poa/std", - "pallet-bridge-call-dispatch/std", "pallet-bridge-currency-exchange/std", - "pallet-finality-verifier/std", + "pallet-bridge-dispatch/std", + "pallet-bridge-eth-poa/std", + "pallet-bridge-grandpa/std", + "pallet-bridge-messages/std", "pallet-grandpa/std", - "pallet-message-lane/std", "pallet-randomness-collective-flip/std", "pallet-shift-session-manager/std", - "pallet-substrate-bridge/std", "pallet-sudo/std", "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", "serde", "sp-api/std", @@ -124,6 +127,6 @@ runtime-benchmarks = [ "libsecp256k1", "pallet-bridge-currency-exchange/runtime-benchmarks", "pallet-bridge-eth-poa/runtime-benchmarks", - "pallet-message-lane/runtime-benchmarks", + "pallet-bridge-messages/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] diff --git a/polkadot/bridges/bin/rialto/runtime/build.rs b/polkadot/bridges/bin/rialto/runtime/build.rs index 4fda040c9b..dcb5cb0621 100644 --- a/polkadot/bridges/bin/rialto/runtime/build.rs +++ b/polkadot/bridges/bin/rialto/runtime/build.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/bin/rialto/runtime/src/benches.rs b/polkadot/bridges/bin/rialto/runtime/src/benches.rs index 4ca476e5f3..86d6b8361c 100644 --- a/polkadot/bridges/bin/rialto/runtime/src/benches.rs +++ b/polkadot/bridges/bin/rialto/runtime/src/benches.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. +// Copyright 2020-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/bin/rialto/runtime/src/exchange.rs b/polkadot/bridges/bin/rialto/runtime/src/exchange.rs index 926d959537..a054962a79 100644 --- a/polkadot/bridges/bin/rialto/runtime/src/exchange.rs +++ b/polkadot/bridges/bin/rialto/runtime/src/exchange.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -81,7 +81,7 @@ impl MaybeLockFundsTransaction for EthTransaction { // we only accept transactions sending funds directly to the pre-configured address if tx.unsigned.to != Some(LOCK_FUNDS_ADDRESS.into()) { - frame_support::debug::trace!( + log::trace!( target: "runtime", "Failed to parse fund locks transaction. Invalid peer recipient: {:?}", tx.unsigned.to, @@ -94,7 +94,7 @@ impl MaybeLockFundsTransaction for EthTransaction { match tx.unsigned.payload.len() { 32 => recipient_raw.as_fixed_bytes_mut().copy_from_slice(&tx.unsigned.payload), len => { - frame_support::debug::trace!( + log::trace!( target: "runtime", "Failed to parse fund locks transaction. 
Invalid recipient length: {}", len, @@ -106,7 +106,7 @@ impl MaybeLockFundsTransaction for EthTransaction { let amount = tx.unsigned.value.low_u128(); if tx.unsigned.value != amount.into() { - frame_support::debug::trace!( + log::trace!( target: "runtime", "Failed to parse fund locks transaction. Invalid amount: {}", tx.unsigned.value, diff --git a/polkadot/bridges/bin/rialto/runtime/src/kovan.rs b/polkadot/bridges/bin/rialto/runtime/src/kovan.rs index fa76347db2..03b0ca8a07 100644 --- a/polkadot/bridges/bin/rialto/runtime/src/kovan.rs +++ b/polkadot/bridges/bin/rialto/runtime/src/kovan.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/bin/rialto/runtime/src/lib.rs b/polkadot/bridges/bin/rialto/runtime/src/lib.rs index 033e4f9f59..4e81d3efb1 100644 --- a/polkadot/bridges/bin/rialto/runtime/src/lib.rs +++ b/polkadot/bridges/bin/rialto/runtime/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify @@ -43,6 +43,7 @@ use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge}; use bridge_runtime_common::messages::{source::estimate_message_dispatch_and_delivery_fee, MessageBridge}; use codec::Decode; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList}; +use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; use sp_api::impl_runtime_apis; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; @@ -69,8 +70,8 @@ pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; pub use pallet_bridge_currency_exchange::Call as BridgeCurrencyExchangeCall; pub use pallet_bridge_eth_poa::Call as BridgeEthPoACall; -pub use pallet_message_lane::Call as MessageLaneCall; -pub use pallet_substrate_bridge::Call as BridgeMillauCall; +pub use pallet_bridge_grandpa::Call as BridgeGrandpaMillauCall; +pub use pallet_bridge_messages::Call as MessagesCall; pub use pallet_sudo::Call as SudoCall; pub use pallet_timestamp::Call as TimestampCall; @@ -207,6 +208,7 @@ impl frame_system::Config for Runtime { type DbWeight = DbWeight; /// The designated SS58 prefix of this chain. type SS58Prefix = SS58Prefix; + /// The set code logic, just the default since we're not a parachain. 
type OnSetCode = (); } @@ -256,9 +258,9 @@ impl pallet_bridge_currency_exchange::Config for Runtime type DepositInto = DepositInto; } -impl pallet_bridge_call_dispatch::Config for Runtime { +impl pallet_bridge_dispatch::Config for Runtime { type Event = Event; - type MessageId = (bp_message_lane::LaneId, bp_message_lane::MessageNonce); + type MessageId = (bp_messages::LaneId, bp_messages::MessageNonce); type Call = Call; type CallFilter = (); type EncodedCall = crate::millau_messages::FromMillauEncodedCall; @@ -290,7 +292,7 @@ impl bp_currency_exchange::DepositInto for DepositInto { // - deposited != 0: (should never happen in practice) deposit has been partially completed match deposited_amount { _ if deposited_amount == amount => { - frame_support::debug::trace!( + log::trace!( target: "runtime", "Deposited {} to {:?}", amount, @@ -300,7 +302,7 @@ impl bp_currency_exchange::DepositInto for DepositInto { Ok(()) } _ if deposited_amount == 0 => { - frame_support::debug::error!( + log::error!( target: "runtime", "Deposit of {} to {:?} has failed", amount, @@ -310,7 +312,7 @@ impl bp_currency_exchange::DepositInto for DepositInto { Err(bp_currency_exchange::Error::DepositFailed) } _ => { - frame_support::debug::error!( + log::error!( target: "runtime", "Deposit of {} to {:?} has partially competed. {} has been deposited", amount, @@ -398,7 +400,7 @@ impl pallet_session::Config for Runtime { type ValidatorIdOf = (); type ShouldEndSession = pallet_session::PeriodicSessions; type NextSessionRotation = pallet_session::PeriodicSessions; - type SessionManager = pallet_shift_session_manager::Module; + type SessionManager = pallet_shift_session_manager::Pallet; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; type DisabledValidatorsThreshold = (); @@ -406,33 +408,34 @@ impl pallet_session::Config for Runtime { type WeightInfo = (); } -impl pallet_substrate_bridge::Config for Runtime { - type BridgedChain = bp_millau::Millau; -} - parameter_types! 
{ // This is a pretty unscientific cap. // // Note that once this is hit the pallet will essentially throttle incoming requests down to one // call per block. pub const MaxRequests: u32 = 50; + + // Number of headers to keep. + // + // Assuming the worst case of every header being finalized, we will keep headers at least for a + // week. + pub const HeadersToKeep: u32 = 7 * bp_rialto::DAYS as u32; } -impl pallet_finality_verifier::Config for Runtime { +impl pallet_bridge_grandpa::Config for Runtime { type BridgedChain = bp_millau::Millau; - type HeaderChain = pallet_substrate_bridge::Module; - type AncestryProof = Vec; - type AncestryChecker = bp_header_chain::LinearAncestryChecker; type MaxRequests = MaxRequests; + type HeadersToKeep = HeadersToKeep; + type WeightInfo = pallet_bridge_grandpa::weights::RialtoWeight; } impl pallet_shift_session_manager::Config for Runtime {} parameter_types! { - pub const MaxMessagesToPruneAtOnce: bp_message_lane::MessageNonce = 8; - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_message_lane::MessageNonce = + pub const MaxMessagesToPruneAtOnce: bp_messages::MessageNonce = 8; + pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_message_lane::MessageNonce = + pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE; // `IdentityFee` is used by Rialto => we may use weight directly pub const GetDeliveryConfirmationTransactionFee: Balance = @@ -440,11 +443,13 @@ parameter_types! { pub const RootAccountForPayments: Option = None; } -pub(crate) type WithMillauMessageLaneInstance = pallet_message_lane::DefaultInstance; -impl pallet_message_lane::Config for Runtime { +/// Instance of the messages pallet used to relay messages to/from Millau chain. 
+pub type WithMillauMessagesInstance = pallet_bridge_messages::DefaultInstance; + +impl pallet_bridge_messages::Config for Runtime { type Event = Event; - type WeightInfo = pallet_message_lane::weights::RialtoWeight; - type Parameter = millau_messages::RialtoToMillauMessageLaneParameter; + type WeightInfo = pallet_bridge_messages::weights::RialtoWeight; + type Parameter = millau_messages::RialtoToMillauMessagesParameter; type MaxMessagesToPruneAtOnce = MaxMessagesToPruneAtOnce; type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; @@ -460,7 +465,7 @@ impl pallet_message_lane::Config for Runtime { type TargetHeaderChain = crate::millau_messages::Millau; type LaneMessageVerifier = crate::millau_messages::ToMillauMessageVerifier; - type MessageDeliveryAndDispatchPayment = pallet_message_lane::instant_payments::InstantCurrencyPayments< + type MessageDeliveryAndDispatchPayment = pallet_bridge_messages::instant_payments::InstantCurrencyPayments< Runtime, pallet_balances::Pallet, GetDeliveryConfirmationTransactionFee, @@ -481,10 +486,9 @@ construct_runtime!( BridgeKovan: pallet_bridge_eth_poa::::{Pallet, Call, Config, Storage, ValidateUnsigned}, BridgeRialtoCurrencyExchange: pallet_bridge_currency_exchange::::{Pallet, Call}, BridgeKovanCurrencyExchange: pallet_bridge_currency_exchange::::{Pallet, Call}, - BridgeMillau: pallet_substrate_bridge::{Pallet, Call, Storage, Config}, - BridgeFinalityVerifier: pallet_finality_verifier::{Pallet, Call}, - BridgeCallDispatch: pallet_bridge_call_dispatch::{Pallet, Event}, - BridgeMillauMessageLane: pallet_message_lane::{Pallet, Call, Storage, Event}, + BridgeMillauGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage}, + BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event}, + BridgeMillauMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event}, System: frame_system::{Pallet, Call, Config, Storage, Event}, 
RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Call, Storage}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, @@ -522,6 +526,8 @@ pub type SignedExtra = ( pub type SignedPayload = generic::SignedPayload; /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +/// Extrinsic type that has already been checked. +pub type CheckedExtrinsic = generic::CheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive, Runtime, AllPallets>; @@ -533,7 +539,7 @@ impl_runtime_apis! { } fn execute_block(block: Block) { - Executive::execute_block(block) + Executive::execute_block(block); } fn initialize_block(header: &::Header) { @@ -618,26 +624,14 @@ impl_runtime_apis! { } } - impl bp_millau::MillauHeaderApi for Runtime { - fn best_blocks() -> Vec<(bp_millau::BlockNumber, bp_millau::Hash)> { - BridgeMillau::best_headers() - } - - fn finalized_block() -> (bp_millau::BlockNumber, bp_millau::Hash) { - let header = BridgeMillau::best_finalized(); + impl bp_millau::MillauFinalityApi for Runtime { + fn best_finalized() -> (bp_millau::BlockNumber, bp_millau::Hash) { + let header = BridgeMillauGrandpa::best_finalized(); (header.number, header.hash()) } - fn incomplete_headers() -> Vec<(bp_millau::BlockNumber, bp_millau::Hash)> { - BridgeMillau::require_justifications() - } - - fn is_known_block(hash: bp_millau::Hash) -> bool { - BridgeMillau::is_known_header(hash) - } - - fn is_finalized_block(hash: bp_millau::Hash) -> bool { - BridgeMillau::is_finalized_header(hash) + fn is_known_header(hash: bp_millau::Hash) -> bool { + BridgeMillauGrandpa::is_known_header(hash) } } @@ -669,8 +663,8 @@ impl_runtime_apis! 
{ } impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> u64 { - Aura::slot_duration() + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) } fn authorities() -> Vec { @@ -678,6 +672,18 @@ impl_runtime_apis! { } } + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< + Block, + Balance, + > for Runtime { + fn query_info(uxt: ::Extrinsic, len: u32) -> RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details(uxt: ::Extrinsic, len: u32) -> FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + } + impl sp_session::SessionKeys for Runtime { fn generate_session_keys(seed: Option>) -> Vec { SessionKeys::generate(seed) @@ -723,7 +729,7 @@ impl_runtime_apis! { impl bp_millau::ToMillauOutboundLaneApi for Runtime { fn estimate_message_delivery_and_dispatch_fee( - _lane_id: bp_message_lane::LaneId, + _lane_id: bp_messages::LaneId, payload: ToMillauMessagePayload, ) -> Option { estimate_message_dispatch_and_delivery_fee::( @@ -733,12 +739,12 @@ impl_runtime_apis! { } fn messages_dispatch_weight( - lane: bp_message_lane::LaneId, - begin: bp_message_lane::MessageNonce, - end: bp_message_lane::MessageNonce, - ) -> Vec<(bp_message_lane::MessageNonce, Weight, u32)> { + lane: bp_messages::LaneId, + begin: bp_messages::MessageNonce, + end: bp_messages::MessageNonce, + ) -> Vec<(bp_messages::MessageNonce, Weight, u32)> { (begin..=end).filter_map(|nonce| { - let encoded_payload = BridgeMillauMessageLane::outbound_message_payload(lane, nonce)?; + let encoded_payload = BridgeMillauMessages::outbound_message_payload(lane, nonce)?; let decoded_payload = millau_messages::ToMillauMessagePayload::decode( &mut &encoded_payload[..] ).ok()?; @@ -747,26 +753,26 @@ impl_runtime_apis! 
{ .collect() } - fn latest_received_nonce(lane: bp_message_lane::LaneId) -> bp_message_lane::MessageNonce { - BridgeMillauMessageLane::outbound_latest_received_nonce(lane) + fn latest_received_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { + BridgeMillauMessages::outbound_latest_received_nonce(lane) } - fn latest_generated_nonce(lane: bp_message_lane::LaneId) -> bp_message_lane::MessageNonce { - BridgeMillauMessageLane::outbound_latest_generated_nonce(lane) + fn latest_generated_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { + BridgeMillauMessages::outbound_latest_generated_nonce(lane) } } impl bp_millau::FromMillauInboundLaneApi for Runtime { - fn latest_received_nonce(lane: bp_message_lane::LaneId) -> bp_message_lane::MessageNonce { - BridgeMillauMessageLane::inbound_latest_received_nonce(lane) + fn latest_received_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { + BridgeMillauMessages::inbound_latest_received_nonce(lane) } - fn latest_confirmed_nonce(lane: bp_message_lane::LaneId) -> bp_message_lane::MessageNonce { - BridgeMillauMessageLane::inbound_latest_confirmed_nonce(lane) + fn latest_confirmed_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { + BridgeMillauMessages::inbound_latest_confirmed_nonce(lane) } - fn unrewarded_relayers_state(lane: bp_message_lane::LaneId) -> bp_message_lane::UnrewardedRelayersState { - BridgeMillauMessageLane::inbound_unrewarded_relayers_state(lane) + fn unrewarded_relayers_state(lane: bp_messages::LaneId) -> bp_messages::UnrewardedRelayersState { + BridgeMillauMessages::inbound_unrewarded_relayers_state(lane) } } @@ -776,6 +782,7 @@ impl_runtime_apis! 
{ config: frame_benchmarking::BenchmarkConfig, ) -> Result, sp_runtime::RuntimeString> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, TrackedStorageKey, add_benchmark}; + let whitelist: Vec = vec![ // Block Number hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), @@ -793,7 +800,7 @@ impl_runtime_apis! { let params = (&config, &whitelist); use pallet_bridge_currency_exchange::benchmarking::{ - Module as BridgeCurrencyExchangeBench, + Pallet as BridgeCurrencyExchangeBench, Config as BridgeCurrencyExchangeConfig, ProofParams as BridgeCurrencyExchangeProofParams, }; @@ -834,16 +841,16 @@ impl_runtime_apis! { use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge}; use bridge_runtime_common::messages; - use pallet_message_lane::benchmarking::{ - Module as MessageLaneBench, - Config as MessageLaneConfig, - MessageDeliveryProofParams as MessageLaneMessageDeliveryProofParams, - MessageParams as MessageLaneMessageParams, - MessageProofParams as MessageLaneMessageProofParams, - ProofSize as MessageLaneProofSize, + use pallet_bridge_messages::benchmarking::{ + Pallet as MessagesBench, + Config as MessagesConfig, + MessageDeliveryProofParams, + MessageParams, + MessageProofParams, + ProofSize as MessagesProofSize, }; - impl MessageLaneConfig for Runtime { + impl MessagesConfig for Runtime { fn maximal_message_size() -> u32 { messages::source::maximal_message_size::() } @@ -864,10 +871,10 @@ impl_runtime_apis! { } fn prepare_outbound_message( - params: MessageLaneMessageParams, + params: MessageParams, ) -> (millau_messages::ToMillauMessagePayload, Balance) { let message_payload = vec![0; params.size as usize]; - let dispatch_origin = pallet_bridge_call_dispatch::CallOrigin::SourceAccount( + let dispatch_origin = pallet_bridge_dispatch::CallOrigin::SourceAccount( params.sender_account, ); @@ -877,25 +884,25 @@ impl_runtime_apis! 
{ origin: dispatch_origin, call: message_payload, }; - (message, pallet_message_lane::benchmarking::MESSAGE_FEE.into()) + (message, pallet_bridge_messages::benchmarking::MESSAGE_FEE.into()) } fn prepare_message_proof( - params: MessageLaneMessageProofParams, + params: MessageProofParams, ) -> (millau_messages::FromMillauMessagesProof, Weight) { use crate::millau_messages::{Millau, WithMillauMessageBridge}; - use bp_message_lane::MessageKey; + use bp_messages::MessageKey; use bridge_runtime_common::{ - messages::ChainWithMessageLanes, + messages::ChainWithMessages, messages_benchmarking::{ed25519_sign, prepare_message_proof}, }; use codec::Encode; use frame_support::weights::GetDispatchInfo; - use pallet_message_lane::storage_keys; + use pallet_bridge_messages::storage_keys; use sp_runtime::traits::Header; let remark = match params.size { - MessageLaneProofSize::Minimal(ref size) => vec![0u8; *size as _], + MessagesProofSize::Minimal(ref size) => vec![0u8; *size as _], _ => vec![], }; let call = Call::System(SystemCall::remark(remark)); @@ -913,15 +920,16 @@ impl_runtime_apis! { let make_millau_message_key = |message_key: MessageKey| storage_keys::message_key::< Runtime, - ::MessageLaneInstance, + ::MessagesInstance, >( &message_key.lane_id, message_key.nonce, ).0; let make_millau_outbound_lane_data_key = |lane_id| storage_keys::outbound_lane_data_key::< - ::MessageLaneInstance, + ::MessagesInstance, >( &lane_id, ).0; + let make_millau_header = |state_root| bp_millau::Header::new( 0, Default::default(), @@ -930,16 +938,16 @@ impl_runtime_apis! 
{ Default::default(), ); - prepare_message_proof::( + prepare_message_proof::( params, make_millau_message_key, make_millau_outbound_lane_data_key, make_millau_header, call_weight, - pallet_bridge_call_dispatch::MessagePayload { + pallet_bridge_dispatch::MessagePayload { spec_version: VERSION.spec_version, weight: call_weight, - origin: pallet_bridge_call_dispatch::CallOrigin::< + origin: pallet_bridge_dispatch::CallOrigin::< bp_millau::AccountId, MultiSigner, Signature, @@ -954,20 +962,20 @@ impl_runtime_apis! { } fn prepare_message_delivery_proof( - params: MessageLaneMessageDeliveryProofParams, + params: MessageDeliveryProofParams, ) -> millau_messages::ToMillauMessagesDeliveryProof { use crate::millau_messages::{Millau, WithMillauMessageBridge}; use bridge_runtime_common::{ - messages::ChainWithMessageLanes, + messages::ChainWithMessages, messages_benchmarking::prepare_message_delivery_proof, }; use sp_runtime::traits::Header; - prepare_message_delivery_proof::( + prepare_message_delivery_proof::( params, - |lane_id| pallet_message_lane::storage_keys::inbound_lane_data_key::< + |lane_id| pallet_bridge_messages::storage_keys::inbound_lane_data_key::< Runtime, - ::MessageLaneInstance, + ::MessagesInstance, >( &lane_id, ).0, @@ -982,7 +990,6 @@ impl_runtime_apis! { } } - add_benchmark!(params, batches, pallet_bridge_eth_poa, BridgeKovan); add_benchmark!( params, batches, @@ -992,9 +999,10 @@ impl_runtime_apis! 
{ add_benchmark!( params, batches, - pallet_message_lane, - MessageLaneBench:: + pallet_bridge_messages, + MessagesBench:: ); + add_benchmark!(params, batches, pallet_bridge_grandpa, BridgeMillauGrandpa); if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } Ok(batches) @@ -1017,7 +1025,7 @@ where AccountId: codec::Encode, SpecVersion: codec::Encode, { - pallet_bridge_call_dispatch::account_ownership_digest( + pallet_bridge_dispatch::account_ownership_digest( millau_call, rialto_account_id, millau_spec_version, @@ -1068,9 +1076,9 @@ mod tests { #[test] fn ensure_rialto_message_lane_weights_are_correct() { - type Weights = pallet_message_lane::weights::RialtoWeight; + type Weights = pallet_bridge_messages::weights::RialtoWeight; - pallet_message_lane::ensure_weights_are_correct::( + pallet_bridge_messages::ensure_weights_are_correct::( bp_rialto::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT, bp_rialto::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT, bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, @@ -1079,34 +1087,24 @@ mod tests { let max_incoming_message_proof_size = bp_millau::EXTRA_STORAGE_PROOF_SIZE.saturating_add( messages::target::maximal_incoming_message_size(bp_rialto::max_extrinsic_size()), ); - pallet_message_lane::ensure_able_to_receive_message::( + pallet_bridge_messages::ensure_able_to_receive_message::( bp_rialto::max_extrinsic_size(), bp_rialto::max_extrinsic_weight(), max_incoming_message_proof_size, - bridge_runtime_common::messages::transaction_weight_without_multiplier( - bp_rialto::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - max_incoming_message_proof_size as _, - 0, - ), messages::target::maximal_incoming_message_dispatch_weight(bp_rialto::max_extrinsic_weight()), ); - let max_incoming_inbound_lane_data_proof_size = bp_message_lane::InboundLaneData::<()>::encoded_size_hint( + let max_incoming_inbound_lane_data_proof_size = bp_messages::InboundLaneData::<()>::encoded_size_hint( 
bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _, ) .unwrap_or(u32::MAX); - pallet_message_lane::ensure_able_to_receive_confirmation::( + pallet_bridge_messages::ensure_able_to_receive_confirmation::( bp_rialto::max_extrinsic_size(), bp_rialto::max_extrinsic_weight(), max_incoming_inbound_lane_data_proof_size, bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, - bridge_runtime_common::messages::transaction_weight_without_multiplier( - bp_rialto::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - max_incoming_inbound_lane_data_proof_size as _, - 0, - ), ); } diff --git a/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs b/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs index 9fb57ee861..8ee2094660 100644 --- a/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs +++ b/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,52 +14,32 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Everything required to serve Millau <-> Rialto message lanes. +//! Everything required to serve Millau <-> Rialto messages. 
use crate::Runtime; -use bp_message_lane::{ +use bp_messages::{ source_chain::TargetHeaderChain, target_chain::{ProvedMessages, SourceHeaderChain}, - InboundLaneData, LaneId, Message, MessageNonce, Parameter as MessageLaneParameter, + InboundLaneData, LaneId, Message, MessageNonce, Parameter as MessagesParameter, }; use bp_runtime::{InstanceId, MILLAU_BRIDGE_INSTANCE}; -use bridge_runtime_common::messages::{self, ChainWithMessageLanes, MessageBridge}; +use bridge_runtime_common::messages::{self, MessageBridge, MessageTransaction}; use codec::{Decode, Encode}; use frame_support::{ parameter_types, - weights::{DispatchClass, Weight, WeightToFeePolynomial}, + weights::{DispatchClass, Weight}, RuntimeDebug, }; -use sp_core::storage::StorageKey; use sp_runtime::{FixedPointNumber, FixedU128}; use sp_std::{convert::TryFrom, ops::RangeInclusive}; +/// Initial value of `MillauToRialtoConversionRate` parameter. +pub const INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE: FixedU128 = FixedU128::from_inner(FixedU128::DIV); + parameter_types! { /// Millau to Rialto conversion rate. Initially we treat both tokens as equal. - storage MillauToRialtoConversionRate: FixedU128 = 1.into(); -} - -/// Storage key of the Rialto -> Millau message in the runtime storage. -pub fn message_key(lane: &LaneId, nonce: MessageNonce) -> StorageKey { - pallet_message_lane::storage_keys::message_key::::MessageLaneInstance>( - lane, nonce, - ) -} - -/// Storage key of the Rialto -> Millau message lane state in the runtime storage. -pub fn outbound_lane_data_key(lane: &LaneId) -> StorageKey { - pallet_message_lane::storage_keys::outbound_lane_data_key::<::MessageLaneInstance>( - lane, - ) -} - -/// Storage key of the Millau -> Rialto message lane state in the runtime storage. 
-pub fn inbound_lane_data_key(lane: &LaneId) -> StorageKey { - pallet_message_lane::storage_keys::inbound_lane_data_key::< - Runtime, - ::MessageLaneInstance, - >(lane) + pub storage MillauToRialtoConversionRate: FixedU128 = INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE; } /// Message payload for Rialto -> Millau messages. @@ -78,7 +58,7 @@ pub type FromMillauEncodedCall = messages::target::FromBridgedChainEncodedMessag pub type FromMillauMessageDispatch = messages::target::FromBridgedChainMessageDispatch< WithMillauMessageBridge, crate::Runtime, - pallet_bridge_call_dispatch::DefaultInstance, + pallet_bridge_dispatch::DefaultInstance, >; /// Messages proof for Millau -> Rialto messages. @@ -99,59 +79,6 @@ impl MessageBridge for WithMillauMessageBridge { type ThisChain = Rialto; type BridgedChain = Millau; - fn maximal_extrinsic_size_on_target_chain() -> u32 { - bp_millau::max_extrinsic_size() - } - - fn weight_limits_of_message_on_bridged_chain(_message_payload: &[u8]) -> RangeInclusive { - // we don't want to relay too large messages + keep reserve for future upgrades - let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(bp_millau::max_extrinsic_weight()); - - // we're charging for payload bytes in `WithMillauMessageBridge::weight_of_delivery_transaction` function - // - // this bridge may be used to deliver all kind of messages, so we're not making any assumptions about - // minimal dispatch weight here - - 0..=upper_limit - } - - fn weight_of_delivery_transaction(message_payload: &[u8]) -> Weight { - let message_payload_len = u32::try_from(message_payload.len()) - .map(Into::into) - .unwrap_or(Weight::MAX); - let extra_bytes_in_payload = - message_payload_len.saturating_sub(pallet_message_lane::EXPECTED_DEFAULT_MESSAGE_LENGTH.into()); - messages::transaction_weight_without_multiplier( - bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - message_payload_len.saturating_add(bp_rialto::EXTRA_STORAGE_PROOF_SIZE as _), - 
extra_bytes_in_payload - .saturating_mul(bp_millau::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT) - .saturating_add(bp_millau::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT), - ) - } - - fn weight_of_delivery_confirmation_transaction_on_this_chain() -> Weight { - let inbounded_data_size: Weight = - InboundLaneData::::encoded_size_hint(bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, 1) - .map(Into::into) - .unwrap_or(Weight::MAX); - - messages::transaction_weight_without_multiplier( - bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - inbounded_data_size.saturating_add(bp_millau::EXTRA_STORAGE_PROOF_SIZE as _), - bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, - ) - } - - fn this_weight_to_this_balance(weight: Weight) -> bp_rialto::Balance { - ::WeightToFee::calc(&weight) - } - - fn bridged_weight_to_bridged_balance(weight: Weight) -> bp_millau::Balance { - // we're using the same weights in both chains now - ::WeightToFee::calc(&weight) as _ - } - fn bridged_balance_to_this_balance(bridged_balance: bp_millau::Balance) -> bp_rialto::Balance { bp_rialto::Balance::try_from(MillauToRialtoConversionRate::get().saturating_mul_int(bridged_balance)) .unwrap_or(bp_rialto::Balance::MAX) @@ -162,42 +89,114 @@ impl MessageBridge for WithMillauMessageBridge { #[derive(RuntimeDebug, Clone, Copy)] pub struct Rialto; -impl messages::ChainWithMessageLanes for Rialto { +impl messages::ChainWithMessages for Rialto { type Hash = bp_rialto::Hash; type AccountId = bp_rialto::AccountId; type Signer = bp_rialto::AccountSigner; type Signature = bp_rialto::Signature; - type Call = crate::Call; type Weight = Weight; type Balance = bp_rialto::Balance; - type MessageLaneInstance = crate::WithMillauMessageLaneInstance; + type MessagesInstance = crate::WithMillauMessagesInstance; } -impl messages::ThisChainWithMessageLanes for Rialto { +impl messages::ThisChainWithMessages for Rialto { + type Call = crate::Call; + fn is_outbound_lane_enabled(lane: &LaneId) -> bool { - *lane 
== LaneId::default() + *lane == [0, 0, 0, 0] || *lane == [0, 0, 0, 1] } fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { MessageNonce::MAX } + + fn estimate_delivery_confirmation_transaction() -> MessageTransaction { + let inbound_data_size = + InboundLaneData::::encoded_size_hint(bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, 1) + .unwrap_or(u32::MAX); + + MessageTransaction { + dispatch_weight: bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, + size: inbound_data_size + .saturating_add(bp_millau::EXTRA_STORAGE_PROOF_SIZE) + .saturating_add(bp_rialto::TX_EXTRA_BYTES), + } + } + + fn transaction_payment(transaction: MessageTransaction) -> bp_rialto::Balance { + // in our testnets, both per-byte fee and weight-to-fee are 1:1 + messages::transaction_payment( + bp_rialto::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, + 1, + FixedU128::zero(), + |weight| weight as _, + transaction, + ) + } } /// Millau chain from message lane point of view. #[derive(RuntimeDebug, Clone, Copy)] pub struct Millau; -impl messages::ChainWithMessageLanes for Millau { +impl messages::ChainWithMessages for Millau { type Hash = bp_millau::Hash; type AccountId = bp_millau::AccountId; type Signer = bp_millau::AccountSigner; type Signature = bp_millau::Signature; - type Call = (); // unknown to us type Weight = Weight; type Balance = bp_millau::Balance; - type MessageLaneInstance = pallet_message_lane::DefaultInstance; + type MessagesInstance = pallet_bridge_messages::DefaultInstance; +} + +impl messages::BridgedChainWithMessages for Millau { + fn maximal_extrinsic_size() -> u32 { + bp_millau::max_extrinsic_size() + } + + fn message_weight_limits(_message_payload: &[u8]) -> RangeInclusive { + // we don't want to relay too large messages + keep reserve for future upgrades + let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(bp_millau::max_extrinsic_weight()); + + // we're charging for payload bytes in 
`WithMillauMessageBridge::transaction_payment` function + // + // this bridge may be used to deliver all kind of messages, so we're not making any assumptions about + // minimal dispatch weight here + + 0..=upper_limit + } + + fn estimate_delivery_transaction( + message_payload: &[u8], + message_dispatch_weight: Weight, + ) -> MessageTransaction { + let message_payload_len = u32::try_from(message_payload.len()).unwrap_or(u32::MAX); + let extra_bytes_in_payload = Weight::from(message_payload_len) + .saturating_sub(pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH.into()); + + MessageTransaction { + dispatch_weight: extra_bytes_in_payload + .saturating_mul(bp_millau::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT) + .saturating_add(bp_millau::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT) + .saturating_add(message_dispatch_weight), + size: message_payload_len + .saturating_add(bp_rialto::EXTRA_STORAGE_PROOF_SIZE) + .saturating_add(bp_millau::TX_EXTRA_BYTES), + } + } + + fn transaction_payment(transaction: MessageTransaction) -> bp_millau::Balance { + // in our testnets, both per-byte fee and weight-to-fee are 1:1 + messages::transaction_payment( + bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, + 1, + FixedU128::zero(), + |weight| weight as _, + transaction, + ) + } } impl TargetHeaderChain for Millau { @@ -238,15 +237,15 @@ impl SourceHeaderChain for Millau { /// Rialto -> Millau message lane pallet parameters. #[derive(RuntimeDebug, Clone, Encode, Decode, PartialEq, Eq)] -pub enum RialtoToMillauMessageLaneParameter { +pub enum RialtoToMillauMessagesParameter { /// The conversion formula we use is: `RialtoTokens = MillauTokens * conversion_rate`. 
MillauToRialtoConversionRate(FixedU128), } -impl MessageLaneParameter for RialtoToMillauMessageLaneParameter { +impl MessagesParameter for RialtoToMillauMessagesParameter { fn save(&self) { match *self { - RialtoToMillauMessageLaneParameter::MillauToRialtoConversionRate(ref conversion_rate) => { + RialtoToMillauMessagesParameter::MillauToRialtoConversionRate(ref conversion_rate) => { MillauToRialtoConversionRate::set(conversion_rate) } } diff --git a/polkadot/bridges/bin/rialto/runtime/src/rialto_poa.rs b/polkadot/bridges/bin/rialto/runtime/src/rialto_poa.rs index 54ac8e2571..83b263975a 100644 --- a/polkadot/bridges/bin/rialto/runtime/src/rialto_poa.rs +++ b/polkadot/bridges/bin/rialto/runtime/src/rialto_poa.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. +// Copyright 2020-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/bin/runtime-common/Cargo.toml b/polkadot/bridges/bin/runtime-common/Cargo.toml index 69b618e7da..83803d06de 100644 --- a/polkadot/bridges/bin/runtime-common/Cargo.toml +++ b/polkadot/bridges/bin/runtime-common/Cargo.toml @@ -15,11 +15,11 @@ hash-db = { version = "0.15.2", default-features = false } # Bridge dependencies bp-message-dispatch = { path = "../../primitives/message-dispatch", default-features = false } -bp-message-lane = { path = "../../primitives/message-lane", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } bp-runtime = { path = "../../primitives/runtime", default-features = false } -pallet-bridge-call-dispatch = { path = "../../modules/call-dispatch", default-features = false } -pallet-message-lane = { path = "../../modules/message-lane", default-features = false } -pallet-substrate-bridge = { path = "../../modules/substrate", default-features = false } +pallet-bridge-dispatch = { path = "../../modules/dispatch", 
default-features = false } +pallet-bridge-grandpa = { path = "../../modules/grandpa", default-features = false } +pallet-bridge-messages = { path = "../../modules/messages", default-features = false } # Substrate dependencies @@ -34,14 +34,14 @@ sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default = ["std"] std = [ "bp-message-dispatch/std", - "bp-message-lane/std", + "bp-messages/std", "bp-runtime/std", "codec/std", "frame-support/std", "hash-db/std", - "pallet-bridge-call-dispatch/std", - "pallet-message-lane/std", - "pallet-substrate-bridge/std", + "pallet-bridge-dispatch/std", + "pallet-bridge-grandpa/std", + "pallet-bridge-messages/std", "sp-core/std", "sp-runtime/std", "sp-state-machine/std", @@ -50,7 +50,7 @@ std = [ ] runtime-benchmarks = [ "ed25519-dalek/u64_backend", - "pallet-message-lane/runtime-benchmarks", - "pallet-substrate-bridge/runtime-benchmarks", + "pallet-bridge-grandpa/runtime-benchmarks", + "pallet-bridge-messages/runtime-benchmarks", "sp-state-machine", ] diff --git a/polkadot/bridges/bin/runtime-common/README.md b/polkadot/bridges/bin/runtime-common/README.md index 58fe92c9ca..b375f48309 100644 --- a/polkadot/bridges/bin/runtime-common/README.md +++ b/polkadot/bridges/bin/runtime-common/README.md @@ -1,22 +1,22 @@ -# Helpers for Message Lane Module Integration +# Helpers for Messages Module Integration The [`messages`](./src/messages.rs) module of this crate contains a bunch of helpers for integrating -message lane module into your runtime. Basic prerequisites of these helpers are: +messages module into your runtime. 
Basic prerequisites of these helpers are: - we're going to bridge Substrate-based chain with another Substrate-based chain; -- both chains have [message lane module](../../modules/message-lane/README.md), Substrate bridge - module and the [call dispatch module](../../modules/call-dispatch/README.md); +- both chains have [messages module](../../modules/messages/README.md), Substrate bridge + module and the [call dispatch module](../../modules/dispatch/README.md); - all message lanes are identical and may be used to transfer the same messages; - the messages sent over the bridge are dispatched using - [call dispatch module](../../modules/call-dispatch/README.md); -- the messages are `pallet_bridge_call_dispatch::MessagePayload` structures, where `call` field is + [call dispatch module](../../modules/dispatch/README.md); +- the messages are `pallet_bridge_dispatch::MessagePayload` structures, where `call` field is encoded `Call` of the target chain. This means that the `Call` is opaque to the - [message lane module](../../modules/message-lane/README.md) instance at the source chain. + [messages module](../../modules/messages/README.md) instance at the source chain. 
It is pre-encoded by the message submitter; -- all proofs in the [message lane module](../../modules/message-lane/README.md) transactions are +- all proofs in the [messages module](../../modules/messages/README.md) transactions are based on the storage proofs from the bridged chain: storage proof of the outbound message (value - from the `pallet_message_lane::Store::MessagePayload` map), storage proof of the outbound lane - state (value from the `pallet_message_lane::Store::OutboundLanes` map) and storage proof of the - inbound lane state (value from the `pallet_message_lane::Store::InboundLanes` map); + from the `pallet_bridge_messages::Store::MessagePayload` map), storage proof of the outbound lane + state (value from the `pallet_bridge_messages::Store::OutboundLanes` map) and storage proof of the + inbound lane state (value from the `pallet_bridge_messages::Store::InboundLanes` map); - storage proofs are built at the finalized headers of the corresponding chain. So all message lane transactions with proofs are verifying storage proofs against finalized chain headers from Substrate bridge module. @@ -27,23 +27,69 @@ message lane module into your runtime. Basic prerequisites of these helpers are: ## Contents - [`MessageBridge` Trait](#messagebridge-trait) -- [`ChainWithMessageLanes` Trait ](#chainwithmessagelanes-trait) +- [`ChainWithMessages` Trait ](#ChainWithMessages-trait) - [Helpers for the Source Chain](#helpers-for-the-source-chain) - [Helpers for the Target Chain](#helpers-for-the-target-chain) ## `MessageBridge` Trait -The essence of your integration will be a struct that implements a `MessageBridge` trait. Let's -review every method and give some implementation hints here: +The essence of your integration will be a struct that implements a `MessageBridge` trait. It has +single method (`MessageBridge::bridged_balance_to_this_balance`), used to convert from bridged chain +tokens into this chain tokens. 
The bridge also requires two associated types to be specified - +`ThisChain` and `BridgedChain`. -- `MessageBridge::maximal_extrinsic_size_on_target_chain`: you will need to return the maximal - extrinsic size of the target chain from this function. This may be the constant that is updated - when your runtime is upgraded, or you may use the - [message lane parameters functionality](../../modules/message-lane/README.md#Non-Essential-Functionality) - to allow the pallet owner to update this value more frequently (you may also want to use this - functionality for all constants that are used in other methods described below). +Worth to say that if you're going to use hardcoded constant (conversion rate) in the +`MessageBridge::bridged_balance_to_this_balance` method (or in any other method of +`ThisChainWithMessages` or `BridgedChainWithMessages` traits), then you should take a +look at the +[messages parameters functionality](../../modules/messages/README.md#Non-Essential-Functionality). +They allow pallet owner to update constants more frequently than runtime upgrade happens. -- `MessageBridge::weight_limits_of_message_on_bridged_chain`: you'll need to return a range of +## `ChainWithMessages` Trait + +The trait is quite simple and can easily be implemented - you just need to specify types used at the +corresponding chain. There is single exception, though (it may be changed in the future): + +- `ChainWithMessages::MessagesInstance`: this is used to compute runtime storage keys. There + may be several instances of messages pallet, included in the Runtime. Every instance stores + messages and these messages stored under different keys. When we are verifying storage proofs from + the bridged chain, we should know which instance we're talking to. This is fine, but there's + significant inconvenience with that - this chain runtime must have the same messages pallet + instance. 
This does not necessarily mean that we should use the same instance on both chains - + this instance may be used to bridge with another chain/instance, or may not be used at all. + +## `ThisChainWithMessages` Trait + +This trait represents this chain from bridge point of view. Let's review every method of this trait: + +- `ThisChainWithMessages::is_outbound_lane_enabled`: is used to check whether given lane accepts + outbound messages. + +- `ThisChainWithMessages::maximal_pending_messages_at_outbound_lane`: you should return maximal + number of pending (undelivered) messages from this function. Returning small values would require + relayers to operate faster and could make message sending logic more complicated. On the other + hand, returning large values could lead to chain state growth. + +- `ThisChainWithMessages::estimate_delivery_confirmation_transaction`: you'll need to return + estimated size and dispatch weight of the delivery confirmation transaction (that happens on + this chain) from this function. + +- `ThisChainWithMessages::transaction_payment`: you'll need to return fee that the submitter + must pay for given transaction on this chain. Normally, you would use transaction payment pallet + for this. However, if your chain has non-zero fee multiplier set, this would mean that the + payment will be computed using current value of this multiplier. But since this transaction + will be submitted in the future, you may want to choose other value instead. Otherwise, + non-altruistic relayer may choose not to submit this transaction until number of transactions + will decrease. + +## `BridgedChainWithMessages` Trait + +This trait represents this chain from bridge point of view. Let's review every method of this trait: + +- `BridgedChainWithMessages::maximal_extrinsic_size`: you will need to return the maximal + extrinsic size of the target chain from this function. 
+ +- `MessageBridge::message_weight_limits`: you'll need to return a range of dispatch weights that the outbound message may take at the target chain. Please keep in mind that our helpers assume that the message is an encoded call of the target chain. But we never decode this call at the source chain. So you can't simply get dispatch weight from pre-dispatch @@ -55,66 +101,13 @@ review every method and give some implementation hints here: maximal weight of extrinsic at the target chain. In our test chains, we reject all messages that have declared dispatch weight larger than 50% of the maximal bridged extrinsic weight. -- `MessageBridge::weight_of_delivery_transaction`: you will need to return the maximal weight of the - delivery transaction that delivers a given message to the target chain. There are three main - things to notice: +- `MessageBridge::estimate_delivery_transaction`: you will need to return estimated dispatch weight and + size of the delivery transaction that delivers a given message to the target chain. - 1. weight, returned from this function is then used to compute the fee that the - message sender needs to pay for the delivery transaction. So it shall not be a simple dispatch - weight of delivery call - it should be the "weight" of the transaction itself, including per-byte - "weight", "weight" of signed extras and etc. - 1. the delivery transaction brings storage proof of - the message, not the message itself. So your transaction will include extra bytes. We suggest - computing the size of single empty value storage proof at the source chain, increase this value a - bit and hardcode it in the source chain runtime code. This size then must be added to the size of - payload and included in the weight computation; - 1. before implementing this function, please take - a look at the - [weight formula of delivery transaction](../../modules/message-lane/README.md#Weight-of-receive_messages_proof-call). 
- It adds some extra weight for every additional byte of the proof (everything above - `pallet_message_lane::EXPECTED_DEFAULT_MESSAGE_LENGTH`), so it's not trivial. Even better, please - refer to [our implementation](../millau/runtime/src/rialto_messages.rs) for test chains for - details. - -- `MessageBridge::weight_of_delivery_confirmation_transaction_on_this_chain`: you'll need to return - the maximal weight of a single message delivery confirmation transaction on this chain. All points - from the previous paragraph are also relevant here. - -- `MessageBridge::this_weight_to_this_balance`: this function needs to convert weight units into fee - units on this chain. Most probably this can be done by calling - `pallet_transaction_payment::Config::WeightToFee::calc()` for passed weight. - -- `MessageBridge::bridged_weight_to_bridged_balance`: this function needs to convert weight units - into fee units on the target chain. The best case is when you have the same conversion formula on - both chains - then you may just call the same formula from the previous paragraph. Otherwise, - you'll need to hardcode this formula into your runtime. - -- `MessageBridge::bridged_balance_to_this_balance`: this may be the easiest method to implement and - the hardest to maintain at the same time. If you don't have any automatic methods to determine - conversion rate, then you'll probably need to maintain it by yourself (by updating conversion - rate, stored in runtime storage). This means that if you're too late with an update, then you risk - to accept messages with lower-than-expected fee. So it may be wise to have some reserve in this - conversion rate, even if that means larger delivery and dispatch fees. - -## `ChainWithMessageLanes` Trait - -Apart from its methods, `MessageBridge` also has two associated types that are implementing the -`ChainWithMessageLanes` trait. One is for this chain and the other is for the bridged chain. 
The -trait is quite simple and can easily be implemented - you just need to specify types used at the -corresponding chain. There are two exceptions, though. Both may be changed in the future. Here they -are: - -- `ChainWithMessageLanes::Call`: it isn't a good idea to reference bridged chain runtime from your - runtime (cyclic references + maintaining on upgrades). So you can't know the type of bridged chain - call in your runtime. This type isn't actually used at this chain, so you may use `()` instead. - -- `ChainWithMessageLanes::MessageLaneInstance`: this is used to compute runtime storage keys. There - may be several instances of message lane pallet, included in the Runtime. Every instance stores - messages and these messages stored under different keys. When we are verifying storage proofs from - the bridged chain, we should know which instance we're talking to. This is fine, but there's - significant inconvenience with that - this chain runtime must have the same message lane pallet - instance. This does not necessarily mean that we should use the same instance on both chains - - this instance may be used to bridge with another chain/instance, or may not be used at all. +- `MessageBridge::transaction_payment`: you'll need to return fee that the submitter + must pay for given transaction on bridged chain. The best case is when you have the same conversion + formula on both chains - then you may just reuse the `ThisChainWithMessages::transaction_payment` + implementation. Otherwise, you'll need to hardcode this formula into your runtime. ## Helpers for the Source Chain @@ -125,10 +118,10 @@ are: `maximal_message_size`, `verify_chain_message`, `verify_messages_delivery_p `estimate_message_dispatch_and_delivery_fee`. `FromThisChainMessagePayload` is a message that the sender sends through our bridge. It is the -`pallet_bridge_call_dispatch::MessagePayload`, where `call` field is encoded target chain call. 
So +`pallet_bridge_dispatch::MessagePayload`, where `call` field is encoded target chain call. So at this chain we don't see internals of this call - we just know its size. -`FromThisChainMessageVerifier` is an implementation of `bp_message_lane::LaneMessageVerifier`. It +`FromThisChainMessageVerifier` is an implementation of `bp_messages::LaneMessageVerifier`. It has following checks in its `verify_message` method: 1. it'll verify that the used outbound lane is enabled in our runtime; @@ -138,8 +131,8 @@ has following checks in its `verify_message` method: 1. it'll reject a message if it has the wrong dispatch origin declared. Like if the submitter is not the root of this chain, but it tries to dispatch the message at the target chain using - `pallet_bridge_call_dispatch::CallOrigin::SourceRoot` origin. Or he has provided wrong signature - in the `pallet_bridge_call_dispatch::CallOrigin::TargetAccount` origin; + `pallet_bridge_dispatch::CallOrigin::SourceRoot` origin. Or he has provided wrong signature + in the `pallet_bridge_dispatch::CallOrigin::TargetAccount` origin; 1. it'll reject a message if the delivery and dispatch fee that the submitter wants to pay is lesser than the fee that is computed using the `estimate_message_dispatch_and_delivery_fee` function. @@ -174,10 +167,10 @@ The helpers for the target chain reside in the `target` submodule of the `FromBridgedChainMessagePayload` corresponds to the `FromThisChainMessagePayload` at the bridged chain. We expect that messages with this payload are stored in the `OutboundMessages` storage map of -the [message lane module](../../modules/message-lane/README.md). This map is used to build +the [messages module](../../modules/messages/README.md). This map is used to build `FromBridgedChainMessagesProof`. The proof holds the lane id, range of message nonces included in the proof, storage proof of `OutboundMessages` entries and the hash of bridged chain header that has been used to build the proof. 
Additionally, there's storage proof may contain the proof of outbound lane state. It may be required to prune `relayers` entries at this chain (see -[message lane module documentation](../../modules/message-lane/README.md#What-about-other-Constants-in-the-Message-Lane-Module-Configuration-Trait) +[messages module documentation](../../modules/messages/README.md#What-about-other-Constants-in-the-Messages-Module-Configuration-Trait) for details). This proof is verified by the `verify_messages_proof` function. diff --git a/polkadot/bridges/bin/runtime-common/src/lib.rs b/polkadot/bridges/bin/runtime-common/src/lib.rs index 2842e3b659..ae7efb4a41 100644 --- a/polkadot/bridges/bin/runtime-common/src/lib.rs +++ b/polkadot/bridges/bin/runtime-common/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/bin/runtime-common/src/messages.rs b/polkadot/bridges/bin/runtime-common/src/messages.rs index 04b2317749..8e83c0f94a 100644 --- a/polkadot/bridges/bin/runtime-common/src/messages.rs +++ b/polkadot/bridges/bin/runtime-common/src/messages.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -21,17 +21,19 @@ //! of to elements - message lane id and message nonce. 
use bp_message_dispatch::MessageDispatch as _; -use bp_message_lane::{ +use bp_messages::{ source_chain::{LaneMessageVerifier, Sender}, target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages}, InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, OutboundLaneData, }; -use bp_runtime::{InstanceId, Size}; +use bp_runtime::{InstanceId, Size, StorageProofChecker}; use codec::{Decode, Encode}; use frame_support::{traits::Instance, weights::Weight, RuntimeDebug}; use hash_db::Hasher; -use pallet_substrate_bridge::StorageProofChecker; -use sp_runtime::traits::{CheckedAdd, CheckedDiv, CheckedMul}; +use sp_runtime::{ + traits::{AtLeast32BitUnsigned, CheckedAdd, CheckedDiv, CheckedMul}, + FixedPointNumber, FixedPointOperand, FixedU128, +}; use sp_std::{cmp::PartialOrd, convert::TryFrom, fmt::Debug, marker::PhantomData, ops::RangeInclusive, vec::Vec}; use sp_trie::StorageProof; @@ -44,44 +46,16 @@ pub trait MessageBridge { const RELAYER_FEE_PERCENT: u32; /// This chain in context of message bridge. - type ThisChain: ThisChainWithMessageLanes; + type ThisChain: ThisChainWithMessages; /// Bridged chain in context of message bridge. - type BridgedChain: ChainWithMessageLanes; + type BridgedChain: BridgedChainWithMessages; - /// Maximal extrinsic size on target chain. - fn maximal_extrinsic_size_on_target_chain() -> u32; - - /// Returns feasible weights range for given message payload on the target chain. - /// - /// If message is being sent with the weight that is out of this range, then it - /// should be rejected. - /// - /// Weights returned from this function shall not include transaction overhead - /// (like weight of signature and signed extensions verification), because they're - /// already accounted by the `weight_of_delivery_transaction`. So this function should - /// return pure call dispatch weights range. 
- fn weight_limits_of_message_on_bridged_chain( - message_payload: &[u8], - ) -> RangeInclusive>>; - - /// Maximal weight of single message delivery transaction on Bridged chain. - fn weight_of_delivery_transaction(message_payload: &[u8]) -> WeightOf>; - - /// Maximal weight of single message delivery confirmation transaction on This chain. - fn weight_of_delivery_confirmation_transaction_on_this_chain() -> WeightOf>; - - /// Convert weight of This chain to the fee (paid in Balance) of This chain. - fn this_weight_to_this_balance(weight: WeightOf>) -> BalanceOf>; - - /// Convert weight of the Bridged chain to the fee (paid in Balance) of the Bridged chain. - fn bridged_weight_to_bridged_balance(weight: WeightOf>) -> BalanceOf>; - - /// Convert Bridged chain Balance into This chain Balance. + /// Convert Bridged chain balance into This chain balance. fn bridged_balance_to_this_balance(bridged_balance: BalanceOf>) -> BalanceOf>; } -/// Chain that has `message-lane` and `call-dispatch` modules. -pub trait ChainWithMessageLanes { +/// Chain that has `pallet-bridge-messages` and `dispatch` modules. +pub trait ChainWithMessages { /// Hash used in the chain. type Hash: Decode; /// Accound id on the chain. @@ -90,8 +64,6 @@ pub trait ChainWithMessageLanes { type Signer: Decode; /// Signature type used on the chain. type Signature: Decode; - /// Call type on the chain. - type Call: Encode + Decode; /// Type of weight that is used on the chain. This would almost always be a regular /// `frame_support::weight::Weight`. But since the meaning of weight on different chains /// may be different, the `WeightOf<>` construct is used to avoid confusion between @@ -100,58 +72,104 @@ pub trait ChainWithMessageLanes { /// Type of balances that is used on the chain. type Balance: Encode + Decode + CheckedAdd + CheckedDiv + CheckedMul + PartialOrd + From + Copy; - /// Instance of the message-lane pallet. 
- type MessageLaneInstance: Instance; + /// Instance of the `pallet-bridge-messages` pallet. + type MessagesInstance: Instance; } -/// This chain that has `message-lane` and `call-dispatch` modules. -pub trait ThisChainWithMessageLanes: ChainWithMessageLanes { +/// Message related transaction parameters estimation. +#[derive(RuntimeDebug)] +pub struct MessageTransaction { + /// The estimated dispatch weight of the transaction. + pub dispatch_weight: Weight, + /// The estimated size of the encoded transaction. + pub size: u32, +} + +/// This chain that has `pallet-bridge-messages` and `dispatch` modules. +pub trait ThisChainWithMessages: ChainWithMessages { + /// Call type on the chain. + type Call: Encode + Decode; + /// Are we accepting any messages to the given lane? fn is_outbound_lane_enabled(lane: &LaneId) -> bool; - /// Maximal number of pending (not yet delivered) messages at this chain. + /// Maximal number of pending (not yet delivered) messages at This chain. /// /// Any messages over this limit, will be rejected. fn maximal_pending_messages_at_outbound_lane() -> MessageNonce; + + /// Estimate size and weight of single message delivery confirmation transaction at This chain. + fn estimate_delivery_confirmation_transaction() -> MessageTransaction>; + + /// Returns minimal transaction fee that must be paid for given transaction at This chain. + fn transaction_payment(transaction: MessageTransaction>) -> BalanceOf; +} + +/// Bridged chain that has `pallet-bridge-messages` and `dispatch` modules. +pub trait BridgedChainWithMessages: ChainWithMessages { + /// Maximal extrinsic size at Bridged chain. + fn maximal_extrinsic_size() -> u32; + + /// Returns feasible weights range for given message payload at the Bridged chain. + /// + /// If message is being sent with the weight that is out of this range, then it + /// should be rejected. 
+ /// + /// Weights returned from this function shall not include transaction overhead + /// (like weight of signature and signed extensions verification), because they're + /// already accounted by the `weight_of_delivery_transaction`. So this function should + /// return pure call dispatch weights range. + fn message_weight_limits(message_payload: &[u8]) -> RangeInclusive; + + /// Estimate size and weight of single message delivery transaction at the Bridged chain. + fn estimate_delivery_transaction( + message_payload: &[u8], + message_dispatch_weight: WeightOf, + ) -> MessageTransaction>; + + /// Returns minimal transaction fee that must be paid for given transaction at the Bridged chain. + fn transaction_payment(transaction: MessageTransaction>) -> BalanceOf; } pub(crate) type ThisChain = ::ThisChain; pub(crate) type BridgedChain = ::BridgedChain; -pub(crate) type HashOf = ::Hash; -pub(crate) type AccountIdOf = ::AccountId; -pub(crate) type SignerOf = ::Signer; -pub(crate) type SignatureOf = ::Signature; -pub(crate) type WeightOf = ::Weight; -pub(crate) type BalanceOf = ::Balance; -pub(crate) type CallOf = ::Call; -pub(crate) type MessageLaneInstanceOf = ::MessageLaneInstance; +pub(crate) type HashOf = ::Hash; +pub(crate) type AccountIdOf = ::AccountId; +pub(crate) type SignerOf = ::Signer; +pub(crate) type SignatureOf = ::Signature; +pub(crate) type WeightOf = ::Weight; +pub(crate) type BalanceOf = ::Balance; +pub(crate) type MessagesInstanceOf = ::MessagesInstance; + +pub(crate) type CallOf = ::Call; /// Raw storage proof type (just raw trie nodes). type RawStorageProof = Vec>; -/// Compute weight of transaction at runtime where: +/// Compute fee of transaction at runtime where regular transaction payment pallet is being used. /// -/// - transaction payment pallet is being used; -/// - fee multiplier is zero. 
-pub fn transaction_weight_without_multiplier( - base_weight: Weight, - payload_size: Weight, - dispatch_weight: Weight, -) -> Weight { - // non-adjustable per-byte weight is mapped 1:1 to tx weight - let per_byte_weight = payload_size; +/// The value of `multiplier` parameter is the expected value of `pallet_transaction_payment::NextFeeMultiplier` +/// at the moment when transaction is submitted. If you're charging this payment in advance (and that's what +/// happens with delivery and confirmation transaction in this crate), then there's a chance that the actual +/// fee will be larger than what is paid in advance. So the value must be chosen carefully. +pub fn transaction_payment( + base_extrinsic_weight: Weight, + per_byte_fee: Balance, + multiplier: FixedU128, + weight_to_fee: impl Fn(Weight) -> Balance, + transaction: MessageTransaction, +) -> Balance { + // base fee is charged for every tx + let base_fee = weight_to_fee(base_extrinsic_weight); - // we assume that adjustable per-byte weight is always zero - let adjusted_per_byte_weight = 0; + // non-adjustable per-byte fee + let len_fee = per_byte_fee.saturating_mul(Balance::from(transaction.size)); - // we assume that transaction tip we use is also zero - let transaction_tip_weight = 0; + // the adjustable part of the fee + let unadjusted_weight_fee = weight_to_fee(transaction.dispatch_weight); + let adjusted_weight_fee = multiplier.saturating_mul_int(unadjusted_weight_fee); - base_weight - .saturating_add(per_byte_weight) - .saturating_add(adjusted_per_byte_weight) - .saturating_add(transaction_tip_weight) - .saturating_add(dispatch_weight) + base_fee.saturating_add(len_fee).saturating_add(adjusted_weight_fee) } /// Sub-module that is declaring types required for processing This -> Bridged chain messages. @@ -162,7 +180,7 @@ pub mod source { pub type BridgedChainOpaqueCall = Vec; /// Message payload for This -> Bridged chain messages. 
- pub type FromThisChainMessagePayload = pallet_bridge_call_dispatch::MessagePayload< + pub type FromThisChainMessagePayload = pallet_bridge_dispatch::MessagePayload< AccountIdOf>, SignerOf>, SignatureOf>, @@ -203,7 +221,7 @@ pub mod source { /// This verifier assumes following: /// /// - all message lanes are equivalent, so all checks are the same; - /// - messages are being dispatched using `pallet-bridge-call-dispatch` pallet on the target chain. + /// - messages are being dispatched using `pallet-bridge-dispatch` pallet on the target chain. /// /// Following checks are made: /// @@ -249,8 +267,8 @@ pub mod source { } // Do the dispatch-specific check. We assume that the target chain uses - // `CallDispatch`, so we verify the message accordingly. - pallet_bridge_call_dispatch::verify_message_origin(submitter, payload).map_err(|_| BAD_ORIGIN)?; + // `Dispatch`, so we verify the message accordingly. + pallet_bridge_dispatch::verify_message_origin(submitter, payload).map_err(|_| BAD_ORIGIN)?; let minimal_fee_in_this_tokens = estimate_message_dispatch_and_delivery_fee::(payload, B::RELAYER_FEE_PERCENT)?; @@ -266,7 +284,7 @@ pub mod source { /// Return maximal message size of This -> Bridged chain message. pub fn maximal_message_size() -> u32 { - super::target::maximal_incoming_message_size(B::maximal_extrinsic_size_on_target_chain()) + super::target::maximal_incoming_message_size(BridgedChain::::maximal_extrinsic_size()) } /// Do basic Bridged-chain specific verification of This -> Bridged chain message. 
@@ -277,7 +295,7 @@ pub mod source { pub fn verify_chain_message( payload: &FromThisChainMessagePayload, ) -> Result<(), &'static str> { - let weight_limits = B::weight_limits_of_message_on_bridged_chain(&payload.call); + let weight_limits = BridgedChain::::message_weight_limits(&payload.call); if !weight_limits.contains(&payload.weight.into()) { return Err("Incorrect message weight declared"); } @@ -308,18 +326,17 @@ pub mod source { relayer_fee_percent: u32, ) -> Result>, &'static str> { // the fee (in Bridged tokens) of all transactions that are made on the Bridged chain - let delivery_fee = B::bridged_weight_to_bridged_balance(B::weight_of_delivery_transaction(&payload.call)); - let dispatch_fee = B::bridged_weight_to_bridged_balance(payload.weight.into()); + let delivery_transaction = + BridgedChain::::estimate_delivery_transaction(&payload.call, payload.weight.into()); + let delivery_transaction_fee = BridgedChain::::transaction_payment(delivery_transaction); // the fee (in This tokens) of all transactions that are made on This chain - let delivery_confirmation_fee = - B::this_weight_to_this_balance(B::weight_of_delivery_confirmation_transaction_on_this_chain()); + let confirmation_transaction = ThisChain::::estimate_delivery_confirmation_transaction(); + let confirmation_transaction_fee = ThisChain::::transaction_payment(confirmation_transaction); // minimal fee (in This tokens) is a sum of all required fees - let minimal_fee = delivery_fee - .checked_add(&dispatch_fee) - .map(B::bridged_balance_to_this_balance) - .and_then(|fee| fee.checked_add(&delivery_confirmation_fee)); + let minimal_fee = + B::bridged_balance_to_this_balance(delivery_transaction_fee).checked_add(&confirmation_transaction_fee); // before returning, add extra fee that is paid to the relayer (relayer interest) minimal_fee @@ -339,25 +356,24 @@ pub mod source { proof: FromBridgedChainMessagesDeliveryProof>>, ) -> Result, &'static str> where - ThisRuntime: pallet_substrate_bridge::Config, - 
ThisRuntime: pallet_message_lane::Config>>, - HashOf>: - Into::BridgedChain>>, + ThisRuntime: pallet_bridge_grandpa::Config, + ThisRuntime: pallet_bridge_messages::Config>>, + HashOf>: Into::BridgedChain>>, { let FromBridgedChainMessagesDeliveryProof { bridged_header_hash, storage_proof, lane, } = proof; - pallet_substrate_bridge::Module::::parse_finalized_storage_proof( + pallet_bridge_grandpa::Pallet::::parse_finalized_storage_proof( bridged_header_hash.into(), StorageProof::new(storage_proof), |storage| { // Messages delivery proof is just proof of single storage key read => any error // is fatal. - let storage_inbound_lane_data_key = pallet_message_lane::storage_keys::inbound_lane_data_key::< + let storage_inbound_lane_data_key = pallet_bridge_messages::storage_keys::inbound_lane_data_key::< ThisRuntime, - MessageLaneInstanceOf>, + MessagesInstanceOf>, >(&lane); let raw_inbound_lane_data = storage .read_value(storage_inbound_lane_data_key.0.as_ref()) @@ -378,14 +394,14 @@ pub mod target { use super::*; /// Call origin for Bridged -> This chain messages. - pub type FromBridgedChainMessageCallOrigin = pallet_bridge_call_dispatch::CallOrigin< + pub type FromBridgedChainMessageCallOrigin = pallet_bridge_dispatch::CallOrigin< AccountIdOf>, SignerOf>, SignatureOf>, >; /// Decoded Bridged -> This message payload. - pub type FromBridgedChainMessagePayload = pallet_bridge_call_dispatch::MessagePayload< + pub type FromBridgedChainMessagePayload = pallet_bridge_dispatch::MessagePayload< AccountIdOf>, SignerOf>, SignatureOf>, @@ -440,19 +456,19 @@ pub mod target { /// Dispatching Bridged -> This chain messages. 
#[derive(RuntimeDebug, Clone, Copy)] - pub struct FromBridgedChainMessageDispatch { - _marker: PhantomData<(B, ThisRuntime, ThisCallDispatchInstance)>, + pub struct FromBridgedChainMessageDispatch { + _marker: PhantomData<(B, ThisRuntime, ThisDispatchInstance)>, } - impl - MessageDispatch< as ChainWithMessageLanes>::Balance> - for FromBridgedChainMessageDispatch + impl + MessageDispatch< as ChainWithMessages>::Balance> + for FromBridgedChainMessageDispatch where - ThisCallDispatchInstance: frame_support::traits::Instance, - ThisRuntime: pallet_bridge_call_dispatch::Config, - >::Event: - From>, - pallet_bridge_call_dispatch::Module: + ThisDispatchInstance: frame_support::traits::Instance, + ThisRuntime: pallet_bridge_dispatch::Config, + >::Event: + From>, + pallet_bridge_dispatch::Pallet: bp_message_dispatch::MessageDispatch<(LaneId, MessageNonce), Message = FromBridgedChainMessagePayload>, { type DispatchPayload = FromBridgedChainMessagePayload; @@ -465,7 +481,7 @@ pub mod target { fn dispatch(message: DispatchMessage>>) { let message_id = (message.key.lane_id, message.key.nonce); - pallet_bridge_call_dispatch::Module::::dispatch( + pallet_bridge_dispatch::Pallet::::dispatch( B::INSTANCE, message_id, message.data.payload.map_err(drop), @@ -493,16 +509,15 @@ pub mod target { messages_count: u32, ) -> Result>>>, &'static str> where - ThisRuntime: pallet_substrate_bridge::Config, - ThisRuntime: pallet_message_lane::Config>>, - HashOf>: - Into::BridgedChain>>, + ThisRuntime: pallet_bridge_grandpa::Config, + ThisRuntime: pallet_bridge_messages::Config>>, + HashOf>: Into::BridgedChain>>, { verify_messages_proof_with_parser::( proof, messages_count, |bridged_header_hash, bridged_storage_proof| { - pallet_substrate_bridge::Module::::parse_finalized_storage_proof( + pallet_bridge_grandpa::Pallet::::parse_finalized_storage_proof( bridged_header_hash.into(), StorageProof::new(bridged_storage_proof), |storage_adapter| storage_adapter, @@ -556,11 +571,11 @@ pub mod target { 
where H: Hasher, B: MessageBridge, - ThisRuntime: pallet_message_lane::Config>>, + ThisRuntime: pallet_bridge_messages::Config>>, { fn read_raw_outbound_lane_data(&self, lane_id: &LaneId) -> Option> { - let storage_outbound_lane_data_key = pallet_message_lane::storage_keys::outbound_lane_data_key::< - MessageLaneInstanceOf>, + let storage_outbound_lane_data_key = pallet_bridge_messages::storage_keys::outbound_lane_data_key::< + MessagesInstanceOf>, >(lane_id); self.storage .read_value(storage_outbound_lane_data_key.0.as_ref()) @@ -568,9 +583,9 @@ pub mod target { } fn read_raw_message(&self, message_key: &MessageKey) -> Option> { - let storage_message_key = pallet_message_lane::storage_keys::message_key::< + let storage_message_key = pallet_bridge_messages::storage_keys::message_key::< ThisRuntime, - MessageLaneInstanceOf>, + MessagesInstanceOf>, >(&message_key.lane_id, message_key.nonce); self.storage.read_value(storage_message_key.0.as_ref()).ok()? } @@ -681,31 +696,6 @@ mod tests { type ThisChain = ThisChain; type BridgedChain = BridgedChain; - fn maximal_extrinsic_size_on_target_chain() -> u32 { - BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE - } - - fn weight_limits_of_message_on_bridged_chain(message_payload: &[u8]) -> RangeInclusive { - let begin = std::cmp::min(BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, message_payload.len() as Weight); - begin..=BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT - } - - fn weight_of_delivery_transaction(_message_payload: &[u8]) -> Weight { - DELIVERY_TRANSACTION_WEIGHT - } - - fn weight_of_delivery_confirmation_transaction_on_this_chain() -> Weight { - DELIVERY_CONFIRMATION_TRANSACTION_WEIGHT - } - - fn this_weight_to_this_balance(weight: Weight) -> ThisChainBalance { - ThisChainBalance(weight as u32 * THIS_CHAIN_WEIGHT_TO_BALANCE_RATE as u32) - } - - fn bridged_weight_to_bridged_balance(weight: Weight) -> BridgedChainBalance { - BridgedChainBalance(weight as u32 * BRIDGED_CHAIN_WEIGHT_TO_BALANCE_RATE as u32) - } - fn 
bridged_balance_to_this_balance(bridged_balance: BridgedChainBalance) -> ThisChainBalance { ThisChainBalance(bridged_balance.0 * BRIDGED_CHAIN_TO_THIS_CHAIN_BALANCE_RATE as u32) } @@ -722,30 +712,6 @@ mod tests { type ThisChain = BridgedChain; type BridgedChain = ThisChain; - fn maximal_extrinsic_size_on_target_chain() -> u32 { - unreachable!() - } - - fn weight_limits_of_message_on_bridged_chain(_message_payload: &[u8]) -> RangeInclusive { - unreachable!() - } - - fn weight_of_delivery_transaction(_message_payload: &[u8]) -> Weight { - unreachable!() - } - - fn weight_of_delivery_confirmation_transaction_on_this_chain() -> Weight { - unreachable!() - } - - fn this_weight_to_this_balance(_weight: Weight) -> BridgedChainBalance { - unreachable!() - } - - fn bridged_weight_to_bridged_balance(_weight: Weight) -> ThisChainBalance { - unreachable!() - } - fn bridged_balance_to_this_balance(_this_balance: ThisChainBalance) -> BridgedChainBalance { unreachable!() } @@ -840,19 +806,20 @@ mod tests { struct ThisChain; - impl ChainWithMessageLanes for ThisChain { + impl ChainWithMessages for ThisChain { type Hash = (); type AccountId = ThisChainAccountId; type Signer = ThisChainSigner; type Signature = ThisChainSignature; - type Call = ThisChainCall; type Weight = frame_support::weights::Weight; type Balance = ThisChainBalance; - type MessageLaneInstance = pallet_message_lane::DefaultInstance; + type MessagesInstance = pallet_bridge_messages::DefaultInstance; } - impl ThisChainWithMessageLanes for ThisChain { + impl ThisChainWithMessages for ThisChain { + type Call = ThisChainCall; + fn is_outbound_lane_enabled(lane: &LaneId) -> bool { lane == TEST_LANE_ID } @@ -860,23 +827,56 @@ mod tests { fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { MAXIMAL_PENDING_MESSAGES_AT_TEST_LANE } + + fn estimate_delivery_confirmation_transaction() -> MessageTransaction> { + MessageTransaction { + dispatch_weight: DELIVERY_CONFIRMATION_TRANSACTION_WEIGHT, + size: 0, + } + } + + 
fn transaction_payment(transaction: MessageTransaction>) -> BalanceOf { + ThisChainBalance(transaction.dispatch_weight as u32 * THIS_CHAIN_WEIGHT_TO_BALANCE_RATE as u32) + } + } + + impl BridgedChainWithMessages for ThisChain { + fn maximal_extrinsic_size() -> u32 { + unreachable!() + } + + fn message_weight_limits(_message_payload: &[u8]) -> RangeInclusive { + unreachable!() + } + + fn estimate_delivery_transaction( + _message_payload: &[u8], + _message_dispatch_weight: WeightOf, + ) -> MessageTransaction> { + unreachable!() + } + + fn transaction_payment(_transaction: MessageTransaction>) -> BalanceOf { + unreachable!() + } } struct BridgedChain; - impl ChainWithMessageLanes for BridgedChain { + impl ChainWithMessages for BridgedChain { type Hash = (); type AccountId = BridgedChainAccountId; type Signer = BridgedChainSigner; type Signature = BridgedChainSignature; - type Call = BridgedChainCall; type Weight = frame_support::weights::Weight; type Balance = BridgedChainBalance; - type MessageLaneInstance = pallet_message_lane::DefaultInstance; + type MessagesInstance = pallet_bridge_messages::DefaultInstance; } - impl ThisChainWithMessageLanes for BridgedChain { + impl ThisChainWithMessages for BridgedChain { + type Call = BridgedChainCall; + fn is_outbound_lane_enabled(_lane: &LaneId) -> bool { unreachable!() } @@ -884,6 +884,39 @@ mod tests { fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { unreachable!() } + + fn estimate_delivery_confirmation_transaction() -> MessageTransaction> { + unreachable!() + } + + fn transaction_payment(_transaction: MessageTransaction>) -> BalanceOf { + unreachable!() + } + } + + impl BridgedChainWithMessages for BridgedChain { + fn maximal_extrinsic_size() -> u32 { + BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE + } + + fn message_weight_limits(message_payload: &[u8]) -> RangeInclusive { + let begin = std::cmp::min(BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, message_payload.len() as Weight); + begin..=BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT + } 
+ + fn estimate_delivery_transaction( + _message_payload: &[u8], + message_dispatch_weight: WeightOf, + ) -> MessageTransaction> { + MessageTransaction { + dispatch_weight: DELIVERY_TRANSACTION_WEIGHT + message_dispatch_weight, + size: 0, + } + } + + fn transaction_payment(transaction: MessageTransaction>) -> BalanceOf { + BridgedChainBalance(transaction.dispatch_weight as u32 * BRIDGED_CHAIN_WEIGHT_TO_BALANCE_RATE as u32) + } } fn test_lane_outbound_data() -> OutboundLaneData { @@ -896,7 +929,7 @@ mod tests { let message_on_bridged_chain = source::FromThisChainMessagePayload:: { spec_version: 1, weight: 100, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, + origin: pallet_bridge_dispatch::CallOrigin::SourceRoot, call: ThisChainCall::Transfer.encode(), } .encode(); @@ -910,7 +943,7 @@ mod tests { target::FromBridgedChainMessagePayload:: { spec_version: 1, weight: 100, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, + origin: pallet_bridge_dispatch::CallOrigin::SourceRoot, call: target::FromBridgedChainEncodedMessageCall:: { encoded_call: ThisChainCall::Transfer.encode(), _marker: PhantomData::default(), @@ -927,7 +960,7 @@ mod tests { source::FromThisChainMessagePayload:: { spec_version: 1, weight: 100, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, + origin: pallet_bridge_dispatch::CallOrigin::SourceRoot, call: vec![42], } } @@ -977,7 +1010,7 @@ mod tests { let payload = source::FromThisChainMessagePayload:: { spec_version: 1, weight: 100, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, + origin: pallet_bridge_dispatch::CallOrigin::SourceRoot, call: vec![42], }; @@ -1020,7 +1053,7 @@ mod tests { let payload = source::FromThisChainMessagePayload:: { spec_version: 1, weight: 100, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceAccount(ThisChainAccountId(1)), + origin: pallet_bridge_dispatch::CallOrigin::SourceAccount(ThisChainAccountId(1)), call: vec![42], }; @@ -1087,7 +1120,7 @@ mod tests { > { 
spec_version: 1, weight: 5, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, + origin: pallet_bridge_dispatch::CallOrigin::SourceRoot, call: vec![1, 2, 3, 4, 5, 6], },) .is_err() @@ -1102,7 +1135,7 @@ mod tests { > { spec_version: 1, weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT + 1, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, + origin: pallet_bridge_dispatch::CallOrigin::SourceRoot, call: vec![1, 2, 3, 4, 5, 6], },) .is_err() @@ -1117,7 +1150,7 @@ mod tests { > { spec_version: 1, weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, + origin: pallet_bridge_dispatch::CallOrigin::SourceRoot, call: vec![0; source::maximal_message_size::() as usize + 1], },) .is_err() @@ -1132,7 +1165,7 @@ mod tests { > { spec_version: 1, weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, + origin: pallet_bridge_dispatch::CallOrigin::SourceRoot, call: vec![0; source::maximal_message_size::() as _], },), Ok(()), @@ -1371,4 +1404,38 @@ mod tests { Err(target::MessageProofError::MessagesCountMismatch), ); } + + #[test] + fn transaction_payment_works_with_zero_multiplier() { + assert_eq!( + transaction_payment( + 100, + 10, + FixedU128::zero(), + |weight| weight, + MessageTransaction { + size: 50, + dispatch_weight: 777 + }, + ), + 100 + 50 * 10, + ); + } + + #[test] + fn transaction_payment_works_with_non_zero_multiplier() { + assert_eq!( + transaction_payment( + 100, + 10, + FixedU128::one(), + |weight| weight, + MessageTransaction { + size: 50, + dispatch_weight: 777 + }, + ), + 100 + 50 * 10 + 777, + ); + } } diff --git a/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs b/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs index 4aa2abbd6b..639e5f6c50 100644 --- a/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs +++ b/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs @@ -1,4 +1,4 @@ -// 
Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Everything required to run benchmarks of message-lanes, based on +//! Everything required to run benchmarks of messages module, based on //! `bridge_runtime_common::messages` implementation. #![cfg(feature = "runtime-benchmarks")] @@ -24,11 +24,11 @@ use crate::messages::{ BridgedChain, HashOf, MessageBridge, ThisChain, }; -use bp_message_lane::{LaneId, MessageData, MessageKey, MessagePayload}; +use bp_messages::{LaneId, MessageData, MessageKey, MessagePayload}; use codec::Encode; use ed25519_dalek::{PublicKey, SecretKey, Signer, KEYPAIR_LENGTH, SECRET_KEY_LENGTH}; use frame_support::weights::Weight; -use pallet_message_lane::benchmarking::{MessageDeliveryProofParams, MessageProofParams, ProofSize}; +use pallet_bridge_messages::benchmarking::{MessageDeliveryProofParams, MessageProofParams, ProofSize}; use sp_core::Hasher; use sp_runtime::traits::Header; use sp_std::prelude::*; @@ -62,7 +62,7 @@ pub fn ed25519_sign(target_call: &impl Encode, source_account_id: &impl Encode) } /// Prepare proof of messages for the `receive_messages_proof` call. 
-pub fn prepare_message_proof( +pub fn prepare_message_proof( params: MessageProofParams, make_bridged_message_storage_key: MM, make_bridged_outbound_lane_data_key: ML, @@ -73,7 +73,8 @@ pub fn prepare_message_proof( where B: MessageBridge, H: Hasher, - R: pallet_substrate_bridge::Config, + R: pallet_bridge_grandpa::Config, + FI: 'static, ::Hash: Into>>, MM: Fn(MessageKey) -> Vec, ML: Fn(LaneId) -> Vec, @@ -129,7 +130,7 @@ where // prepare Bridged chain header and insert it into the Substrate pallet let bridged_header = make_bridged_header(root); let bridged_header_hash = bridged_header.hash(); - pallet_substrate_bridge::initialize_for_benchmarks::(bridged_header); + pallet_bridge_grandpa::initialize_for_benchmarks::(bridged_header); ( FromBridgedChainMessagesProof { @@ -146,7 +147,7 @@ where } /// Prepare proof of messages delivery for the `receive_messages_delivery_proof` call. -pub fn prepare_message_delivery_proof( +pub fn prepare_message_delivery_proof( params: MessageDeliveryProofParams>>, make_bridged_inbound_lane_data_key: ML, make_bridged_header: MH, @@ -154,7 +155,8 @@ pub fn prepare_message_delivery_proof( where B: MessageBridge, H: Hasher, - R: pallet_substrate_bridge::Config, + R: pallet_bridge_grandpa::Config, + FI: 'static, ::Hash: Into>>, ML: Fn(LaneId) -> Vec, MH: Fn(H::Out) -> ::Header, @@ -181,7 +183,7 @@ where // prepare Bridged chain header and insert it into the Substrate pallet let bridged_header = make_bridged_header(root); let bridged_header_hash = bridged_header.hash(); - pallet_substrate_bridge::initialize_for_benchmarks::(bridged_header); + pallet_bridge_grandpa::initialize_for_benchmarks::(bridged_header); FromBridgedChainMessagesDeliveryProof { bridged_header_hash: bridged_header_hash.into(), diff --git a/polkadot/bridges/deny.toml b/polkadot/bridges/deny.toml index 2e384622f5..7f91bce7c9 100644 --- a/polkadot/bridges/deny.toml +++ b/polkadot/bridges/deny.toml @@ -48,14 +48,14 @@ notice = "warn" # A list of advisory IDs to ignore. 
Note that ignored advisories will still # output a note when they are encountered. ignore = [ + # generic-array lifetime errasure. If all upstream crates upgrade to >=0.14.0 + # we can remove this. + "RUSTSEC-2020-0146", # yaml-rust < clap. Not feasible to upgrade and also not possible to trigger in practice. "RUSTSEC-2018-0006", # We need to wait until Substrate updates their `wasmtime` dependency to fix this. # TODO: See issue #676: https://github.com/paritytech/parity-bridges-common/issues/676 "RUSTSEC-2021-0013", - # We need to wait until Substrate updates their `libp2p` dependency to fix this. - # TODO: See issue #681: https://github.com/paritytech/parity-bridges-common/issues/681 - "RUSTSEC-2020-0123", # We need to wait until Substrate updates their `hyper` dependency to fix this. # TODO: See issue #710: https://github.com/paritytech/parity-bridges-common/issues/681 "RUSTSEC-2021-0020", diff --git a/polkadot/bridges/deployments/BridgeDeps.Dockerfile b/polkadot/bridges/deployments/BridgeDeps.Dockerfile index af0f7816e6..a18a94a715 100644 --- a/polkadot/bridges/deployments/BridgeDeps.Dockerfile +++ b/polkadot/bridges/deployments/BridgeDeps.Dockerfile @@ -2,22 +2,23 @@ # # This image is meant to be used as a building block when building images for # the various components in the bridge repo, such as nodes and relayers. 
-FROM ubuntu:xenial +FROM ubuntu:20.04 -ENV LAST_DEPS_UPDATE 2020-12-21 +ENV LAST_DEPS_UPDATE 2021-04-01 +ENV DEBIAN_FRONTEND=noninteractive RUN set -eux; \ apt-get update && \ apt-get install -y curl ca-certificates && \ apt-get install -y cmake pkg-config libssl-dev git clang libclang-dev -ENV LAST_CERTS_UPDATE 2020-12-21 +ENV LAST_CERTS_UPDATE 2021-04-01 RUN update-ca-certificates && \ curl https://sh.rustup.rs -sSf | sh -s -- -y ENV PATH="/root/.cargo/bin:${PATH}" -ENV LAST_RUST_UPDATE 2020-12-21 +ENV LAST_RUST_UPDATE 2021-04-01 RUN rustup update stable && \ rustup install nightly && \ @@ -26,7 +27,6 @@ RUN rustup update stable && \ RUN rustc -vV && \ cargo -V && \ gcc -v && \ - g++ -v && \ cmake --version ENV RUST_BACKTRACE 1 diff --git a/polkadot/bridges/deployments/README.md b/polkadot/bridges/deployments/README.md index 857c8c28e0..d553fca611 100644 --- a/polkadot/bridges/deployments/README.md +++ b/polkadot/bridges/deployments/README.md @@ -93,12 +93,12 @@ seeds for the `sr25519` keys. This seed may also be used in the signer argument and PoA relays. 
Example: ```bash -./substrate-relay relay-headers rialto-to-millau \ - --rialto-host rialto-node-alice \ - --rialto-port 9944 \ - --millau-host millau-node-alice \ - --millau-port 9944 \ - --rialto-signer //Harry \ +./substrate-relay relay-headers RialtoToMillau \ + --source-host rialto-node-alice \ + --source-port 9944 \ + --target-host millau-node-alice \ + --target-port 9944 \ + --source-signer //Harry \ --prometheus-host=0.0.0.0 ``` @@ -114,14 +114,18 @@ Following accounts are used when `poa-rialto` bridge is running: Following accounts are used when `rialto-millau` bridge is running: -- Millau's `Charlie` signs relay transactions with new Rialto headers; -- Rialto's `Charlie` signs relay transactions with new Millau headers; +- Millau's `Charlie` signs complex headers+messages relay transactions on Millau chain; +- Rialto's `Charlie` signs complex headers+messages relay transactions on Rialto chain; - Millau's `Dave` signs Millau transactions which contain messages for Rialto; - Rialto's `Dave` signs Rialto transactions which contain messages for Millau; -- Millau's `Eve` signs relay transactions with message delivery confirmations from Rialto to Millau; -- Rialto's `Eve` signs relay transactions with messages from Millau to Rialto; -- Millau's `Ferdie` signs relay transactions with messages from Rialto to Millau; -- Rialto's `Ferdie` signs relay transactions with message delivery confirmations from Millau to Rialto. +- Millau's `Eve` signs relay transactions with message delivery confirmations (lane 00000001) from Rialto to Millau; +- Rialto's `Eve` signs relay transactions with messages (lane 00000001) from Millau to Rialto; +- Millau's `Ferdie` signs relay transactions with messages (lane 00000001) from Rialto to Millau; +- Rialto's `Ferdie` signs relay transactions with message delivery confirmations (lane 00000001) from Millau to Rialto. 
+ +Following accounts are used when `westend-millau` bridge is running: + +- Millau's `George` signs relay transactions with new Westend headers. ### Docker Usage When the network is running you can query logs from individual nodes using: diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-poa-to-rialto-exchange-dashboard.json b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-exchange-dashboard.json similarity index 100% rename from polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-poa-to-rialto-exchange-dashboard.json rename to polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-exchange-dashboard.json diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-poa-to-rialto-headers-dashboard.json b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-headers-dashboard.json similarity index 99% rename from polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-poa-to-rialto-headers-dashboard.json rename to polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-headers-dashboard.json index 36c2ab9469..05d06e9498 100644 --- a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-poa-to-rialto-headers-dashboard.json +++ b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-headers-dashboard.json @@ -239,7 +239,7 @@ { "expr": "max_over_time(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"source\"}[2m])", "interval": "", - "legendFormat": "Number of Ethereum PoA Headers Synced on Rialto", + "legendFormat": "Number of new Headers on Ethereum PoA (Last 2 Mins)", "refId": "A" } ], diff 
--git a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-rialto-to-poa-headers-dashboard.json b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-rialto-to-poa-headers-dashboard.json similarity index 99% rename from polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-rialto-to-poa-headers-dashboard.json rename to polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-rialto-to-poa-headers-dashboard.json index cac19b3fde..149c637fcb 100644 --- a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-rialto-to-poa-headers-dashboard.json +++ b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-rialto-to-poa-headers-dashboard.json @@ -239,7 +239,7 @@ { "expr": "max_over_time(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"source\"}[2m])", "interval": "", - "legendFormat": "Number of Rialto Headers Synced on Ethereum PoA", + "legendFormat": "Number of new Headers on Rialto (Last 2 Mins)", "refId": "A" } ], diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/prometheus/prometheus.yml b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/prometheus/prometheus.yml deleted file mode 100644 index 8d8e3ae877..0000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/prometheus/prometheus.yml +++ /dev/null @@ -1,24 +0,0 @@ -scrape_configs: - # The job name is added as a label `job=` to any timeseries scraped from this config. - - job_name: 'poa_to_rialto_relay_node' - - # Override the global default and scrape targets from this job every 15 seconds. - scrape_interval: 15s - static_configs: - - targets: ['relay-headers-poa-to-rialto:9616'] - - # The job name is added as a label `job=` to any timeseries scraped from this config. 
- - job_name: 'poa_exchange_rialto_relay_node' - - # Override the global default and scrape targets from this job every 15 seconds. - scrape_interval: 15s - static_configs: - - targets: ['relay-poa-exchange-rialto:9616'] - - # The job name is added as a label `job=` to any timeseries scraped from this config. - - job_name: 'rialto_to_poa_relay_node' - - # Override the global default and scrape targets from this job every 15 seconds. - scrape_interval: 15s - static_configs: - - targets: ['relay-headers-rialto-to-poa:9616'] diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/prometheus/targets.yml b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/prometheus/targets.yml new file mode 100644 index 0000000000..b0038008ef --- /dev/null +++ b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/prometheus/targets.yml @@ -0,0 +1,4 @@ +- targets: + - relay-headers-poa-to-rialto:9616 + - relay-poa-exchange-rialto:9616 + - relay-headers-rialto-to-poa:9616 diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/docker-compose.yml b/polkadot/bridges/deployments/bridges/poa-rialto/docker-compose.yml index 13faa09839..6bdcb23012 100644 --- a/polkadot/bridges/deployments/bridges/poa-rialto/docker-compose.yml +++ b/polkadot/bridges/deployments/bridges/poa-rialto/docker-compose.yml @@ -1,5 +1,7 @@ # This Compose file should be built using the Rialto and Eth-PoA node # compose files. Otherwise it won't work. +# +# Exposed ports: 9616, 9716, 9816, 9916, 8080 version: '3.5' services: @@ -79,12 +81,12 @@ services: # Note: These are being overridden from the top level `monitoring` compose file. 
prometheus-metrics: volumes: - - ./bridges/poa-rialto/dashboard/prometheus/:/etc/prometheus/ + - ./bridges/poa-rialto/dashboard/prometheus/targets.yml:/etc/prometheus/targets-poa-rialto.yml depends_on: *all-nodes grafana-dashboard: volumes: - - ./bridges/poa-rialto/dashboard/grafana/provisioning/:/etc/grafana/provisioning/ + - ./bridges/poa-rialto/dashboard/grafana:/etc/grafana/dashboards/poa-rialto:ro environment: VIRTUAL_HOST: dashboard.rialto.bridges.test-installations.parity.io,grafana.rialto.brucke.link VIRTUAL_PORT: 3000 diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh b/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh index 04c9292b21..9af373b021 100755 --- a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh +++ b/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh @@ -12,8 +12,8 @@ set -eu RELAY_BINARY_PATH=${RELAY_BINARY_PATH:-./ethereum-poa-relay} # Ethereum node host ETH_HOST=${ETH_HOST:-poa-node-arthur} -# Ethereum node port -ETH_PORT=${ETH_PORT:-8545} +# Ethereum node websocket port +ETH_PORT=${ETH_PORT:-8546} # Ethereum chain id ETH_CHAIN_ID=${ETH_CHAIN_ID:-105} diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/grafana-dashboard.yaml b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/grafana-dashboard.yaml deleted file mode 100644 index d671bfb224..0000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/grafana-dashboard.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- name: 'default' - orgId: 1 - folder: '' - type: file - options: - path: '/etc/grafana/provisioning/dashboards' \ No newline at end of file diff --git 
a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-millau-to-rialto-headers-dashboard.json b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-millau-to-rialto-headers-dashboard.json deleted file mode 100644 index 2dc4f8a418..0000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-millau-to-rialto-headers-dashboard.json +++ /dev/null @@ -1,694 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "5m", - "handler": 1, - "message": "", - "name": "Synced Header Difference is Over 5 (Millau to Rialto)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "Shows how many headers behind the target chain is from the source chain.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - 
"pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max(Millau_to_Rialto_Sync_best_block_numbers{node=\"source\"}) - max(Millau_to_Rialto_Sync_best_block_numbers{node=\"target\"})", - "format": "table", - "instant": false, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Difference Between Source and Target Headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "2m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "3m", - "frequency": "5m", - "handler": 1, - "name": "No New Headers (Millau to Rialto)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "How many headers has the relay synced from the source node in the last 2 mins?", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - 
"h": 8, - "w": 12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 16, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max_over_time(Millau_to_Rialto_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Millau_to_Rialto_Sync_best_block_numbers{node=\"source\"}[2m])", - "interval": "", - "legendFormat": "Number of Millau Headers Synced on Rialto", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Headers Synced on Rialto (Last 2 Mins)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": { - "align": null - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 8 - }, - "id": 2, - "interval": "5s", - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", 
- "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Millau_to_Rialto_Sync_best_block_numbers", - "format": "time_series", - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Best Known Header on {{node}} Node", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Best Blocks according to Relay", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 6, - "x": 12, - "y": 8 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Millau_to_Rialto_Sync_system_average_load", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": null - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Average System Load", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, 
- "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 18, - "y": 8 - }, - "id": 12, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "avg_over_time(Millau_to_Rialto_Sync_process_cpu_usage_percentage[1m])", - "instant": true, - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay Process CPU Usage ", - "type": "gauge" - }, - { - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 14 - }, - "id": 4, - "options": { - "displayMode": "gradient", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showUnfilled": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Millau_to_Rialto_Sync_blocks_in_state", - "instant": true, - "interval": "", - "legendFormat": "{{state}}", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Queued Headers in Relay", - "type": "bargauge" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - 
"defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 16 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Millau_to_Rialto_Sync_process_memory_usage_bytes / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory Usage for Relay Process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Millau to Rialto Header Sync Dashboard", - "uid": "relay-millau-to-rialto-headers", - "version": 1 -} diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-millau-to-rialto-messages-dashboard.json 
b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-millau-to-rialto-messages-dashboard.json deleted file mode 100644 index 69c07f8715..0000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-millau-to-rialto-messages-dashboard.json +++ /dev/null @@ -1,1137 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_best_block_numbers{type=~\"target|target_at_source\"}, \"type\", \"At Rialto\", \"type\", \"target\"), \"type\", \"At Millau\", \"type\", \"target_at_source\")", - "instant": false, - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Best finalized Rialto headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - 
"xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_best_block_numbers{type=~\"source|source_at_target\"}, \"type\", \"At Millau\", \"type\", \"source\"), \"type\", \"At Rialto\", \"type\", \"source_at_target\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Best finalized Millau headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - 
"show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "max" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Messages generated at Millau are not detected by relay", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 9 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Rialto\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - }, - { - "expr": "max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m]) - min_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m])", - "hide": true, - 
"interval": "", - "legendFormat": "Messages generated in last 5 minutes", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Delivery race", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 9 - }, - "hiddenSeries": false, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Rialto to Millau\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Confirmations race", - "tooltip": { 
- "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "1m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "sum" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Messages from Millau to Rialto are not being delivered", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 20 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", - "format": 
"time_series", - "instant": false, - "interval": "", - "legendFormat": "Undelivered messages at Rialto", - "refId": "A" - }, - { - "expr": "increase(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[1m])", - "interval": "", - "legendFormat": "Messages delivered to Rialto in last 1m", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 1 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Delivery race lags", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 10 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Too many unconfirmed messages", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 20 - }, - "hiddenSeries": false, - "id": 12, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - 
"lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed messages at Millau", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 10 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Confirmations race lags", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 10 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Rewards are not being confirmed", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - 
"overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 20 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed rewards at Rialto", - "refId": "A" - }, - { - "expr": "(scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))) * (max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]) > bool min_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed rewards at Rialto (zero if messages are not being delivered to Rialto)", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 10 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Reward lags", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - 
"label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 0, - "y": 27 - }, - "id": 16, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.0.6", - "targets": [ - { - "expr": "avg_over_time(Millau_to_Rialto_MessageLane_00000000_process_cpu_usage_percentage[1m])", - "instant": true, - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay process CPU usage (1 CPU = 100)", - "type": "gauge" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 8, - "x": 8, - "y": 27 - }, - "hiddenSeries": false, - "id": 18, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Millau_to_Rialto_MessageLane_00000000_system_average_load", - "interval": "", - 
"legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "System load average", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 8, - "x": 16, - "y": 27 - }, - "hiddenSeries": false, - "id": 20, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Millau_to_Rialto_MessageLane_00000000_process_memory_usage_bytes / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory used by relay process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - 
"logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 25, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Millau to Rialto Message Sync Dashboard", - "uid": "relay-millau-to-rialto-messages", - "version": 1 -} diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-rialto-to-millau-messages-dashboard.json b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-rialto-to-millau-messages-dashboard.json deleted file mode 100644 index 138d1f7f2c..0000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-rialto-to-millau-messages-dashboard.json +++ /dev/null @@ -1,1137 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 
1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_best_block_numbers{type=~\"target|target_at_source\"}, \"type\", \"At Millau\", \"type\", \"target\"), \"type\", \"At Rialto\", \"type\", \"target_at_source\")", - "instant": false, - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Best finalized Millau headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_best_block_numbers{type=~\"source|source_at_target\"}, \"type\", \"At Rialto\", \"type\", \"source\"), \"type\", \"At Millau\", \"type\", \"source_at_target\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Best finalized Rialto headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "max" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Messages generated at Rialto are not detected by relay", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 9 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - 
"percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Millau\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Rialto\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - }, - { - "expr": "max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m]) - min_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m])", - "hide": true, - "interval": "", - "legendFormat": "Messages generated in last 5 minutes", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Delivery race", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 9 - }, - "hiddenSeries": false, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false 
- }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Millau to Rialto\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Rialto\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Confirmations race", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "1m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "sum" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Messages from Rialto to Millau are not being delivered", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - 
"overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 20 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", - "format": "time_series", - "instant": false, - "interval": "", - "legendFormat": "Undelivered messages at Millau", - "refId": "A" - }, - { - "expr": "increase(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[1m])", - "interval": "", - "legendFormat": "Messages delivered to Millau in last 1m", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 1 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Delivery race lags", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 10 - ], 
- "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Too many unconfirmed messages", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 20 - }, - "hiddenSeries": false, - "id": 12, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed messages at Rialto", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 10 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Confirmations race lags", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": 
true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 10 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Rewards are not being confirmed", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 20 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed rewards at Millau", - "refId": "A" - }, - { - "expr": "(scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - 
scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))) * (max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]) > bool min_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed rewards at Millau (zero if messages are not being delivered to Millau)", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 10 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Reward lags", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 0, - "y": 27 - }, - "id": 16, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.0.6", - "targets": [ - { - "expr": "avg_over_time(Rialto_to_Millau_MessageLane_00000000_process_cpu_usage_percentage[1m])", - "instant": true, - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - 
"timeShift": null, - "title": "Relay process CPU usage (1 CPU = 100)", - "type": "gauge" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 8, - "x": 8, - "y": 27 - }, - "hiddenSeries": false, - "id": 18, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Rialto_to_Millau_MessageLane_00000000_system_average_load", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "System load average", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 8, - "x": 16, - "y": 27 - }, - "hiddenSeries": false, - "id": 20, - "legend": { - "avg": false, - "current": false, - 
"max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Rialto_to_Millau_MessageLane_00000000_process_memory_usage_bytes / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory used by relay process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 25, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Rialto to Millau Message Sync Dashboard", - "uid": "relay-rialto-to-millau-messages", - "version": 1 -} diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/datasources/grafana-datasource.yaml b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/datasources/grafana-datasource.yaml deleted file mode 100644 index b85cf06e2b..0000000000 --- 
a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/datasources/grafana-datasource.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# list of datasources to insert/update depending -# whats available in the database -datasources: - # name of the datasource. Required -- name: Prometheus - # datasource type. Required - type: prometheus - # access mode. direct or proxy. Required - access: proxy - # org id. will default to orgId 1 if not specified - orgId: 1 - # url - url: http://prometheus-metrics:9090 - # mark as default datasource. Max one per org - isDefault: true - version: 1 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/notifiers/grafana-notifier.yaml b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/notifiers/grafana-notifier.yaml deleted file mode 100644 index 4eb6ea3863..0000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/notifiers/grafana-notifier.yaml +++ /dev/null @@ -1,15 +0,0 @@ -notifiers: - - name: Matrix - type: webhook - uid: notifier1 - is_default: true - send_reminder: true - frequency: 1h - disable_resolve_message: false - settings: - url: http://grafana-matrix-notifier:4567/hook?rule=bridge - http_method: POST - -delete_notifiers: - - name: Matrix - uid: notifier1 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json new file mode 100644 index 0000000000..69396162bb --- /dev/null +++ b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json @@ -0,0 +1,1429 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + 
"type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 3, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_best_block_numbers{type=~\"target|target_at_source\"}, \"type\", \"At Rialto\", \"type\", \"target\"), \"type\", \"At Millau\", \"type\", \"target_at_source\")", + "instant": false, + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Best finalized Rialto headers", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, 
+ "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_best_block_numbers{type=~\"source|source_at_target\"}, \"type\", \"At Millau\", \"type\", \"source\"), \"type\", \"At Rialto\", \"type\", \"source_at_target\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Best finalized Millau headers", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "5m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Messages generated at Millau are not detected by relay", + "noDataState": 
"no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 9 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Rialto\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + }, + { + "expr": "max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m]) - min_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m])", + "hide": true, + "interval": "", + "legendFormat": "Messages generated in last 5 minutes", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Delivery race (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + 
"format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 9 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Rialto to Millau\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Confirmations race (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + 
} + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "sum" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Messages from Millau to Rialto are not being delivered", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 20 + }, + "hiddenSeries": false, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "Undelivered messages at Rialto", + "refId": "A" + }, + { + "expr": "increase(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[1m])", + "interval": "", + "legendFormat": "Messages delivered to Rialto in last 1m", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": 
true, + "line": true, + "op": "lt", + "value": 1, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Delivery race lags (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 10 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "5m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Too many unconfirmed messages", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 20 + }, + "hiddenSeries": false, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m]))", + "interval": "", + "legendFormat": "Unconfirmed messages at Millau", + "refId": "A" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 10, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Confirmations race lags (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 10 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "5m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Rewards are not being confirmed", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 20 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + 
"total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))", + "interval": "", + "legendFormat": "Unconfirmed rewards at Rialto", + "refId": "A" + }, + { + "expr": "(scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))) * (max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]) > bool min_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", + "interval": "", + "legendFormat": "Unconfirmed rewards at Rialto (zero if messages are not being delivered to Rialto)", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 10, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Reward lags (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + 
"alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Messages (00000001) from Millau to Rialto are not being delivered", + "noDataState": "alerting", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 27 + }, + "hiddenSeries": false, + "id": 21, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000001_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Rialto\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + }, + { + "expr": "increase(Millau_to_Rialto_MessageLane_00000001_lane_state_nonces{type=\"target_latest_received\"}[10m])", + "hide": true, + "interval": "", + "legendFormat": "Messages generated in last 5 minutes", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": 
true, + "line": true, + "op": "lt", + "value": 1, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Delivery race (00000001)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Messages (00000001) from Millau to Rialto are not being confirmed", + "noDataState": "alerting", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 27 + }, + "hiddenSeries": false, + "id": 22, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"label_replace(label_replace(Millau_to_Rialto_MessageLane_00000001_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Rialto to Millau\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + }, + { + "expr": "increase(Millau_to_Rialto_MessageLane_00000001_lane_state_nonces{type=\"source_latest_confirmed\"}[10m])", + "hide": true, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Confirmations race (00000001)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 38 + }, + "id": 16, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": 
"avg_over_time(Millau_to_Rialto_MessageLane_00000000_process_cpu_usage_percentage[1m])", + "instant": true, + "interval": "", + "legendFormat": "1 CPU = 100", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Relay process CPU usage (1 CPU = 100)", + "type": "gauge" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 38 + }, + "hiddenSeries": false, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "Millau_to_Rialto_MessageLane_00000000_system_average_load", + "interval": "", + "legendFormat": "Average system load in last {{over}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "System load average", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + 
"links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 38 + }, + "hiddenSeries": false, + "id": 20, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "Millau_to_Rialto_MessageLane_00000000_process_memory_usage_bytes / 1024 / 1024", + "interval": "", + "legendFormat": "Process memory, MB", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory used by relay process", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Millau to Rialto Message Sync Dashboard", + "uid": "relay-millau-to-rialto-messages", + "version": 2 +} diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json 
b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json new file mode 100644 index 0000000000..29691e0a06 --- /dev/null +++ b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json @@ -0,0 +1,1420 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 4, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_best_block_numbers{type=~\"target|target_at_source\"}, \"type\", \"At Millau\", \"type\", \"target\"), \"type\", \"At Rialto\", \"type\", \"target_at_source\")", + "instant": false, + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Best finalized Millau headers", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + 
"mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_best_block_numbers{type=~\"source|source_at_target\"}, \"type\", \"At Rialto\", \"type\", \"source\"), \"type\", \"At Millau\", \"type\", \"source_at_target\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Best finalized Rialto headers", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": 
{ + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "5m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Messages generated at Rialto are not detected by relay", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 9 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Millau\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Rialto\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + }, + { + "expr": "max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m]) - min_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m])", + "hide": true, + "interval": "", + "legendFormat": 
"Messages generated in last 5 minutes", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Delivery race (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 9 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Millau to Rialto\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Rialto\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Confirmations race (00000000)", + "tooltip": { + 
"shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "sum" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Messages from Rialto to Millau are not being delivered", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 20 + }, + "hiddenSeries": false, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", + "format": 
"time_series", + "instant": false, + "interval": "", + "legendFormat": "Undelivered messages at Millau", + "refId": "A" + }, + { + "expr": "increase(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[1m])", + "interval": "", + "legendFormat": "Messages delivered to Millau in last 1m", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Delivery race lags (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 10 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "5m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Too many unconfirmed messages", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 20 + }, + "hiddenSeries": false, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, 
+ "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m]))", + "interval": "", + "legendFormat": "Unconfirmed messages at Rialto", + "refId": "A" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 10, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Confirmations race lags (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 10 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "5m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Rewards are not being confirmed", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": 
"Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 20 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))", + "interval": "", + "legendFormat": "Unconfirmed rewards at Millau", + "refId": "A" + }, + { + "expr": "(scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))) * (max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]) > bool min_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", + "interval": "", + "legendFormat": "Unconfirmed rewards at Millau (zero if messages are not being delivered to Millau)", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 10, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Reward lags (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + 
"mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Messages (00000001) from Rialto to Millau are not being delivered", + "noDataState": "alerting", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 27 + }, + "hiddenSeries": false, + "id": 21, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000001_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Millau\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Rialto\", \"type\", \"target_latest_received\")", + "interval": "", + 
"legendFormat": "{{type}}", + "refId": "A" + }, + { + "expr": "increase(Rialto_to_Millau_MessageLane_00000001_lane_state_nonces{type=\"target_latest_received\"}[10m])", + "hide": true, + "interval": "", + "legendFormat": "Messages generated in last 5 minutes", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Delivery race (00000001)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Messages (00000001) from Rialto to Millau are not being confirmed", + "noDataState": "alerting", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 27 + }, + "hiddenSeries": false, + "id": 22, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": 
false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000001_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Millau to Rialto\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Rialto\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + }, + { + "expr": "increase(Rialto_to_Millau_MessageLane_00000001_lane_state_nonces{type=\"source_latest_confirmed\"}[10m])", + "hide": true, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Confirmations race (00000001)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 38 + }, + "id": 16, + 
"options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "avg_over_time(Rialto_to_Millau_MessageLane_00000000_process_cpu_usage_percentage[1m])", + "instant": true, + "interval": "", + "legendFormat": "1 CPU = 100", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Relay process CPU usage (1 CPU = 100)", + "type": "gauge" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 38 + }, + "hiddenSeries": false, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "Rialto_to_Millau_MessageLane_00000000_system_average_load", + "interval": "", + "legendFormat": "Average system load in last {{over}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "System load average", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 
null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 38 + }, + "hiddenSeries": false, + "id": 20, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "Rialto_to_Millau_MessageLane_00000000_process_memory_usage_bytes / 1024 / 1024", + "interval": "", + "legendFormat": "Process memory, MB", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory used by relay process", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Rialto to Millau Message Sync 
Dashboard", + "uid": "relay-rialto-to-millau-messages", + "version": 2 +} diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json new file mode 100644 index 0000000000..61ff281cc2 --- /dev/null +++ b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json @@ -0,0 +1,454 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 9, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "Rialto_to_Millau_MessageLane_00000000_rialto_storage_proof_overhead", + "interval": "", + "legendFormat": "Actual overhead", + "refId": "A" + }, + { + "exemplar": true, + "expr": "1024", + "hide": false, + "interval": "", + "legendFormat": "At runtime (hardcoded)", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": 
null, + "title": "Rialto: storage proof overhead", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:111", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:112", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "Westend_to_Millau_Sync_kusama_to_base_conversion_rate / Westend_to_Millau_Sync_polkadot_to_base_conversion_rate", + "interval": "", + "legendFormat": "Outside of runtime (actually Polkadot -> Kusama)", + "refId": "A" + }, + { + "exemplar": true, + "expr": "Rialto_to_Millau_MessageLane_00000000_rialto_millau_to_rialto_conversion_rate", + "hide": false, + "interval": "", + "legendFormat": "At runtime", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rialto: Millau -> Rialto conversion rate", + "tooltip": { + "shared": true, 
+ "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:49", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:50", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "Millau_to_Rialto_MessageLane_00000000_millau_storage_proof_overhead", + "interval": "", + "legendFormat": "Actual overhead", + "refId": "A" + }, + { + "exemplar": true, + "expr": "1024", + "hide": false, + "interval": "", + "legendFormat": "At runtime (hardcoded)", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Millau: storage proof overhead", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:111", + "format": "short", 
+ "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:112", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "Westend_to_Millau_Sync_polkadot_to_base_conversion_rate / Westend_to_Millau_Sync_kusama_to_base_conversion_rate", + "interval": "", + "legendFormat": "Outside of runtime (actually Kusama -> Polkadot)", + "refId": "A" + }, + { + "exemplar": true, + "expr": "Millau_to_Rialto_MessageLane_00000000_millau_rialto_to_millau_conversion_rate", + "hide": false, + "interval": "", + "legendFormat": "At runtime", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Millau: Rialto -> Millau conversion rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:49", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + 
{ + "$$hashKey": "object:50", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "10s", + "schemaVersion": 27, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Rialto+Millau maintenance dashboard", + "uid": "7AuyrjlMz", + "version": 2 +} diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/prometheus/prometheus.yml b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/prometheus/prometheus.yml deleted file mode 100644 index 763eaf35b3..0000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/prometheus/prometheus.yml +++ /dev/null @@ -1,23 +0,0 @@ -scrape_configs: - # The job name is added as a label `job=` to any timeseries scraped from this config. - - job_name: 'millau_to_rialto_headers_relay_node' - - # Override the global default and scrape targets from this job every 15 seconds. 
- scrape_interval: 15s - static_configs: - - targets: ['relay-headers-millau-to-rialto:9616'] - - - job_name: 'rialto_to_millau_headers_relay_node' - scrape_interval: 15s - static_configs: - - targets: ['relay-headers-rialto-to-millau:9616'] - - - job_name: 'millau_to_rialto_messages_relay_node' - scrape_interval: 15s - static_configs: - - targets: ['relay-messages-millau-to-rialto:9616'] - - - job_name: 'rialto_to_millau_messages_relay_node' - scrape_interval: 15s - static_configs: - - targets: ['relay-messages-rialto-to-millau:9616'] diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/prometheus/targets.yml b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/prometheus/targets.yml new file mode 100644 index 0000000000..16b798b5a2 --- /dev/null +++ b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/prometheus/targets.yml @@ -0,0 +1,4 @@ +- targets: + - relay-millau-rialto:9616 + - relay-messages-millau-to-rialto-lane-00000001:9616 + - relay-messages-rialto-to-millau-lane-00000001:9616 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml b/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml index d7a360a2b6..5f00e449c3 100644 --- a/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml +++ b/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml @@ -1,3 +1,5 @@ +# Exposed ports: 10016, 10116, 10216, 10316, 10416 + version: '3.5' services: # We provide overrides for these particular nodes since they are public facing @@ -16,15 +18,15 @@ services: LETSENCRYPT_HOST: wss.millau.brucke.link LETSENCRYPT_EMAIL: admin@parity.io - relay-headers-millau-to-rialto: &sub-bridge-relay + relay-millau-rialto: &sub-bridge-relay image: paritytech/substrate-relay - entrypoint: /entrypoints/relay-headers-millau-to-rialto-entrypoint.sh + entrypoint: /entrypoints/relay-millau-rialto-entrypoint.sh volumes: - ./bridges/rialto-millau/entrypoints:/entrypoints 
environment: RUST_LOG: rpc=trace,bridge=trace ports: - - "9616:9616" + - "10016:9616" depends_on: &all-nodes - millau-node-alice - millau-node-bob @@ -37,49 +39,45 @@ services: - rialto-node-dave - rialto-node-eve - relay-headers-rialto-to-millau: - <<: *sub-bridge-relay - entrypoint: /entrypoints/relay-headers-rialto-to-millau-entrypoint.sh - ports: - - "9716:9616" - - relay-messages-millau-to-rialto: + relay-messages-millau-to-rialto-lane-00000001: <<: *sub-bridge-relay + environment: + MSG_EXCHANGE_GEN_LANE: "00000001" entrypoint: /entrypoints/relay-messages-millau-to-rialto-entrypoint.sh - environment: - RUST_LOG: rpc=trace,bridge=trace,jsonrpsee=trace,soketto=trace - ports: - - "9816:9616" - depends_on: - - relay-headers-millau-to-rialto - - relay-headers-rialto-to-millau - - relay-messages-millau-to-rialto-generator: - <<: *sub-bridge-relay - entrypoint: /entrypoints/relay-messages-to-rialto-generator-entrypoint.sh - environment: - RUST_LOG: rpc=trace,bridge=trace,jsonrpsee=trace,soketto=trace - ports: - - "9916:9616" - depends_on: - - relay-messages-millau-to-rialto - - relay-messages-rialto-to-millau: - <<: *sub-bridge-relay - entrypoint: /entrypoints/relay-messages-rialto-to-millau-entrypoint.sh - ports: - - "10016:9616" - depends_on: - - relay-headers-millau-to-rialto - - relay-headers-rialto-to-millau - - relay-messages-rialto-to-millau-generator: - <<: *sub-bridge-relay - entrypoint: /entrypoints/relay-messages-to-millau-generator-entrypoint.sh ports: - "10116:9616" depends_on: - - relay-messages-rialto-to-millau + - relay-millau-rialto + + relay-messages-millau-to-rialto-generator: + <<: *sub-bridge-relay + environment: + MSG_EXCHANGE_GEN_SECONDARY_LANE: "00000001" + entrypoint: /entrypoints/relay-messages-to-rialto-generator-entrypoint.sh + ports: + - "10216:9616" + depends_on: + - relay-millau-rialto + + relay-messages-rialto-to-millau-lane-00000001: + <<: *sub-bridge-relay + environment: + MSG_EXCHANGE_GEN_LANE: "00000001" + entrypoint: 
/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh + ports: + - "10316:9616" + depends_on: + - relay-millau-rialto + + relay-messages-rialto-to-millau-generator: + <<: *sub-bridge-relay + environment: + MSG_EXCHANGE_GEN_SECONDARY_LANE: "00000001" + entrypoint: /entrypoints/relay-messages-to-millau-generator-entrypoint.sh + ports: + - "10416:9616" + depends_on: + - relay-millau-rialto # Note: These are being overridden from the top level `monitoring` compose file. grafana-dashboard: @@ -89,9 +87,9 @@ services: LETSENCRYPT_HOST: grafana.millau.brucke.link,grafana.rialto.brucke.link LETSENCRYPT_EMAIL: admin@parity.io volumes: - - ./bridges/rialto-millau/dashboard/grafana/provisioning/:/etc/grafana/provisioning/ + - ./bridges/rialto-millau/dashboard/grafana:/etc/grafana/dashboards/rialto-millau:ro prometheus-metrics: volumes: - - ./bridges/rialto-millau/dashboard/prometheus/:/etc/prometheus/ + - ./bridges/rialto-millau/dashboard/prometheus/targets.yml:/etc/prometheus/targets-rialto-millau.yml depends_on: *all-nodes diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-headers-millau-to-rialto-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-headers-millau-to-rialto-entrypoint.sh deleted file mode 100755 index e7b073d967..0000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-headers-millau-to-rialto-entrypoint.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 3 -curl -v http://millau-node-alice:9933/health -curl -v http://rialto-node-alice:9933/health - -/home/user/substrate-relay init-bridge millau-to-rialto \ - --millau-host millau-node-alice \ - --millau-port 9944 \ - --rialto-host rialto-node-alice \ - --rialto-port 9944 \ - --rialto-signer //Alice - -# Give chain a little bit of time to process initialization transaction -sleep 6 -/home/user/substrate-relay relay-headers millau-to-rialto \ - --millau-host millau-node-alice \ - --millau-port 9944 \ 
- --rialto-host rialto-node-alice \ - --rialto-port 9944 \ - --rialto-signer //Charlie \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-headers-rialto-to-millau-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-headers-rialto-to-millau-entrypoint.sh deleted file mode 100755 index f3fa7597b2..0000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-headers-rialto-to-millau-entrypoint.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 3 -curl -v http://millau-node-alice:9933/health -curl -v http://rialto-node-alice:9933/health - -/home/user/substrate-relay init-bridge rialto-to-millau \ - --millau-host millau-node-alice \ - --millau-port 9944 \ - --rialto-host rialto-node-alice \ - --rialto-port 9944 \ - --millau-signer //Alice - -# Give chain a little bit of time to process initialization transaction -sleep 6 -/home/user/substrate-relay relay-headers rialto-to-millau \ - --millau-host millau-node-alice \ - --millau-port 9944 \ - --rialto-host rialto-node-alice \ - --rialto-port 9944 \ - --millau-signer //Charlie \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh index 5b92a9b013..48e5a28179 100755 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh +++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh @@ -7,12 +7,12 @@ curl -v http://rialto-node-bob:9933/health MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} -/home/user/substrate-relay relay-messages millau-to-rialto \ +/home/user/substrate-relay relay-messages MillauToRialto \ --lane $MESSAGE_LANE \ - --millau-host millau-node-bob \ - 
--millau-port 9944 \ - --millau-signer //Eve \ - --rialto-host rialto-node-bob \ - --rialto-port 9944 \ - --rialto-signer //Eve \ + --source-host millau-node-bob \ + --source-port 9944 \ + --source-signer //Eve \ + --target-host rialto-node-bob \ + --target-port 9944 \ + --target-signer //Eve \ --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh index 6d23b8d236..378aeedd9f 100755 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh +++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh @@ -7,12 +7,12 @@ curl -v http://rialto-node-bob:9933/health MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} -/home/user/substrate-relay relay-messages rialto-to-millau \ +/home/user/substrate-relay relay-messages RialtoToMillau \ --lane $MESSAGE_LANE \ - --rialto-host rialto-node-bob \ - --rialto-port 9944 \ - --rialto-signer //Ferdie \ - --millau-host millau-node-bob \ - --millau-port 9944 \ - --millau-signer //Ferdie \ + --source-host rialto-node-bob \ + --source-port 9944 \ + --source-signer //Ferdie \ + --target-host millau-node-bob \ + --target-port 9944 \ + --target-signer //Ferdie \ --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh index 606a3f4e51..96676bad85 100755 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh +++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh @@ -10,11 +10,13 @@ set -eu # Max delay 
before submitting transactions (s) MAX_SUBMIT_DELAY_S=${MSG_EXCHANGE_GEN_MAX_SUBMIT_DELAY_S:-30} MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} -FERDIE_ADDR=5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL +SECONDARY_MESSAGE_LANE=${MSG_EXCHANGE_GEN_SECONDARY_LANE} +MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE=1024 +FERDIE_ADDR=5oSLwptwgySxh5vz1HdvznQJjbQVgwYSvHEpYYeTXu1Ei8j7 -SHARED_CMD="/home/user/substrate-relay send-message rialto-to-millau" -SHARED_HOST="--rialto-host rialto-node-bob --rialto-port 9944" -DAVE_SIGNER="--rialto-signer //Dave --millau-signer //Dave" +SHARED_CMD="/home/user/substrate-relay send-message RialtoToMillau" +SHARED_HOST="--source-host rialto-node-bob --source-port 9944" +DAVE_SIGNER="--source-signer //Dave --target-signer //Dave" SEND_MESSAGE="$SHARED_CMD $SHARED_HOST $DAVE_SIGNER" @@ -25,6 +27,11 @@ rand_sleep() { sleep $SUBMIT_DELAY_S } +# start sending large messages immediately +LARGE_MESSAGES_TIME=0 +# start sending message packs in a hour +BUNCH_OF_MESSAGES_TIME=3600 + while true do rand_sleep @@ -34,6 +41,14 @@ do --origin Target \ remark + if [ ! 
-z $SECONDARY_MESSAGE_LANE ]; then + echo "Sending Remark from Rialto to Millau using Target Origin using secondary lane: $SECONDARY_MESSAGE_LANE" + $SEND_MESSAGE \ + --lane $SECONDARY_MESSAGE_LANE \ + --origin Target \ + remark + fi + rand_sleep echo "Sending Transfer from Rialto to Millau using Target Origin" $SEND_MESSAGE \ @@ -58,4 +73,49 @@ do transfer \ --amount 1000000000 \ --recipient $FERDIE_ADDR + + # every other hour we're sending 3 large (size, weight, size+weight) messages + if [ $SECONDS -ge $LARGE_MESSAGES_TIME ]; then + LARGE_MESSAGES_TIME=$((SECONDS + 7200)) + + rand_sleep + echo "Sending Maximal Size Remark from Rialto to Millau using Target Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + remark \ + --remark-size=max + + rand_sleep + echo "Sending Maximal Dispatch Weight Remark from Rialto to Millau using Target Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + --dispatch-weight=max \ + remark + + rand_sleep + echo "Sending Maximal Size and Dispatch Weight Remark from Rialto to Millau using Target Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + --dispatch-weight=max \ + remark \ + --remark-size=max + + fi + + # every other hour we're sending a bunch of small messages + if [ $SECONDS -ge $BUNCH_OF_MESSAGES_TIME ]; then + BUNCH_OF_MESSAGES_TIME=$((SECONDS + 7200)) + + for i in $(seq 1 $MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE); + do + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + remark + done + + fi done diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh index 66d8e2a076..c24ec8ea7f 100755 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh +++ 
b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh @@ -10,11 +10,13 @@ set -eu # Max delay before submitting transactions (s) MAX_SUBMIT_DELAY_S=${MSG_EXCHANGE_GEN_MAX_SUBMIT_DELAY_S:-30} MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} -FERDIE_ADDR=5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL +SECONDARY_MESSAGE_LANE=${MSG_EXCHANGE_GEN_SECONDARY_LANE} +MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE=128 +FERDIE_ADDR=6ztG3jPnJTwgZnnYsgCDXbbQVR82M96hBZtPvkN56A9668ZC -SHARED_CMD=" /home/user/substrate-relay send-message millau-to-rialto" -SHARED_HOST="--millau-host millau-node-bob --millau-port 9944" -DAVE_SIGNER="--rialto-signer //Dave --millau-signer //Dave" +SHARED_CMD=" /home/user/substrate-relay send-message MillauToRialto" +SHARED_HOST="--source-host millau-node-bob --source-port 9944" +DAVE_SIGNER="--target-signer //Dave --source-signer //Dave" SEND_MESSAGE="$SHARED_CMD $SHARED_HOST $DAVE_SIGNER" @@ -25,6 +27,11 @@ rand_sleep() { sleep $SUBMIT_DELAY_S } +# start sending large messages immediately +LARGE_MESSAGES_TIME=0 +# start sending message packs in a hour +BUNCH_OF_MESSAGES_TIME=3600 + while true do rand_sleep @@ -34,6 +41,14 @@ do --origin Target \ remark + if [ ! 
-z $SECONDARY_MESSAGE_LANE ]; then + echo "Sending Remark from Millau to Rialto using Target Origin using secondary lane: $SECONDARY_MESSAGE_LANE" + $SEND_MESSAGE \ + --lane $SECONDARY_MESSAGE_LANE \ + --origin Target \ + remark + fi + rand_sleep echo "Sending Transfer from Millau to Rialto using Target Origin" $SEND_MESSAGE \ @@ -58,4 +73,49 @@ do transfer \ --amount 1000000000 \ --recipient $FERDIE_ADDR + + # every other hour we're sending 3 large (size, weight, size+weight) messages + if [ $SECONDS -ge $LARGE_MESSAGES_TIME ]; then + LARGE_MESSAGES_TIME=$((SECONDS + 7200)) + + rand_sleep + echo "Sending Maximal Size Remark from Millau to Rialto using Target Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + remark \ + --remark-size=max + + rand_sleep + echo "Sending Maximal Dispatch Weight Remark from Millau to Rialto using Target Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + --dispatch-weight=max \ + remark + + rand_sleep + echo "Sending Maximal Size and Dispatch Weight Remark from Millau to Rialto using Target Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + --dispatch-weight=max \ + remark \ + --remark-size=max + + fi + + # every other hour we're sending a bunch of small messages + if [ $SECONDS -ge $BUNCH_OF_MESSAGES_TIME ]; then + BUNCH_OF_MESSAGES_TIME=$((SECONDS + 7200)) + + for i in $(seq 1 $MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE); + do + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + remark + done + + fi done diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh new file mode 100755 index 0000000000..d8d3290428 --- /dev/null +++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh @@ -0,0 +1,33 @@ +#!/bin/bash +set -xeu + +sleep 3 +curl -v 
http://millau-node-alice:9933/health +curl -v http://rialto-node-alice:9933/health + +/home/user/substrate-relay init-bridge MillauToRialto \ + --source-host millau-node-alice \ + --source-port 9944 \ + --target-host rialto-node-alice \ + --target-port 9944 \ + --target-signer //Alice + +/home/user/substrate-relay init-bridge RialtoToMillau \ + --source-host rialto-node-alice \ + --source-port 9944 \ + --target-host millau-node-alice \ + --target-port 9944 \ + --target-signer //Alice + +# Give chain a little bit of time to process initialization transaction +sleep 6 + +/home/user/substrate-relay relay-headers-and-messages millau-rialto \ + --millau-host millau-node-alice \ + --millau-port 9944 \ + --millau-signer //Charlie \ + --rialto-host rialto-node-alice \ + --rialto-port 9944 \ + --rialto-signer //Charlie \ + --lane=00000000 \ + --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-rialto-to-millau-headers-dashboard.json b/polkadot/bridges/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json similarity index 93% rename from polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-rialto-to-millau-headers-dashboard.json rename to polkadot/bridges/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json index 1f9176ddba..e73ddea40f 100644 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-rialto-to-millau-headers-dashboard.json +++ b/polkadot/bridges/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json @@ -50,7 +50,7 @@ "frequency": "5m", "handler": 1, "message": "", - "name": "Synced Header Difference is Over 5 (Rialto to Millau)", + "name": "Synced Header Difference is Over 5 (Westend to Millau)", "noDataState": "no_data", "notifications": [] }, @@ 
-99,7 +99,7 @@ "steppedLine": false, "targets": [ { - "expr": "max(Rialto_to_Millau_Sync_best_block_numbers{node=\"source\"}) - max(Rialto_to_Millau_Sync_best_block_numbers{node=\"target\"})", + "expr": "max(Westend_to_Millau_Sync_best_block_numbers{node=\"source\"}) - max(Westend_to_Millau_Sync_best_block_numbers{node=\"target\"})", "format": "table", "instant": false, "interval": "", @@ -188,7 +188,7 @@ "for": "3m", "frequency": "5m", "handler": 1, - "name": "No New Headers (Rialto to Millau)", + "name": "No New Headers (Westend to Millau)", "noDataState": "no_data", "notifications": [] }, @@ -237,9 +237,9 @@ "steppedLine": false, "targets": [ { - "expr": "max_over_time(Rialto_to_Millau_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Rialto_to_Millau_Sync_best_block_numbers{node=\"source\"}[2m])", + "expr": "max_over_time(Westend_to_Millau_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Westend_to_Millau_Sync_best_block_numbers{node=\"source\"}[2m])", "interval": "", - "legendFormat": "Number of Rialto Headers Synced on Millau", + "legendFormat": "Number of new Headers on Westend (Last 2 Mins)", "refId": "A" } ], @@ -341,7 +341,7 @@ "pluginVersion": "7.1.3", "targets": [ { - "expr": "Rialto_to_Millau_Sync_best_block_numbers", + "expr": "Westend_to_Millau_Sync_best_block_numbers", "format": "time_series", "instant": true, "interval": "", @@ -401,7 +401,7 @@ "steppedLine": false, "targets": [ { - "expr": "Rialto_to_Millau_Sync_system_average_load", + "expr": "Westend_to_Millau_Sync_system_average_load", "interval": "", "legendFormat": "Average system load in last {{over}}", "refId": "A" @@ -500,7 +500,7 @@ "pluginVersion": "7.1.3", "targets": [ { - "expr": "avg_over_time(Rialto_to_Millau_Sync_process_cpu_usage_percentage[1m])", + "expr": "avg_over_time(Westend_to_Millau_Sync_process_cpu_usage_percentage[1m])", "instant": true, "interval": "", "legendFormat": "1 CPU = 100", @@ -557,7 +557,7 @@ "pluginVersion": "7.1.3", "targets": [ { - 
"expr": "Rialto_to_Millau_Sync_blocks_in_state", + "expr": "Westend_to_Millau_Sync_blocks_in_state", "instant": true, "interval": "", "legendFormat": "{{state}}", @@ -615,7 +615,7 @@ "steppedLine": false, "targets": [ { - "expr": "Rialto_to_Millau_Sync_process_memory_usage_bytes / 1024 / 1024", + "expr": "Westend_to_Millau_Sync_process_memory_usage_bytes / 1024 / 1024", "interval": "", "legendFormat": "Process memory, MB", "refId": "A" @@ -688,7 +688,7 @@ ] }, "timezone": "", - "title": "Rialto to Millau Header Sync Dashboard", - "uid": "relay-rialto-to-millau-headers", + "title": "Westend to Millau Header Sync Dashboard", + "uid": "relay-westend-to-millau-headers", "version": 1 } diff --git a/polkadot/bridges/deployments/bridges/westend-millau/dashboard/prometheus/targets.yml b/polkadot/bridges/deployments/bridges/westend-millau/dashboard/prometheus/targets.yml new file mode 100644 index 0000000000..5d49e11274 --- /dev/null +++ b/polkadot/bridges/deployments/bridges/westend-millau/dashboard/prometheus/targets.yml @@ -0,0 +1,2 @@ +- targets: + - relay-headers-westend-to-millau:9616 diff --git a/polkadot/bridges/deployments/bridges/westend-millau/docker-compose.yml b/polkadot/bridges/deployments/bridges/westend-millau/docker-compose.yml new file mode 100644 index 0000000000..8caa17ffb8 --- /dev/null +++ b/polkadot/bridges/deployments/bridges/westend-millau/docker-compose.yml @@ -0,0 +1,31 @@ +# Exposed ports: 10616 + +version: '3.5' +services: + relay-headers-westend-to-millau: + image: paritytech/substrate-relay + entrypoint: /entrypoints/relay-headers-westend-to-millau-entrypoint.sh + volumes: + - ./bridges/westend-millau/entrypoints:/entrypoints + environment: + RUST_LOG: rpc=trace,bridge=trace + ports: + - "10616:9616" + depends_on: + - millau-node-alice + + # Note: These are being overridden from the top level `monitoring` compose file. 
+ grafana-dashboard: + environment: + VIRTUAL_HOST: grafana.millau.brucke.link,grafana.rialto.brucke.link + VIRTUAL_PORT: 3000 + LETSENCRYPT_HOST: grafana.millau.brucke.link,grafana.rialto.brucke.link + LETSENCRYPT_EMAIL: admin@parity.io + volumes: + - ./bridges/westend-millau/dashboard/grafana:/etc/grafana/dashboards/westend-millau:ro + + prometheus-metrics: + volumes: + - ./bridges/westend-millau/dashboard/prometheus/targets.yml:/etc/prometheus/targets-westend-millau.yml + depends_on: + - relay-headers-westend-to-millau diff --git a/polkadot/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh b/polkadot/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh new file mode 100755 index 0000000000..740a9a9739 --- /dev/null +++ b/polkadot/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -xeu + +sleep 3 +curl -v http://millau-node-alice:9933/health +curl -v https://westend-rpc.polkadot.io:443/health + +/home/user/substrate-relay init-bridge WestendToMillau \ + --source-host westend-rpc.polkadot.io \ + --source-port 443 \ + --source-secure \ + --target-host millau-node-alice \ + --target-port 9944 \ + --target-signer //George + +# Give chain a little bit of time to process initialization transaction +sleep 6 +/home/user/substrate-relay relay-headers WestendToMillau \ + --source-host westend-rpc.polkadot.io \ + --source-port 443 \ + --source-secure \ + --target-host millau-node-alice \ + --target-port 9944 \ + --target-signer //George \ + --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/local-scripts/relay-headers-rococo-to-westend.sh b/polkadot/bridges/deployments/local-scripts/relay-headers-rococo-to-westend.sh new file mode 100755 index 0000000000..d54d16f7e3 --- /dev/null +++ b/polkadot/bridges/deployments/local-scripts/relay-headers-rococo-to-westend.sh @@ -0,0 +1,24 
@@ +#!/bin/bash +# +# Run an instance of the Rococo -> Westend header sync. +# +# Right now this relies on local Westend and Rococo networks +# running (which include `pallet-bridge-grandpa` in their +# runtimes), but in the future it could use public RPC nodes. + +set -xeu + +RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge RococoToWestend \ + --source-host 127.0.0.1 \ + --source-port 9955 \ + --target-host 127.0.0.1 \ + --target-port 9944 \ + --target-signer //Eve + +RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers RococoToWestend \ + --source-host 127.0.0.1 \ + --source-port 9955 \ + --target-host 127.0.0.1 \ + --target-port 9944 \ + --target-signer //Bob \ + --prometheus-host=0.0.0.0 \ diff --git a/polkadot/bridges/deployments/local-scripts/relay-headers-westend-to-rococo.sh b/polkadot/bridges/deployments/local-scripts/relay-headers-westend-to-rococo.sh new file mode 100755 index 0000000000..e718656a9d --- /dev/null +++ b/polkadot/bridges/deployments/local-scripts/relay-headers-westend-to-rococo.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# +# Run an instance of the Westend -> Rococo header sync. +# +# Right now this relies on local Westend and Rococo networks +# running (which include `pallet-bridge-grandpa` in their +# runtimes), but in the future it could use public RPC nodes. 
+ +set -xeu + +RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge WestendToRococo \ + --source-host 127.0.0.1 \ + --source-port 9944 \ + --target-host 127.0.0.1 \ + --target-port 9955 \ + --target-signer //Dave + +RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers WestendToRococo \ + --source-host 127.0.0.1 \ + --source-port 9944 \ + --target-host 127.0.0.1 \ + --target-port 9955 \ + --target-signer //Charlie \ + --prometheus-host=0.0.0.0 \ diff --git a/polkadot/bridges/deployments/local-scripts/relay-messages-millau-to-rialto.sh b/polkadot/bridges/deployments/local-scripts/relay-messages-millau-to-rialto.sh new file mode 100755 index 0000000000..5b298a149f --- /dev/null +++ b/polkadot/bridges/deployments/local-scripts/relay-messages-millau-to-rialto.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# A script for relaying Millau messages to the Rialto chain. +# +# Will not work unless both the Rialto and Millau are running (see `run-rialto-node.sh` +# and `run-millau-node.sh`). +set -xeu + +MILLAU_PORT="${MILLAU_PORT:-9945}" +RIALTO_PORT="${RIALTO_PORT:-9944}" + +RUST_LOG=bridge=debug \ +./target/debug/substrate-relay relay-messages MillauToRialto \ + --lane 00000000 \ + --source-host localhost \ + --source-port $MILLAU_PORT \ + --source-signer //Bob \ + --target-host localhost \ + --target-port $RIALTO_PORT \ + --target-signer //Bob \ + --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/local-scripts/relay-messages-rialto-to-millau.sh b/polkadot/bridges/deployments/local-scripts/relay-messages-rialto-to-millau.sh new file mode 100755 index 0000000000..616697192b --- /dev/null +++ b/polkadot/bridges/deployments/local-scripts/relay-messages-rialto-to-millau.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# A script for relaying Rialto messages to the Millau chain. +# +# Will not work unless both the Rialto and Millau are running (see `run-rialto-node.sh` +# and `run-millau-node.sh`). 
+set -xeu + +MILLAU_PORT="${MILLAU_PORT:-9945}" +RIALTO_PORT="${RIALTO_PORT:-9944}" + +RUST_LOG=bridge=debug \ +./target/debug/substrate-relay relay-messages RialtoToMillau \ + --lane 00000000 \ + --source-host localhost \ + --source-port $RIALTO_PORT \ + --source-signer //Bob \ + --target-host localhost \ + --target-port $MILLAU_PORT \ + --target-signer //Bob \ + --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh b/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh index 205d7e62f8..59c75de389 100755 --- a/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh +++ b/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh @@ -5,20 +5,23 @@ # Will not work unless both the Rialto and Millau are running (see `run-rialto-node.sh` # and `run-millau-node.sh). +MILLAU_PORT="${MILLAU_PORT:-9945}" +RIALTO_PORT="${RIALTO_PORT:-9944}" + RUST_LOG=bridge=debug \ -./target/debug/substrate-relay init-bridge millau-to-rialto \ - --millau-host localhost \ - --millau-port 9945 \ - --rialto-host localhost \ - --rialto-port 9944 \ - --rialto-signer //Alice \ +./target/debug/substrate-relay init-bridge MillauToRialto \ + --source-host localhost \ + --source-port $MILLAU_PORT \ + --target-host localhost \ + --target-port $RIALTO_PORT \ + --target-signer //Alice \ sleep 5 RUST_LOG=bridge=debug \ -./target/debug/substrate-relay relay-headers millau-to-rialto \ - --millau-host localhost \ - --millau-port 9945 \ - --rialto-host localhost \ - --rialto-port 9944 \ - --rialto-signer //Alice \ +./target/debug/substrate-relay relay-headers MillauToRialto \ + --source-host localhost \ + --source-port $MILLAU_PORT \ + --target-host localhost \ + --target-port $RIALTO_PORT \ + --target-signer //Alice \ --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh b/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh index 
710b317fa0..6382cdca82 100755 --- a/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh +++ b/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh @@ -5,20 +5,23 @@ # Will not work unless both the Rialto and Millau are running (see `run-rialto-node.sh` # and `run-millau-node.sh). +MILLAU_PORT="${MILLAU_PORT:-9945}" +RIALTO_PORT="${RIALTO_PORT:-9944}" + RUST_LOG=bridge=debug \ -./target/debug/substrate-relay init-bridge rialto-to-millau \ - --millau-host localhost \ - --millau-port 9945 \ - --rialto-host localhost \ - --rialto-port 9944 \ - --millau-signer //Alice \ +./target/debug/substrate-relay init-bridge RialtoToMillau \ + --target-host localhost \ + --target-port $MILLAU_PORT \ + --source-host localhost \ + --source-port $RIALTO_PORT \ + --target-signer //Alice \ sleep 5 RUST_LOG=bridge=debug \ -./target/debug/substrate-relay relay-headers rialto-to-millau \ - --millau-host localhost \ - --millau-port 9945 \ - --rialto-host localhost \ - --rialto-port 9944 \ - --millau-signer //Alice \ +./target/debug/substrate-relay relay-headers RialtoToMillau \ + --target-host localhost \ + --target-port $MILLAU_PORT \ + --source-host localhost \ + --source-port $RIALTO_PORT \ + --target-signer //Alice \ --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/local-scripts/run-millau-node.sh b/polkadot/bridges/deployments/local-scripts/run-millau-node.sh index 6665c09af5..916f876c53 100755 --- a/polkadot/bridges/deployments/local-scripts/run-millau-node.sh +++ b/polkadot/bridges/deployments/local-scripts/run-millau-node.sh @@ -1,8 +1,11 @@ #!/bin/bash # Run a development instance of the Millau Substrate bridge node. 
+# To override the default port just export MILLAU_PORT=9945 + +MILLAU_PORT="${MILLAU_PORT:-9945}" RUST_LOG=runtime=trace \ ./target/debug/millau-bridge-node --dev --tmp \ --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ - --port 33044 --rpc-port 9934 --ws-port 9945 \ + --port 33044 --rpc-port 9934 --ws-port $MILLAU_PORT \ diff --git a/polkadot/bridges/deployments/local-scripts/run-rialto-node.sh b/polkadot/bridges/deployments/local-scripts/run-rialto-node.sh index 770284b9f4..e7987e2af3 100755 --- a/polkadot/bridges/deployments/local-scripts/run-rialto-node.sh +++ b/polkadot/bridges/deployments/local-scripts/run-rialto-node.sh @@ -1,8 +1,11 @@ #!/bin/bash # Run a development instance of the Rialto Substrate bridge node. +# To override the default port just export RIALTO_PORT=9944 + +RIALTO_PORT="${RIALTO_PORT:-9944}" RUST_LOG=runtime=trace \ ./target/debug/rialto-bridge-node --dev --tmp \ --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ - --port 33033 --rpc-port 9933 --ws-port 9944 \ + --port 33033 --rpc-port 9933 --ws-port $RIALTO_PORT \ diff --git a/polkadot/bridges/deployments/local-scripts/run-rococo-bob-node.sh b/polkadot/bridges/deployments/local-scripts/run-rococo-bob-node.sh new file mode 100755 index 0000000000..550d8cf755 --- /dev/null +++ b/polkadot/bridges/deployments/local-scripts/run-rococo-bob-node.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Run a development instance of the Rococo Substrate bridge node. +# To override the default port just export ROCOCO_BOB_PORT=9966 +# +# Note: This script will not work out of the box with the bridges +# repo since it relies on a Polkadot binary. 
+ +ROCOCO_BOB_PORT="${ROCOCO_BOB_PORT:-9966}" + +RUST_LOG=runtime=trace,runtime::bridge=trace \ +./target/debug/polkadot --chain=rococo-local --bob --tmp \ + --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ + --port 33055 --rpc-port 9935 --ws-port $ROCOCO_BOB_PORT \ diff --git a/polkadot/bridges/deployments/local-scripts/run-rococo-node.sh b/polkadot/bridges/deployments/local-scripts/run-rococo-node.sh new file mode 100755 index 0000000000..073d39a3ea --- /dev/null +++ b/polkadot/bridges/deployments/local-scripts/run-rococo-node.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Run a development instance of the Rococo Substrate bridge node. +# To override the default port just export ROCOCO_PORT=9955 +# +# Note: This script will not work out of the box with the bridges +# repo since it relies on a Polkadot binary. + +ROCOCO_PORT="${ROCOCO_PORT:-9955}" + +RUST_LOG=runtime=trace,runtime::bridge=trace \ +./target/debug/polkadot --chain=rococo-local --alice --tmp \ + --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ + --port 33044 --rpc-port 9934 --ws-port $ROCOCO_PORT \ diff --git a/polkadot/bridges/deployments/local-scripts/run-westend-node.sh b/polkadot/bridges/deployments/local-scripts/run-westend-node.sh new file mode 100755 index 0000000000..1bb490fc1a --- /dev/null +++ b/polkadot/bridges/deployments/local-scripts/run-westend-node.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Run a development instance of the Westend Substrate bridge node. +# To override the default port just export WESTEND_PORT=9944 +# +# Note: This script will not work out of the box with the bridges +# repo since it relies on a Polkadot binary. 
+ +WESTEND_PORT="${WESTEND_PORT:-9944}" + +RUST_LOG=runtime=trace,runtime::bridge=trace \ +./target/debug/polkadot --chain=westend-dev --alice --tmp \ + --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ + --port 33033 --rpc-port 9933 --ws-port $WESTEND_PORT \ diff --git a/polkadot/bridges/deployments/monitoring/docker-compose.yml b/polkadot/bridges/deployments/monitoring/docker-compose.yml index f4356306c8..5456cb76dc 100644 --- a/polkadot/bridges/deployments/monitoring/docker-compose.yml +++ b/polkadot/bridges/deployments/monitoring/docker-compose.yml @@ -2,6 +2,8 @@ version: '3.5' services: prometheus-metrics: image: prom/prometheus:v2.20.1 + volumes: + - ./monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml ports: - "9090:9090" @@ -11,6 +13,8 @@ services: GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASS:-admin} GF_SERVER_ROOT_URL: ${GRAFANA_SERVER_ROOT_URL} GF_SERVER_DOMAIN: ${GRAFANA_SERVER_DOMAIN} + volumes: + - ./monitoring/grafana/provisioning/:/etc/grafana/provisioning/:ro ports: - "3000:3000" depends_on: diff --git a/polkadot/bridges/deployments/monitoring/grafana-matrix/config.yml b/polkadot/bridges/deployments/monitoring/grafana-matrix/config.yml index ae55b9b6dc..645ee708fe 100644 --- a/polkadot/bridges/deployments/monitoring/grafana-matrix/config.yml +++ b/polkadot/bridges/deployments/monitoring/grafana-matrix/config.yml @@ -11,7 +11,7 @@ matrix: # Create a user - log that user in using a post request # curl -XPOST -d '{"type": "m.login.password", # "user":"grafana", - # "password":"2m4ny53cr3t5"}' + # "password":"dummy-password"}' # "https://my-matrix-server/_matrix/client/r0/login" # Fill that access token in here access_token: "" diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/grafana-dashboard.yaml b/polkadot/bridges/deployments/monitoring/grafana/provisioning/dashboards/grafana-dashboard.yaml similarity index 57% rename from 
polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/grafana-dashboard.yaml rename to polkadot/bridges/deployments/monitoring/grafana/provisioning/dashboards/grafana-dashboard.yaml index d671bfb224..d14ed2637d 100644 --- a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/grafana-dashboard.yaml +++ b/polkadot/bridges/deployments/monitoring/grafana/provisioning/dashboards/grafana-dashboard.yaml @@ -3,4 +3,4 @@ folder: '' type: file options: - path: '/etc/grafana/provisioning/dashboards' \ No newline at end of file + path: '/etc/grafana/dashboards' \ No newline at end of file diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/datasources/grafana-datasource.yaml b/polkadot/bridges/deployments/monitoring/grafana/provisioning/datasources/grafana-datasource.yaml similarity index 100% rename from polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/datasources/grafana-datasource.yaml rename to polkadot/bridges/deployments/monitoring/grafana/provisioning/datasources/grafana-datasource.yaml diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/notifiers/grafana-notifier.yaml b/polkadot/bridges/deployments/monitoring/grafana/provisioning/notifiers/grafana-notifier.yaml similarity index 100% rename from polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/notifiers/grafana-notifier.yaml rename to polkadot/bridges/deployments/monitoring/grafana/provisioning/notifiers/grafana-notifier.yaml diff --git a/polkadot/bridges/deployments/monitoring/prometheus/prometheus.yml b/polkadot/bridges/deployments/monitoring/prometheus/prometheus.yml new file mode 100644 index 0000000000..7092bd2731 --- /dev/null +++ b/polkadot/bridges/deployments/monitoring/prometheus/prometheus.yml @@ -0,0 +1,7 @@ +global: + scrape_interval: 15s +scrape_configs: + - job_name: dummy + file_sd_configs: + - 
files: + - /etc/prometheus/targets-*.yml diff --git a/polkadot/bridges/deployments/networks/millau.yml b/polkadot/bridges/deployments/networks/millau.yml index 43238df09b..54790579f1 100644 --- a/polkadot/bridges/deployments/networks/millau.yml +++ b/polkadot/bridges/deployments/networks/millau.yml @@ -20,7 +20,7 @@ services: - --unsafe-rpc-external - --unsafe-ws-external environment: - RUST_LOG: runtime=trace,rpc=debug,txpool=trace,pallet_substrate_bridge=trace,pallet_bridge_call_dispatch=trace,pallet_message_lane=trace + RUST_LOG: runtime=trace,rpc=debug,txpool=trace,runtime::bridge=trace ports: - "19933:9933" - "19944:9944" @@ -37,8 +37,6 @@ services: - --rpc-cors=all - --unsafe-rpc-external - --unsafe-ws-external - environment: - RUST_LOG: runtime=trace,rpc=debug,txpool=trace,pallet_substrate_bridge=trace,pallet_bridge_call_dispatch=trace,pallet_message_lane=trace,jsonrpc_ws_server=trace,parity_ws=trace ports: - "20033:9933" - "20044:9944" diff --git a/polkadot/bridges/deployments/networks/rialto.yml b/polkadot/bridges/deployments/networks/rialto.yml index 7d8ba1abd5..3039d7c33b 100644 --- a/polkadot/bridges/deployments/networks/rialto.yml +++ b/polkadot/bridges/deployments/networks/rialto.yml @@ -20,7 +20,7 @@ services: - --unsafe-rpc-external - --unsafe-ws-external environment: - RUST_LOG: runtime=trace,rpc=debug,txpool=trace,pallet_substrate_bridge=trace,pallet_bridge_call_dispatch=trace,pallet_message_lane=trace + RUST_LOG: runtime=trace,rpc=debug,txpool=trace,runtime::bridge=trace ports: - "9933:9933" - "9944:9944" @@ -37,8 +37,6 @@ services: - --rpc-cors=all - --unsafe-rpc-external - --unsafe-ws-external - environment: - RUST_LOG: runtime=trace,rpc=debug,txpool=trace,pallet_substrate_bridge=trace,pallet_bridge_call_dispatch=trace,pallet_message_lane=trace,jsonrpc_ws_server=trace,parity_ws=trace ports: - "10033:9933" - "10044:9944" diff --git a/polkadot/bridges/deployments/run.sh b/polkadot/bridges/deployments/run.sh index fe4afd0fbb..a79638352a 100755 --- 
a/polkadot/bridges/deployments/run.sh +++ b/polkadot/bridges/deployments/run.sh @@ -2,9 +2,9 @@ # Script used for running and updating bridge deployments. # -# To deploy a network you can run this script with the name of the network you want to run. +# To deploy a network you can run this script with the name of the bridge (or multiple bridges) you want to run. # -# `./run.sh poa-rialto` +# `./run.sh poa-rialto rialto-millau` # # To update a deployment to use the latest images available from the Docker Hub add the `update` # argument after the bridge name. @@ -14,6 +14,10 @@ # Once you've stopped having fun with your deployment you can take it down with: # # `./run.sh rialto-millau stop` +# +# Stopping the bridge will also bring down all networks that it uses. So if you have started multiple bridges +# that are using the same network (like Millau in rialto-millau and westend-millau bridges), then stopping one +# of these bridges will cause the other bridge to break. set -xeu @@ -28,9 +32,13 @@ function show_help () { echo "Usage:" echo " ./run.sh poa-rialto [stop|update] Run PoA <> Rialto Networks & Bridge" echo " ./run.sh rialto-millau [stop|update] Run Rialto <> Millau Networks & Bridge" + echo " ./run.sh westend-millau [stop|update] Run Westend -> Millau Networks & Bridge" echo " " echo "Options:" echo " --no-monitoring Disable monitoring" + echo " " + echo "You can start multiple bridges at once by passing several bridge names:" + echo " ./run.sh poa-rialto rialto-millau westend-millau [stop|update]" exit 1 } @@ -39,7 +47,7 @@ MILLAU=' -f ./networks/millau.yml' ETH_POA=' -f ./networks/eth-poa.yml' MONITORING=' -f ./monitoring/docker-compose.yml' -BRIDGE='' +BRIDGES=() NETWORKS='' SUB_COMMAND='start' for i in "$@" @@ -48,17 +56,28 @@ do --no-monitoring) MONITORING=" -f ./monitoring/disabled.yml" shift + continue ;; poa-rialto) - BRIDGE=$i + BRIDGES+=($i) NETWORKS+=${RIALTO} + RIALTO='' NETWORKS+=${ETH_POA} + ETH_POA='' shift ;; rialto-millau) - BRIDGE=$i + 
BRIDGES+=($i) NETWORKS+=${RIALTO} + RIALTO='' NETWORKS+=${MILLAU} + MILLAU='' + shift + ;; + westend-millau) + BRIDGES+=($i) + NETWORKS+=${MILLAU} + MILLAU='' shift ;; start|stop|update) @@ -71,24 +90,38 @@ do esac done -if [ -z "$BRIDGE" ]; then +if [ ${#BRIDGES[@]} -eq 0 ]; then show_help "Missing bridge name." fi -BRIDGE_PATH="./bridges/$BRIDGE" -BRIDGE="-f $BRIDGE_PATH/docker-compose.yml" -COMPOSE_FILES=$BRIDGE$NETWORKS$MONITORING +COMPOSE_FILES=$NETWORKS$MONITORING # Compose looks for .env files in the the current directory by default, we don't want that -COMPOSE_ARGS="--project-directory . --env-file " -COMPOSE_ARGS+=$BRIDGE_PATH/.env +COMPOSE_ARGS="--project-directory ." +# Path to env file that we want to use. Compose only accepts single `--env-file` argument, +# so we'll be using the last .env file we'll found. +COMPOSE_ENV_FILE='' -# Read and source variables from .env file so we can use them here -grep -e MATRIX_ACCESS_TOKEN -e WITH_PROXY $BRIDGE_PATH/.env > .env2 && . ./.env2 && rm .env2 +for BRIDGE in "${BRIDGES[@]}" +do + BRIDGE_PATH="./bridges/$BRIDGE" + BRIDGE=" -f $BRIDGE_PATH/docker-compose.yml" + COMPOSE_FILES=$BRIDGE$COMPOSE_FILES -if [ ! -z ${MATRIX_ACCESS_TOKEN+x} ]; then - sed -i "s/access_token.*/access_token: \"$MATRIX_ACCESS_TOKEN\"/" ./monitoring/grafana-matrix/config.yml -fi + # Remember .env file to use in docker-compose call + if [[ -f "$BRIDGE_PATH/.env" ]]; then + COMPOSE_ENV_FILE=" --env-file $BRIDGE_PATH/.env" + fi + + # Read and source variables from .env file so we can use them here + grep -e MATRIX_ACCESS_TOKEN -e WITH_PROXY $BRIDGE_PATH/.env > .env2 && . ./.env2 && rm .env2 + if [ ! -z ${MATRIX_ACCESS_TOKEN+x} ]; then + sed -i "s/access_token.*/access_token: \"$MATRIX_ACCESS_TOKEN\"/" ./monitoring/grafana-matrix/config.yml + fi +done + +# Final COMPOSE_ARGS +COMPOSE_ARGS="$COMPOSE_ARGS $COMPOSE_ENV_FILE" # Check the sub-command, perhaps we just mean to stop the network instead of starting it. 
if [ "$SUB_COMMAND" == "stop" ]; then diff --git a/polkadot/bridges/deployments/types-millau.json b/polkadot/bridges/deployments/types-millau.json new file mode 100644 index 0000000000..2414620733 --- /dev/null +++ b/polkadot/bridges/deployments/types-millau.json @@ -0,0 +1,172 @@ +{ + "--1": "Millau Types", + "MillauBalance": "u64", + "MillauBlockHash": "H512", + "MillauBlockNumber": "u64", + "MillauHeader": { + "parent_Hash": "MillauBlockHash", + "number": "Compact", + "state_root": "MillauBlockHash", + "extrinsics_root": "MillauBlockHash", + "digest": "MillauDigest" + }, + "MillauDigest": { + "logs": "Vec" + }, + "MillauDigestItem": { + "_enum": { + "Other": "Vec", + "AuthoritiesChange": "Vec", + "ChangesTrieRoot": "MillauBlockHash", + "SealV0": "SealV0", + "Consensus": "Consensus", + "Seal": "Seal", + "PreRuntime": "PreRuntime" + } + }, + "--2": "Rialto Types", + "RialtoBalance": "u128", + "RialtoBlockHash": "H256", + "RialtoBlockNumber": "u32", + "RialtoHeader": { + "parent_Hash": "RialtoBlockHash", + "number": "Compact", + "state_root": "RialtoBlockHash", + "extrinsics_root": "RialtoBlockHash", + "digest": "RialtoDigest" + }, + "RialtoDigest": { + "logs": "Vec" + }, + "RialtoDigestItem": { + "_enum": { + "Other": "Vec", + "AuthoritiesChange": "Vec", + "ChangesTrieRoot": "RialtoBlockHash", + "SealV0": "SealV0", + "Consensus": "Consensus", + "Seal": "Seal", + "PreRuntime": "PreRuntime" + } + }, + "--3": "Common types", + "Address": "AccountId", + "LookupSource": "AccountId", + "AccountSigner": "MultiSigner", + "SpecVersion": "u32", + "RelayerId": "AccountId", + "SourceAccountId": "AccountId", + "ImportedHeader": { + "header": "BridgedHeader", + "requires_justification": "bool", + "is_finalized": "bool", + "signal_hash": "Option" + }, + "AuthoritySet": { + "authorities": "AuthorityList", + "set_id": "SetId" + }, + "Id": "[u8; 4]", + "InstanceId": "Id", + "LaneId": "Id", + "MessageNonce": "u64", + "MessageId": "(Id, u64)", + "MessageKey": { + "lane_id": "LaneId", 
+ "nonce:": "MessageNonce" + }, + "InboundRelayer": "AccountId", + "InboundLaneData": { + "relayers": "Vec<(MessageNonce, MessageNonce, RelayerId)>", + "last_confirmed_nonce": "MessageNonce" + }, + "OutboundLaneData": { + "latest_generated_nonce": "MessageNonce", + "latest_received_nonce": "MessageNonce", + "oldest_unpruned_nonce": "MessageNonce" + }, + "MessageData": { + "payload": "MessagePayload", + "fee": "Fee" + }, + "MessagePayload": "Vec", + "BridgedOpaqueCall": "Vec", + "OutboundMessageFee": "Fee", + "OutboundPayload": { + "spec_version": "SpecVersion", + "weight": "Weight", + "origin": "CallOrigin", + "call": "BridgedOpaqueCall" + }, + "CallOrigin": { + "_enum": { + "SourceRoot": "()", + "TargetAccount": "(SourceAccountId, MultiSigner, MultiSignature)", + "SourceAccount": "SourceAccountId" + } + }, + "MultiSigner": { + "_enum": { + "Ed25519": "H256", + "Sr25519": "H256", + "Ecdsa": "[u8;33]" + } + }, + "MessagesProofOf": { + "bridged_header_hash": "BridgedBlockHash", + "storage_proof": "Vec", + "lane": "LaneId", + "nonces_start": "MessageNonce", + "nonces_end": "MessageNonce" + }, + "StorageProofItem": "Vec", + "MessagesDeliveryProofOf": { + "bridged_header_hash": "BridgedBlockHash", + "storage_proof": "Vec", + "lane": "LaneId" + }, + "UnrewardedRelayersState": { + "unrewarded_relayer_entries": "MessageNonce", + "messages_in_oldest_entry": "MessageNonce", + "total_messages": "MessageNonce" + }, + "AncestryProof": "()", + "MessageFeeData": { + "lane_id": "LaneId", + "payload": "OutboundPayload" + }, + "Precommit": { + "target_hash": "BridgedBlockHash", + "target_number": "BridgedBlockNumber" + }, + "AuthoritySignature": "[u8;64]", + "AuthorityId": "[u8;32]", + "SignedPrecommit": { + "precommit": "Precommit", + "signature": "AuthoritySignature", + "id": "AuthorityId" + }, + "Commit": { + "target_hash": "BridgedBlockHash", + "target_number": "BridgedBlockNumber", + "precommits": "Vec" + }, + "GrandpaJustification": { + "round": "u64", + "commit": "Commit", + 
"votes_ancestries": "Vec" + }, + "Fee": "MillauBalance", + "Balance": "MillauBalance", + "Hash": "MillauBlockHash", + "BlockHash": "MillauBlockHash", + "BlockNumber": "MillauBlockNumber", + "BridgedBlockHash": "RialtoBlockHash", + "BridgedBlockNumber": "RialtoBlockNumber", + "BridgedHeader": "RialtoHeader", + "Parameter": { + "_enum": { + "MillauToRialtoConversionRate": "u128" + } + } +} diff --git a/polkadot/bridges/deployments/types-rialto.json b/polkadot/bridges/deployments/types-rialto.json new file mode 100644 index 0000000000..bd746e003e --- /dev/null +++ b/polkadot/bridges/deployments/types-rialto.json @@ -0,0 +1,171 @@ +{ + "--1": "Millau Types", + "MillauBalance": "u64", + "MillauBlockHash": "H512", + "MillauBlockNumber": "u64", + "MillauHeader": { + "parent_Hash": "MillauBlockHash", + "number": "Compact", + "state_root": "MillauBlockHash", + "extrinsics_root": "MillauBlockHash", + "digest": "MillauDigest" + }, + "MillauDigest": { + "logs": "Vec" + }, + "MillauDigestItem": { + "_enum": { + "Other": "Vec", + "AuthoritiesChange": "Vec", + "ChangesTrieRoot": "MillauBlockHash", + "SealV0": "SealV0", + "Consensus": "Consensus", + "Seal": "Seal", + "PreRuntime": "PreRuntime" + } + }, + "--2": "Rialto Types", + "RialtoBalance": "u128", + "RialtoBlockHash": "H256", + "RialtoBlockNumber": "u32", + "RialtoHeader": { + "parent_Hash": "RialtoBlockHash", + "number": "Compact", + "state_root": "RialtoBlockHash", + "extrinsics_root": "RialtoBlockHash", + "digest": "RialtoDigest" + }, + "RialtoDigest": { + "logs": "Vec" + }, + "RialtoDigestItem": { + "_enum": { + "Other": "Vec", + "AuthoritiesChange": "Vec", + "ChangesTrieRoot": "RialtoBlockHash", + "SealV0": "SealV0", + "Consensus": "Consensus", + "Seal": "Seal", + "PreRuntime": "PreRuntime" + } + }, + "--3": "Common types", + "Address": "AccountId", + "LookupSource": "AccountId", + "AccountSigner": "MultiSigner", + "SpecVersion": "u32", + "RelayerId": "AccountId", + "SourceAccountId": "AccountId", + "ImportedHeader": { 
+ "header": "BridgedHeader", + "requires_justification": "bool", + "is_finalized": "bool", + "signal_hash": "Option" + }, + "AuthoritySet": { + "authorities": "AuthorityList", + "set_id": "SetId" + }, + "Id": "[u8; 4]", + "InstanceId": "Id", + "LaneId": "Id", + "MessageNonce": "u64", + "MessageId": "(Id, u64)", + "MessageKey": { + "lane_id": "LaneId", + "nonce:": "MessageNonce" + }, + "InboundRelayer": "AccountId", + "InboundLaneData": { + "relayers": "Vec<(MessageNonce, MessageNonce, RelayerId)>", + "last_confirmed_nonce": "MessageNonce" + }, + "OutboundLaneData": { + "latest_generated_nonce": "MessageNonce", + "latest_received_nonce": "MessageNonce", + "oldest_unpruned_nonce": "MessageNonce" + }, + "MessageData": { + "payload": "MessagePayload", + "fee": "Fee" + }, + "MessagePayload": "Vec", + "BridgedOpaqueCall": "Vec", + "OutboundMessageFee": "Fee", + "OutboundPayload": { + "spec_version": "SpecVersion", + "weight": "Weight", + "origin": "CallOrigin", + "call": "BridgedOpaqueCall" + }, + "CallOrigin": { + "_enum": { + "SourceRoot": "()", + "TargetAccount": "(SourceAccountId, MultiSigner, MultiSignature)", + "SourceAccount": "SourceAccountId" + } + }, + "MultiSigner": { + "_enum": { + "Ed25519": "H256", + "Sr25519": "H256", + "Ecdsa": "[u8;33]" + } + }, + "MessagesProofOf": { + "bridged_header_hash": "BridgedBlockHash", + "storage_proof": "Vec", + "lane": "LaneId", + "nonces_start": "MessageNonce", + "nonces_end": "MessageNonce" + }, + "StorageProofItem": "Vec", + "MessagesDeliveryProofOf": { + "bridged_header_hash": "BridgedBlockHash", + "storage_proof": "Vec", + "lane": "LaneId" + }, + "UnrewardedRelayersState": { + "unrewarded_relayer_entries": "MessageNonce", + "messages_in_oldest_entry": "MessageNonce", + "total_messages": "MessageNonce" + }, + "AncestryProof": "()", + "MessageFeeData": { + "lane_id": "LaneId", + "payload": "OutboundPayload" + }, + "Precommit": { + "target_hash": "BridgedBlockHash", + "target_number": "BridgedBlockNumber" + }, + 
"AuthoritySignature": "[u8;64]", + "AuthorityId": "[u8;32]", + "SignedPrecommit": { + "precommit": "Precommit", + "signature": "AuthoritySignature", + "id": "AuthorityId" + }, + "Commit": { + "target_hash": "BridgedBlockHash", + "target_number": "BridgedBlockNumber", + "precommits": "Vec" + }, + "GrandpaJustification": { + "round": "u64", + "commit": "Commit", + "votes_ancestries": "Vec" + }, + "Fee": "RialtoBalance", + "Balance": "RialtoBalance", + "BlockHash": "RialtoBlockHash", + "BlockNumber": "RialtoBlockNumber", + "BridgedBlockHash": "MillauBlockHash", + "BridgedBlockNumber": "MillauBlockNumber", + "BridgedHeader": "MillauHeader", + "Parameter": { + "_enum": { + "RialtoToMillauConversionRate": "u128" + } + } +} diff --git a/polkadot/bridges/deployments/types.json b/polkadot/bridges/deployments/types.json deleted file mode 100644 index b7b0c35d2f..0000000000 --- a/polkadot/bridges/deployments/types.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "HeaderId": { - "number": "u64", - "hash": "Hash" - }, - "PruningRange": { - "oldest_unpruned_block": "u64", - "oldest_block_to_keep": "u64" - }, - "FinalityVotes": { - "votes": "Map", - "ancestry": "Vec" - }, - "FinalityAncestor": { - "id": "HeaderId", - "submitter": "Option
", - "signers": "Vec
" - }, - "StoredHeader": { - "submitter": "Option
", - "header": "AuraHeader", - "total_difficulty": "U256", - "next_validator_set_id": "u64", - "last_signal_block": "Option" - }, - "AuraHeader": { - "parent_hash": "Hash", - "timestamp": "u64", - "number": "u64", - "author": "Address", - "transactions_root": "Hash", - "uncles_hash": "Hash", - "extra_data": "Bytes", - "state_root": "Hash", - "receipts_root": "Hash", - "log_bloom": "Hash", - "gas_used": "u64", - "gas_limit": "u64", - "difficulty": "u64", - "seal": "Vec" - }, - "AuraScheduledChange": { - "validators": "Vec
", - "prev_signal_block": "Option" - }, - "ValidatorsSet": { - "validators": "Vec
", - "signal_block": "Option", - "enact_block": "HeaderId" - }, - "BridgedBlockHash": "H256", - "BridgedBlockHasher": "BlakeTwo256", - "BridgedBlockNumber": "u32", - "BridgedHeader": "Header", - "ImportedHeader": { - "header": "BridgedHeader", - "requires_justification": "bool", - "is_finalized": "bool", - "signal_hash": "Option" - }, - "AuthoritySet": { - "authorities": "AuthorityList", - "set_id": "SetId" - }, - "ScheduledChange": { - "authority_set": "AuthoritySet", - "height": "BridgedBlockNumber" - }, - "Id": "[u8; 4]", - "InstanceId": "Id", - "LaneId": "Id", - "MessageNonce": "u64", - "MessageId": "(Id, u64)", - "MessageKey": { - "lane_id": "LaneId", - "nonce:": "MessageNonce" - } -} diff --git a/polkadot/bridges/deployments/types/build.sh b/polkadot/bridges/deployments/types/build.sh new file mode 100755 index 0000000000..52605e7e4d --- /dev/null +++ b/polkadot/bridges/deployments/types/build.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +# The script generates JSON type definition files in `./deployment` directory to be used for +# JS clients. +# Both networks have a lot of common types, so to avoid duplication we merge `common.json` file with +# chain-specific definitions in `rialto|millau.json`. + +set -exu + +# Make sure we are in the right dir. +cd $(dirname $(realpath $0)) + +# Create rialto and millau types. 
+jq -s '.[0] * .[1]' common.json rialto.json > ../types-rialto.json +jq -s '.[0] * .[1]' common.json millau.json > ../types-millau.json diff --git a/polkadot/bridges/deployments/types/common.json b/polkadot/bridges/deployments/types/common.json new file mode 100644 index 0000000000..cf88128869 --- /dev/null +++ b/polkadot/bridges/deployments/types/common.json @@ -0,0 +1,159 @@ +{ + "--1": "Millau Types", + "MillauBalance": "u64", + "MillauBlockHash": "H512", + "MillauBlockNumber": "u64", + "MillauHeader": { + "parent_Hash": "MillauBlockHash", + "number": "Compact", + "state_root": "MillauBlockHash", + "extrinsics_root": "MillauBlockHash", + "digest": "MillauDigest" + }, + "MillauDigest": { + "logs": "Vec" + }, + "MillauDigestItem": { + "_enum": { + "Other": "Vec", + "AuthoritiesChange": "Vec", + "ChangesTrieRoot": "MillauBlockHash", + "SealV0": "SealV0", + "Consensus": "Consensus", + "Seal": "Seal", + "PreRuntime": "PreRuntime" + } + }, + "--2": "Rialto Types", + "RialtoBalance": "u128", + "RialtoBlockHash": "H256", + "RialtoBlockNumber": "u32", + "RialtoHeader": { + "parent_Hash": "RialtoBlockHash", + "number": "Compact", + "state_root": "RialtoBlockHash", + "extrinsics_root": "RialtoBlockHash", + "digest": "RialtoDigest" + }, + "RialtoDigest": { + "logs": "Vec" + }, + "RialtoDigestItem": { + "_enum": { + "Other": "Vec", + "AuthoritiesChange": "Vec", + "ChangesTrieRoot": "RialtoBlockHash", + "SealV0": "SealV0", + "Consensus": "Consensus", + "Seal": "Seal", + "PreRuntime": "PreRuntime" + } + }, + "--3": "Common types", + "Address": "AccountId", + "LookupSource": "AccountId", + "AccountSigner": "MultiSigner", + "SpecVersion": "u32", + "RelayerId": "AccountId", + "SourceAccountId": "AccountId", + "ImportedHeader": { + "header": "BridgedHeader", + "requires_justification": "bool", + "is_finalized": "bool", + "signal_hash": "Option" + }, + "AuthoritySet": { + "authorities": "AuthorityList", + "set_id": "SetId" + }, + "Id": "[u8; 4]", + "InstanceId": "Id", + "LaneId": 
"Id", + "MessageNonce": "u64", + "MessageId": "(Id, u64)", + "MessageKey": { + "lane_id": "LaneId", + "nonce:": "MessageNonce" + }, + "InboundRelayer": "AccountId", + "InboundLaneData": { + "relayers": "Vec<(MessageNonce, MessageNonce, RelayerId)>", + "last_confirmed_nonce": "MessageNonce" + }, + "OutboundLaneData": { + "latest_generated_nonce": "MessageNonce", + "latest_received_nonce": "MessageNonce", + "oldest_unpruned_nonce": "MessageNonce" + }, + "MessageData": { + "payload": "MessagePayload", + "fee": "Fee" + }, + "MessagePayload": "Vec", + "BridgedOpaqueCall": "Vec", + "OutboundMessageFee": "Fee", + "OutboundPayload": { + "spec_version": "SpecVersion", + "weight": "Weight", + "origin": "CallOrigin", + "call": "BridgedOpaqueCall" + }, + "CallOrigin": { + "_enum": { + "SourceRoot": "()", + "TargetAccount": "(SourceAccountId, MultiSigner, MultiSignature)", + "SourceAccount": "SourceAccountId" + } + }, + "MultiSigner": { + "_enum": { + "Ed25519": "H256", + "Sr25519": "H256", + "Ecdsa": "[u8;33]" + } + }, + "MessagesProofOf": { + "bridged_header_hash": "BridgedBlockHash", + "storage_proof": "Vec", + "lane": "LaneId", + "nonces_start": "MessageNonce", + "nonces_end": "MessageNonce" + }, + "StorageProofItem": "Vec", + "MessagesDeliveryProofOf": { + "bridged_header_hash": "BridgedBlockHash", + "storage_proof": "Vec", + "lane": "LaneId" + }, + "UnrewardedRelayersState": { + "unrewarded_relayer_entries": "MessageNonce", + "messages_in_oldest_entry": "MessageNonce", + "total_messages": "MessageNonce" + }, + "AncestryProof": "()", + "MessageFeeData": { + "lane_id": "LaneId", + "payload": "OutboundPayload" + }, + "Precommit": { + "target_hash": "BridgedBlockHash", + "target_number": "BridgedBlockNumber" + }, + "AuthoritySignature": "[u8;64]", + "AuthorityId": "[u8;32]", + "SignedPrecommit": { + "precommit": "Precommit", + "signature": "AuthoritySignature", + "id": "AuthorityId" + }, + "Commit": { + "target_hash": "BridgedBlockHash", + "target_number": 
"BridgedBlockNumber", + "precommits": "Vec" + }, + "GrandpaJustification": { + "round": "u64", + "commit": "Commit", + "votes_ancestries": "Vec" + } +} diff --git a/polkadot/bridges/deployments/types/millau.json b/polkadot/bridges/deployments/types/millau.json new file mode 100644 index 0000000000..bfc86491a5 --- /dev/null +++ b/polkadot/bridges/deployments/types/millau.json @@ -0,0 +1,16 @@ +{ + "Fee": "MillauBalance", + "Balance": "MillauBalance", + "Hash": "MillauBlockHash", + "BlockHash": "MillauBlockHash", + "BlockNumber": "MillauBlockNumber", + "BridgedBlockHash": "RialtoBlockHash", + "BridgedBlockNumber": "RialtoBlockNumber", + "BridgedHeader": "RialtoHeader", + "Parameter": { + "_enum": { + "MillauToRialtoConversionRate": "u128" + } + } + +} diff --git a/polkadot/bridges/deployments/types/rialto.json b/polkadot/bridges/deployments/types/rialto.json new file mode 100644 index 0000000000..fe1ba31e8a --- /dev/null +++ b/polkadot/bridges/deployments/types/rialto.json @@ -0,0 +1,14 @@ +{ + "Fee": "RialtoBalance", + "Balance": "RialtoBalance", + "BlockHash": "RialtoBlockHash", + "BlockNumber": "RialtoBlockNumber", + "BridgedBlockHash": "MillauBlockHash", + "BridgedBlockNumber": "MillauBlockNumber", + "BridgedHeader": "MillauHeader", + "Parameter": { + "_enum": { + "RialtoToMillauConversionRate": "u128" + } + } +} diff --git a/polkadot/bridges/docs/high-level-overview.md b/polkadot/bridges/docs/high-level-overview.md index 763371bbf1..14b1eee6d4 100644 --- a/polkadot/bridges/docs/high-level-overview.md +++ b/polkadot/bridges/docs/high-level-overview.md @@ -90,7 +90,7 @@ message dispatch. #### Message Lanes Delivery -The [Message delivery pallet](../modules/message-lane/src/lib.rs) is responsible for queueing up +The [Message delivery pallet](../modules/messages/src/lib.rs) is responsible for queueing up messages and delivering them in order on the target chain. It also dispatches messages, but we will cover that in the next section. 
@@ -131,7 +131,7 @@ require bi-directional header sync (i.e. you can't use message delivery with one #### Dispatching Messages -The [Message dispatch pallet](../modules/call-dispatch/src/lib.rs) is used to perform the actions +The [Message dispatch pallet](../modules/dispatch/src/lib.rs) is used to perform the actions specified by messages which have come over the bridge. For Substrate-based chains this means interpreting the source chain's message as a `Call` on the target chain. @@ -172,6 +172,6 @@ source chain needs to prove ownership of this account by using their target chai sign: `(Call, SourceChainAccountId).encode()`. This will be included in the message payload and verified by the target chain before dispatch. -See [`CallOrigin` documentation](../modules/call-dispatch/src/lib.rs) for more details. +See [`CallOrigin` documentation](../modules/dispatch/src/lib.rs) for more details. #### Message Relayers Strategy diff --git a/polkadot/bridges/docs/send-message.md b/polkadot/bridges/docs/send-message.md index 243bf8ce59..91d3bfd976 100644 --- a/polkadot/bridges/docs/send-message.md +++ b/polkadot/bridges/docs/send-message.md @@ -10,10 +10,10 @@ USAGE: substrate-relay FLAGS: - -h, --help + -h, --help Prints help information - -V, --version + -V, --version Prints version information @@ -34,7 +34,7 @@ For sending custom messages over an avialable bridge, the `send-message` command ``` Send custom message over the bridge. -Allows interacting with the bridge by sending messages over `MessageLane` component. The message is being sent to the +Allows interacting with the bridge by sending messages over `Messages` component. The message is being sent to the source chain, delivered to the target chain and dispatched there. 
USAGE: @@ -46,22 +46,22 @@ FLAGS: SUBCOMMANDS: help Prints this message or the help of the given subcommand(s) - millau-to-rialto Submit message to given Millau -> Rialto lane - rialto-to-millau Submit message to given Rialto -> Millau lane + MillauToRialto Submit message to given Millau -> Rialto lane + RialtoToMillau Submit message to given Rialto -> Millau lane ``` Messages are send from a source chain to a target chain using a so called `message lane`. Message lanes handle both, message transport and message dispatch. There is one command for submitting a message to each of the two -available bridges, namely `millau-to-rialto` and `rialto-to-millau`. +available bridges, namely `MillauToRialto` and `RialtoToMillau`. Submitting a message requires a number of arguments to be provided. Those arguments are essentially the same -for both submit message commands, hence only the output for `millau-to-rialto` is shown below. +for both submit message commands, hence only the output for `MillauToRialto` is shown below. ``` Submit message to given Millau -> Rialto lane USAGE: - substrate-relay send-message millau-to-rialto [OPTIONS] --lane --millau-host --millau-port --millau-signer --origin --rialto-signer + substrate-relay send-message MillauToRialto [OPTIONS] --lane --source-host --source-port --source-signer --origin --target-signer FLAGS: -h, --help Prints help information @@ -72,22 +72,22 @@ OPTIONS: Delivery and dispatch fee. 
If not passed, determined automatically --lane Hex-encoded lane id - --millau-host Connect to Millau node at given host - --millau-port Connect to Millau node websocket server at given port - --millau-signer - The SURI of secret key to use when transactions are submitted to the Millau node + --source-host Connect to Source node at given host + --source-port Connect to Source node websocket server at given port + --source-signer + The SURI of secret key to use when transactions are submitted to the Source node - --millau-signer-password - The password for the SURI of secret key to use when transactions are submitted to the Millau node + --source-signer-password + The password for the SURI of secret key to use when transactions are submitted to the Source node --origin The origin to use when dispatching the message on the target chain [possible values: Target, Source] - --rialto-signer - The SURI of secret key to use when transactions are submitted to the Rialto node + --target-signer + The SURI of secret key to use when transactions are submitted to the Target node - --rialto-signer-password - The password for the SURI of secret key to use when transactions are submitted to the Rialto node + --target-signer-password + The password for the SURI of secret key to use when transactions are submitted to the Target node SUBCOMMANDS: @@ -100,30 +100,30 @@ As can be seen from the output, there are two types of messages available: `rema A remark is some opaque message which will be placed on-chain. For basic testing, a remark is the easiest to go with. -Usage of the arguments is best explained with an example. Below you can see, how a remark +Usage of the arguments is best explained with an example. 
Below you can see, how a remark would look like: ``` -substrate-relay send-message millau-to-rialto \ - --millau-host=127.0.0.1 \ - --millau-port=10946 \ - --millau-signer=//Dave \ - --rialto-signer=//Dave \ +substrate-relay send-message MillauToRialto \ + --source-host=127.0.0.1 \ + --source-port=10946 \ + --source-signer=//Dave \ + --target-signer=//Dave \ --lane=00000000 \ --origin Target \ remark ``` Messages are basically regular transactions. That means, they have to be signed. In order to send a message, you have to control an account private key on both, the source and -the target chain. Those accounts are specified using the `--millau-signer` and `--rialto-signer` -arguments in the example above. +the target chain. Those accounts are specified using the `--source-signer` and `--target-signer` +arguments in the example above. -Message delivery and dispatch requires a fee to be paid. In the example above, we have not +Message delivery and dispatch requires a fee to be paid. In the example above, we have not specified the `--fee` argument. Hence, the fee will be estimated automatically. Note that in order to pay the fee, the message sender account has to have sufficient funds available. The `--origin` argument allows to denote under which authority the message will be dispatched -on the target chain. Accepted values are `Target` and `Source`. +on the target chain. Accepted values are `Target` and `Source`. Although not strictly necessary, it is recommended, to use one of the well-known development accounts (`Alice`, `Bob`, `Charlie`, `Dave`, `Eve`) for message sending. Those accounts are diff --git a/polkadot/bridges/docs/testing-scenarios.md b/polkadot/bridges/docs/testing-scenarios.md index da2e9c0b43..343720524e 100644 --- a/polkadot/bridges/docs/testing-scenarios.md +++ b/polkadot/bridges/docs/testing-scenarios.md @@ -49,7 +49,7 @@ kCharlie. 1. 
kAlice prepares Kusama transaction: ```rust - kusama::Call::MessageLane::::send_message( + kusama::Call::Messages::::send_message( // dot-transfer-lane (truncated to 4bytes) lane_id, payload: MessagePayload { @@ -124,7 +124,7 @@ kCharlie. Lanes contains `latest_generated_nonce` and `latest_received_nonce` respectively. The relayer syncs messages between that range. -1. The relayer gets a proof for every message in that range (using the RPC of message lanes module) +1. The relayer gets a proof for every message in that range (using the RPC of messages module) 1. The relayer creates a message delivery transaction (but it has weight, size, and count limits). The count limit is there to make the loop of delivery code bounded. diff --git a/polkadot/bridges/fuzz/storage-proof/Cargo.lock b/polkadot/bridges/fuzz/storage-proof/Cargo.lock new file mode 100644 index 0000000000..e303f3a887 --- /dev/null +++ b/polkadot/bridges/fuzz/storage-proof/Cargo.lock @@ -0,0 +1,2362 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" +dependencies = [ + "lazy_static", + "regex", +] + +[[package]] +name = "addr2line" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" + +[[package]] +name = "ahash" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" + +[[package]] +name = "aho-corasick" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" +dependencies = [ + "memchr", +] + +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + +[[package]] +name = "anyhow" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1" + +[[package]] +name = "arbitrary" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db55d72333851e17d572bec876e390cd3b11eb1ef53ae821dd9f3b653d2b4569" + +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + +[[package]] +name = "arrayvec" +version = "0.4.12" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" +dependencies = [ + "nodrop", +] + +[[package]] +name = "arrayvec" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" + +[[package]] +name = "async-trait" +version = "0.1.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "autocfg" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" + +[[package]] +name = "backtrace" +version = "0.3.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" +dependencies = [ + "addr2line", + "cfg-if 1.0.0", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base58" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "bitvec" +version = "0.17.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" +dependencies = [ + "either", + "radium", +] + +[[package]] +name = "blake2-rfc" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" +dependencies = [ + 
"arrayvec 0.4.12", + "constant_time_eq", +] + +[[package]] +name = "block-buffer" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +dependencies = [ + "block-padding", + "byte-tools", + "byteorder", + "generic-array 0.12.3", +] + +[[package]] +name = "block-buffer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "generic-array 0.14.4", +] + +[[package]] +name = "block-padding" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +dependencies = [ + "byte-tools", +] + +[[package]] +name = "bp-header-chain" +version = "0.1.0" +dependencies = [ + "finality-grandpa", + "frame-support", + "parity-scale-codec", + "serde", + "sp-core", + "sp-finality-grandpa", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "bp-runtime" +version = "0.1.0" +dependencies = [ + "frame-support", + "num-traits", + "parity-scale-codec", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "bp-test-utils" +version = "0.1.0" +dependencies = [ + "bp-header-chain", + "finality-grandpa", + "sp-finality-grandpa", + "sp-keyring", + "sp-runtime", +] + +[[package]] +name = "byte-slice-cast" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" + +[[package]] +name = "byte-tools" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" + +[[package]] +name = "byteorder" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +dependencies = [ + "libc", + "num-integer", + "num-traits", + "time", + "winapi", +] + +[[package]] +name = "cloudabi" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +dependencies = [ + "bitflags", +] + +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" + +[[package]] +name = "cpuid-bool" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-mac" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" +dependencies = [ + "generic-array 0.12.3", + "subtle 1.0.0", +] + +[[package]] +name = "crypto-mac" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" +dependencies = [ + "generic-array 0.14.4", + "subtle 2.4.0", +] + +[[package]] +name = "curve25519-dalek" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "434e1720189a637d44fe464f4df1e6eb900b4835255b14354497c78af37d9bb8" +dependencies = [ + "byteorder", + "digest 0.8.1", + "rand_core 0.5.1", + "subtle 2.4.0", + "zeroize", +] + +[[package]] +name = "curve25519-dalek" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f627126b946c25a4638eec0ea634fc52506dea98db118aae985118ce7c3d723f" +dependencies = [ + "byteorder", + "digest 0.9.0", + "rand_core 0.5.1", + "subtle 2.4.0", + "zeroize", +] + +[[package]] +name = "derive_more" +version = "0.99.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "digest" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +dependencies = [ + "generic-array 0.12.3", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array 0.14.4", +] + +[[package]] +name = "dyn-clonable" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" +dependencies = [ + "dyn-clonable-impl", + "dyn-clone", +] + +[[package]] +name = "dyn-clonable-impl" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" +dependencies = [ + 
"proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "dyn-clone" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" + +[[package]] +name = "ed25519" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37c66a534cbb46ab4ea03477eae19d5c22c01da8258030280b7bd9d8433fb6ef" +dependencies = [ + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +dependencies = [ + "curve25519-dalek 3.0.2", + "ed25519", + "rand 0.7.3", + "serde", + "sha2 0.9.2", + "zeroize", +] + +[[package]] +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + +[[package]] +name = "environmental" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6576a1755ddffd988788025e75bce9e74b018f7cc226198fe931d077911c6d7e" + +[[package]] +name = "fake-simd" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" + +[[package]] +name = "finality-grandpa" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8feb87a63249689640ac9c011742c33139204e3c134293d3054022276869133b" +dependencies = [ + "either", + "futures", + "futures-timer", + "log", + "num-traits", + "parity-scale-codec", + "parking_lot 0.9.0", +] + +[[package]] +name = "fixed-hash" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" +dependencies = [ + "byteorder", + "rand 0.8.2", + "rustc-hex", + 
"static_assertions", +] + +[[package]] +name = "frame-metadata" +version = "12.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "parity-scale-codec", + "serde", + "sp-core", + "sp-std", +] + +[[package]] +name = "frame-support" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "bitflags", + "frame-metadata", + "frame-support-procedural", + "impl-trait-for-tuples", + "log", + "once_cell", + "parity-scale-codec", + "paste", + "serde", + "smallvec 1.6.1", + "sp-arithmetic", + "sp-core", + "sp-inherents", + "sp-io", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-tracing", +] + +[[package]] +name = "frame-support-procedural" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "Inflector", + "frame-support-procedural-tools", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural-tools" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "frame-support-procedural-tools-derive", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural-tools-derive" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-system" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "frame-support", + "impl-trait-for-tuples", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-version", +] + +[[package]] +name = "futures" 
+version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65" + +[[package]] +name = "futures-executor" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", + "num_cpus", +] + +[[package]] +name = "futures-io" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" + +[[package]] +name = "futures-macro" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" +dependencies = [ + "proc-macro-hack", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6" + +[[package]] +name = "futures-task" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86" +dependencies = [ + 
"once_cell", +] + +[[package]] +name = "futures-timer" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1de7508b218029b0f01662ed8f61b1c964b3ae99d6f25462d0f55a595109df6" + +[[package]] +name = "futures-util" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "proc-macro-hack", + "proc-macro-nested", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +dependencies = [ + "typenum", +] + +[[package]] +name = "generic-array" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.10.1+wasi-snapshot-preview1", +] + +[[package]] +name = "gimli" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" + +[[package]] +name = "hash-db" +version = "0.15.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" + +[[package]] +name = "hash256-std-hasher" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" +dependencies = [ + "crunchy", +] + +[[package]] +name = "hashbrown" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +dependencies = [ + "ahash", +] + +[[package]] +name = "heck" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "hermit-abi" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +dependencies = [ + "libc", +] + +[[package]] +name = "hex" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" + +[[package]] +name = "hmac" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" +dependencies = [ + "crypto-mac 0.7.0", + "digest 0.8.1", +] + +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac 0.8.0", + "digest 0.9.0", +] + +[[package]] +name = "hmac-drbg" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" +dependencies = 
[ + "digest 0.8.1", + "generic-array 0.12.3", + "hmac 0.7.1", +] + +[[package]] +name = "honggfuzz" +version = "0.5.52" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ead88897bcad1c396806d6ccba260a0363e11da997472e9e19ab9889969083a2" +dependencies = [ + "arbitrary", + "lazy_static", + "memmap", +] + +[[package]] +name = "impl-codec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1be51a921b067b0eaca2fad532d9400041561aa922221cc65f95a85641c6bf53" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-serde" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b47ca4d2b6931707a55fce5cf66aff80e2178c8b63bbb4ecb5695cbc870ddf6f" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f65a8ecf74feeacdab8d38cb129e550ca871cccaa7d1921d8636ecd75534903" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "instant" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "integer-sqrt" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" +dependencies = [ + "num-traits", +] + +[[package]] +name = "itoa" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" + +[[package]] +name = "keccak" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" + +[[package]] +name = "lazy_static" +version = "1.4.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" + +[[package]] +name = "libsecp256k1" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" +dependencies = [ + "arrayref", + "crunchy", + "digest 0.8.1", + "hmac-drbg", + "rand 0.7.3", + "sha2 0.8.2", + "subtle 2.4.0", + "typenum", +] + +[[package]] +name = "lock_api" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "lock_api" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" +dependencies = [ + "cfg-if 0.1.10", +] + +[[package]] +name = "matchers" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" + +[[package]] +name = "memchr" +version = "2.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" + +[[package]] +name = "memmap" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "memory-db" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6cbd2a22f201c03cc1706a727842490abfea17b7b53260358239828208daba3c" +dependencies = [ + "hash-db", + "hashbrown", + "parity-util-mem", +] + +[[package]] +name = "memory_units" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" + +[[package]] +name = "merlin" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.5.1", + "zeroize", +] + +[[package]] +name = "miniz_oxide" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" +dependencies = [ + "adler", + "autocfg", +] + +[[package]] +name = "nodrop" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" + +[[package]] +name = "num-bigint" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +dependencies = [ + "autocfg", 
+ "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" +dependencies = [ + "autocfg", + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "object" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" + +[[package]] +name = "once_cell" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" +dependencies = [ + "parking_lot 0.11.1", +] + +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + +[[package]] +name = "pallet-substrate-bridge" +version = "0.1.0" +dependencies = [ + "bp-header-chain", + "bp-runtime", + "finality-grandpa", + "frame-support", + "frame-system", + "hash-db", + "parity-scale-codec", + "serde", + "sp-finality-grandpa", + "sp-runtime", + "sp-std", + "sp-trie", +] + +[[package]] +name = "parity-scale-codec" +version = "1.3.6" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79602888a81ace83e3d1d4b2873286c1f5f906c84db667594e8db8da3506c383" +dependencies = [ + "arrayvec 0.5.2", + "bitvec", + "byte-slice-cast", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "198db82bb1c18fc00176004462dd809b2a6d851669550aa17af6dacd21ae0c14" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "parity-util-mem" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f17f15cb05897127bf36a240085a1f0bbef7bce3024849eccf7f93f6171bc27" +dependencies = [ + "cfg-if 1.0.0", + "hashbrown", + "impl-trait-for-tuples", + "parity-util-mem-derive", + "parking_lot 0.11.1", + "primitive-types", + "winapi", +] + +[[package]] +name = "parity-util-mem-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" +dependencies = [ + "proc-macro2", + "syn", + "synstructure", +] + +[[package]] +name = "parity-wasm" +version = "0.41.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" + +[[package]] +name = "parking_lot" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" +dependencies = [ + "lock_api 0.3.4", + "parking_lot_core 0.6.2", + "rustc_version", +] + +[[package]] +name = "parking_lot" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +dependencies = [ + "instant", + "lock_api 0.4.2", + "parking_lot_core 0.8.2", +] + +[[package]] +name = 
"parking_lot_core" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" +dependencies = [ + "cfg-if 0.1.10", + "cloudabi", + "libc", + "redox_syscall", + "rustc_version", + "smallvec 0.6.14", + "winapi", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" +dependencies = [ + "cfg-if 1.0.0", + "instant", + "libc", + "redox_syscall", + "smallvec 1.6.1", + "winapi", +] + +[[package]] +name = "paste" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" +dependencies = [ + "paste-impl", + "proc-macro-hack", +] + +[[package]] +name = "paste-impl" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" +dependencies = [ + "proc-macro-hack", +] + +[[package]] +name = "pbkdf2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" +dependencies = [ + "byteorder", + "crypto-mac 0.7.0", +] + +[[package]] +name = "pbkdf2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" +dependencies = [ + "crypto-mac 0.8.0", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "ppv-lite86" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" + +[[package]] +name = "primitive-types" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3824ae2c5e27160113b9e029a10ec9e3f0237bad8029f69c7724393c9fdefd8" +dependencies = [ + "fixed-hash", + "impl-codec", + "impl-serde", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml", +] + +[[package]] +name = "proc-macro-hack" +version = "0.5.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" + +[[package]] +name = "proc-macro-nested" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" + +[[package]] +name = "proc-macro2" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "radium" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom 0.1.16", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc", + "rand_pcg", +] + +[[package]] +name = "rand" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18519b42a40024d661e1714153e9ad0c3de27cd495760ceb09710920f1098b1e" +dependencies = [ + "libc", + "rand_chacha 0.3.0", + "rand_core 0.6.1", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.1", +] + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom 0.1.16", +] + +[[package]] +name = "rand_core" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" +dependencies = [ + "getrandom 0.2.2", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", +] + +[[package]] +name = "rand_pcg" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" +dependencies = [ + "rand_core 0.5.1", +] + +[[package]] +name = "redox_syscall" +version = "0.1.57" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" + +[[package]] +name = "ref-cast" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "regex" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", + "thread_local", +] + +[[package]] +name = "regex-automata" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" +dependencies = [ + "byteorder", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.6.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" + +[[package]] +name = "rustc-demangle" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + 
+[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver", +] + +[[package]] +name = "ryu" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" + +[[package]] +name = "schnorrkel" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" +dependencies = [ + "arrayref", + "arrayvec 0.5.2", + "curve25519-dalek 2.1.2", + "getrandom 0.1.16", + "merlin", + "rand 0.7.3", + "rand_core 0.5.1", + "serde", + "sha2 0.8.2", + "subtle 2.4.0", + "zeroize", +] + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "secrecy" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0673d6a6449f5e7d12a1caf424fd9363e2af3a4953023ed455e3c4beef4597c0" +dependencies = [ + "zeroize", +] + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + +[[package]] +name = "serde" +version = "1.0.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "166b2349061381baf54a58e4b13c89369feb0ef2eaa57198899e2312aac30aab" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = 
"1.0.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca2a8cb5805ce9e3b95435e3765b7b553cecc762d938d409434338386cb5775" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha2" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" +dependencies = [ + "block-buffer 0.7.3", + "digest 0.8.1", + "fake-simd", + "opaque-debug 0.2.3", +] + +[[package]] +name = "sha2" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e7aab86fe2149bad8c507606bdb3f4ef5e7b2380eb92350f56122cca72a42a8" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpuid-bool", + "digest 0.9.0", + "opaque-debug 0.3.0", +] + +[[package]] +name = "sharded-slab" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "signature" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f0242b8e50dd9accdd56170e94ca1ebd223b098eb9c83539a6e367d0f36ae68" + +[[package]] +name = "slab" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" + +[[package]] +name = "smallvec" +version = "0.6.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" +dependencies = [ + "maybe-uninit", +] + +[[package]] +name = "smallvec" +version = "1.6.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" + +[[package]] +name = "sp-api" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "hash-db", + "parity-scale-codec", + "sp-api-proc-macro", + "sp-core", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-version", + "thiserror", +] + +[[package]] +name = "sp-api-proc-macro" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "blake2-rfc", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-application-crypto" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-std", +] + +[[package]] +name = "sp-arithmetic" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "integer-sqrt", + "num-traits", + "parity-scale-codec", + "serde", + "sp-debug-derive", + "sp-std", +] + +[[package]] +name = "sp-core" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "base58", + "blake2-rfc", + "byteorder", + "dyn-clonable", + "ed25519-dalek", + "futures", + "hash-db", + "hash256-std-hasher", + "hex", + "impl-serde", + "lazy_static", + "libsecp256k1", + "log", + "merlin", + "num-traits", + "parity-scale-codec", + "parity-util-mem", + "parking_lot 0.11.1", + "primitive-types", + "rand 0.7.3", + "regex", + "schnorrkel", + "secrecy", + "serde", + "sha2 0.9.2", + "sp-debug-derive", + "sp-externalities", + "sp-runtime-interface", + "sp-std", + "sp-storage", + 
"substrate-bip39", + "thiserror", + "tiny-bip39", + "tiny-keccak", + "twox-hash", + "wasmi", + "zeroize", +] + +[[package]] +name = "sp-debug-derive" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-externalities" +version = "0.8.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "environmental", + "parity-scale-codec", + "sp-std", + "sp-storage", +] + +[[package]] +name = "sp-finality-grandpa" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "finality-grandpa", + "log", + "parity-scale-codec", + "serde", + "sp-api", + "sp-application-crypto", + "sp-core", + "sp-keystore", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-inherents" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "parity-scale-codec", + "parking_lot 0.11.1", + "sp-core", + "sp-std", + "thiserror", +] + +[[package]] +name = "sp-io" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "futures", + "hash-db", + "libsecp256k1", + "log", + "parity-scale-codec", + "parking_lot 0.11.1", + "sp-core", + "sp-externalities", + "sp-keystore", + "sp-runtime-interface", + "sp-state-machine", + "sp-std", + "sp-tracing", + "sp-trie", + "sp-wasm-interface", + "tracing", + "tracing-core", +] + +[[package]] +name = "sp-keyring" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "lazy_static", + "sp-core", + "sp-runtime", + "strum", +] + +[[package]] +name = "sp-keystore" 
+version = "0.8.0" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "async-trait", + "derive_more", + "futures", + "merlin", + "parity-scale-codec", + "parking_lot 0.11.1", + "schnorrkel", + "sp-core", + "sp-externalities", +] + +[[package]] +name = "sp-panic-handler" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "backtrace", +] + +[[package]] +name = "sp-runtime" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "either", + "hash256-std-hasher", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "parity-util-mem", + "paste", + "rand 0.7.3", + "serde", + "sp-application-crypto", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-std", +] + +[[package]] +name = "sp-runtime-interface" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec", + "primitive-types", + "sp-externalities", + "sp-runtime-interface-proc-macro", + "sp-std", + "sp-storage", + "sp-tracing", + "sp-wasm-interface", + "static_assertions", +] + +[[package]] +name = "sp-runtime-interface-proc-macro" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "Inflector", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-state-machine" +version = "0.8.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "hash-db", + "log", + "num-traits", + "parity-scale-codec", + "parking_lot 0.11.1", + "rand 0.7.3", + "smallvec 1.6.1", + "sp-core", + "sp-externalities", + 
"sp-panic-handler", + "sp-std", + "sp-trie", + "thiserror", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-std" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" + +[[package]] +name = "sp-storage" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "impl-serde", + "parity-scale-codec", + "ref-cast", + "serde", + "sp-debug-derive", + "sp-std", +] + +[[package]] +name = "sp-tracing" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "log", + "parity-scale-codec", + "sp-std", + "tracing", + "tracing-core", + "tracing-subscriber", +] + +[[package]] +name = "sp-trie" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "hash-db", + "memory-db", + "parity-scale-codec", + "sp-core", + "sp-std", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-version" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "impl-serde", + "parity-scale-codec", + "serde", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-wasm-interface" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec", + "sp-std", + "wasmi", +] + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strum" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6138f8f88a16d90134763314e3fc76fa3ed6a7db4725d6acf9a3ef95a3188d22" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0054a7df764039a6cd8592b9de84be4bec368ff081d203a7d5371cbfa8e65c81" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "substrate-bip39" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bed6646a0159b9935b5d045611560eeef842b78d7adc3ba36f5ca325a13a0236" +dependencies = [ + "hmac 0.7.1", + "pbkdf2 0.3.0", + "schnorrkel", + "sha2 0.8.2", + "zeroize", +] + +[[package]] +name = "substrate-bridge-fuzzer" +version = "0.1.0" +dependencies = [ + "bp-header-chain", + "bp-runtime", + "bp-test-utils", + "finality-grandpa", + "frame-support", + "frame-system", + "hash-db", + "honggfuzz", + "pallet-substrate-bridge", + "parity-scale-codec", + "serde", + "sp-core", + "sp-finality-grandpa", + "sp-io", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-trie", +] + +[[package]] +name = "subtle" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" + +[[package]] +name = "subtle" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" + +[[package]] +name = "syn" +version = "1.0.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "synstructure" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +dependencies = [ + "proc-macro2", + "quote", + "syn", 
+ "unicode-xid", +] + +[[package]] +name = "thiserror" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76cc616c6abf8c8928e2fdcc0dbfab37175edd8fb49a4641066ad1364fdab146" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9be73a2caec27583d0046ef3796c3794f868a5bc813db689eed00c7631275cd1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb9bc092d0d51e76b2b19d9d85534ffc9ec2db959a2523cdae0697e2972cd447" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "time" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "tiny-bip39" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8" +dependencies = [ + "anyhow", + "hmac 0.8.1", + "once_cell", + "pbkdf2 0.4.0", + "rand 0.7.3", + "rustc-hash", + "sha2 0.9.2", + "thiserror", + "unicode-normalization", + "zeroize", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinyvec" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" + +[[package]] +name = "toml" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +dependencies = [ + "serde", +] + +[[package]] +name = "tracing" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" +dependencies = [ + "cfg-if 1.0.0", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "tracing-log" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1fa8f0c8f4c594e4fc9debc1990deab13238077271ba84dd853d54902ee3401" +dependencies = [ + "ansi_term", + "chrono", + "lazy_static", + "matchers", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec 1.6.1", + "thread_local", 
+ "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] + +[[package]] +name = "trie-db" +version = "0.22.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cc176c377eb24d652c9c69c832c832019011b6106182bf84276c66b66d5c9a6" +dependencies = [ + "hash-db", + "hashbrown", + "log", + "rustc-hex", + "smallvec 1.6.1", +] + +[[package]] +name = "trie-root" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "652931506d2c1244d7217a70b99f56718a7b4161b37f04e7cd868072a99f68cd" +dependencies = [ + "hash-db", +] + +[[package]] +name = "twox-hash" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59" +dependencies = [ + "cfg-if 0.1.10", + "rand 0.7.3", + "static_assertions", +] + +[[package]] +name = "typenum" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" + +[[package]] +name = "uint" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e11fe9a9348741cf134085ad57c249508345fe16411b3d7fb4ff2da2f1d6382e" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + +[[package]] +name = "unicode-normalization" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-segmentation" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" + +[[package]] +name = "unicode-xid" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" + +[[package]] +name = "version_check" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasi" +version = "0.10.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" + +[[package]] +name = "wasmi" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf617d864d25af3587aa745529f7aaa541066c876d57e050c0d0c85c61c92aff" +dependencies = [ + "libc", + "memory_units", + "num-rational", + "num-traits", + "parity-wasm", + "wasmi-validation", +] + +[[package]] +name = "wasmi-validation" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea78c597064ba73596099281e2f4cfc019075122a65cdda3205af94f0b264d93" +dependencies = [ + "parity-wasm", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] 
+name = "zeroize" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81a974bcdd357f0dca4d41677db03436324d45a4c9ed2d0b873a5a360ce41c36" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3f369ddb18862aba61aa49bf31e74d29f0f162dec753063200e1dc084345d16" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] diff --git a/polkadot/bridges/fuzz/storage-proof/Cargo.toml b/polkadot/bridges/fuzz/storage-proof/Cargo.toml new file mode 100644 index 0000000000..05456114e6 --- /dev/null +++ b/polkadot/bridges/fuzz/storage-proof/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "storage-proof-fuzzer" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +codec = { package = "parity-scale-codec", version = "1.3.1" } +finality-grandpa = "0.12.3" +hash-db = "0.15.2" +honggfuzz = "0.5.54" +log = "0.4.0" +env_logger = "0.8.3" + +# Bridge Dependencies + +bp-header-chain = { path = "../../primitives/header-chain" } +bp-runtime = { path = "../../primitives/runtime" } +bp-test-utils = { path = "../../primitives/test-utils" } + +# Substrate Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-state-machine = { git = 
"https://github.com/paritytech/substrate", branch = "master" } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/fuzz/storage-proof/README.md b/polkadot/bridges/fuzz/storage-proof/README.md new file mode 100644 index 0000000000..da3c7b1565 --- /dev/null +++ b/polkadot/bridges/fuzz/storage-proof/README.md @@ -0,0 +1,32 @@ +# Storage Proof Fuzzer + +## How to run? + +Install dependencies: +``` +$ sudo apt install build-essential binutils-dev libunwind-dev +``` + + +Install `cargo hfuzz` plugin: +``` +$ cargo install honggfuzz +``` + +Run: +``` +$ cargo hfuzz run storage-proof-fuzzer +``` + +Use `HFUZZ_RUN_ARGS` to customize execution: +``` +# 1 second of timeout +# use 12 fuzzing thread +# be verbose +# stop after 1000000 fuzzing iteration +# exit upon crash +HFUZZ_RUN_ARGS="-t 1 -n 12 -v -N 1000000 --exit_upon_crash" cargo hfuzz run example +``` + +More details in the [official documentation](https://docs.rs/honggfuzz/0.5.52/honggfuzz/#about-honggfuzz). + diff --git a/polkadot/bridges/fuzz/storage-proof/src/main.rs b/polkadot/bridges/fuzz/storage-proof/src/main.rs new file mode 100644 index 0000000000..18be72e72f --- /dev/null +++ b/polkadot/bridges/fuzz/storage-proof/src/main.rs @@ -0,0 +1,84 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Storage Proof Checker fuzzer. + +#![warn(missing_docs)] + +use honggfuzz::fuzz; +// Logic for checking Substrate storage proofs. + +use sp_core::{Blake2Hasher, H256}; +use sp_state_machine::{backend::Backend, prove_read, InMemoryBackend}; +use sp_std::vec::Vec; +use sp_trie::StorageProof; +use std::collections::HashMap; + +fn craft_known_storage_proof(input_vec: Vec<(Vec, Vec)>) -> (H256, StorageProof) { + let storage_proof_vec = vec![( + None, + input_vec.iter().map(|x| (x.0.clone(), Some(x.1.clone()))).collect(), + )]; + log::info!("Storage proof vec {:?}", storage_proof_vec); + let backend = >::from(storage_proof_vec); + let root = backend.storage_root(std::iter::empty()).0; + let vector_element_proof = StorageProof::new( + prove_read(backend, input_vec.iter().map(|x| x.0.as_slice())) + .unwrap() + .iter_nodes() + .collect(), + ); + (root, vector_element_proof) +} + +fn transform_into_unique(input_vec: Vec<(Vec, Vec)>) -> Vec<(Vec, Vec)> { + let mut output_hashmap = HashMap::new(); + let mut output_vec = Vec::new(); + for key_value_pair in input_vec.clone() { + output_hashmap.insert(key_value_pair.0, key_value_pair.1); //Only 1 value per key + } + for (key, val) in output_hashmap.iter() { + output_vec.push((key.clone(), val.clone())); + } + output_vec +} + +fn run_fuzzer() { + fuzz!(|input_vec: Vec<(Vec, Vec)>| { + if input_vec.is_empty() { + return; + } + let unique_input_vec = transform_into_unique(input_vec); + let (root, craft_known_storage_proof) = craft_known_storage_proof(unique_input_vec.clone()); + let checker = >::new(root, craft_known_storage_proof) + .expect("Valid proof passed; qed"); + for key_value_pair in unique_input_vec { + log::info!("Reading value for pair {:?}", key_value_pair); + assert_eq!( + checker.read_value(&key_value_pair.0), + Ok(Some(key_value_pair.1.clone())) + ); + } + }) +} + +fn main() { + 
env_logger::init(); + + loop { + run_fuzzer(); + } +} diff --git a/polkadot/bridges/modules/currency-exchange/Cargo.toml b/polkadot/bridges/modules/currency-exchange/Cargo.toml index cf58b5e81c..8094f0f2b6 100644 --- a/polkadot/bridges/modules/currency-exchange/Cargo.toml +++ b/polkadot/bridges/modules/currency-exchange/Cargo.toml @@ -8,6 +8,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +log = { version = "0.4.14", default-features = false } serde = { version = "1.0", optional = true } # Bridge dependencies @@ -36,6 +37,7 @@ std = [ "frame-benchmarking/std", "frame-support/std", "frame-system/std", + "log/std", "serde", "sp-runtime/std", "sp-std/std", diff --git a/polkadot/bridges/modules/currency-exchange/src/benchmarking.rs b/polkadot/bridges/modules/currency-exchange/src/benchmarking.rs index d10dd3c684..574ae93f6e 100644 --- a/polkadot/bridges/modules/currency-exchange/src/benchmarking.rs +++ b/polkadot/bridges/modules/currency-exchange/src/benchmarking.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -19,7 +19,7 @@ //! before invoking module calls. use super::{ - Call, Config as CurrencyExchangeConfig, InclusionProofVerifier, Instance, Module as CurrencyExchangeModule, + Call, Config as CurrencyExchangeConfig, InclusionProofVerifier, Instance, Pallet as CurrencyExchangePallet, }; use sp_std::prelude::*; @@ -30,8 +30,8 @@ const SEED: u32 = 0; const WORST_TX_SIZE_FACTOR: u32 = 1000; const WORST_PROOF_SIZE_FACTOR: u32 = 1000; -/// Module we're benchmarking here. -pub struct Module, I: Instance>(CurrencyExchangeModule); +/// Pallet we're benchmarking here. 
+pub struct Pallet, I: Instance>(CurrencyExchangePallet); /// Proof benchmarking parameters. pub struct ProofParams { diff --git a/polkadot/bridges/modules/currency-exchange/src/lib.rs b/polkadot/bridges/modules/currency-exchange/src/lib.rs index 54e4558d89..542082f85a 100644 --- a/polkadot/bridges/modules/currency-exchange/src/lib.rs +++ b/polkadot/bridges/modules/currency-exchange/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -61,7 +61,7 @@ pub trait Config: frame_system::Config { } decl_error! { - pub enum Error for Module, I: Instance> { + pub enum Error for Pallet, I: Instance> { /// Invalid peer blockchain transaction provided. InvalidTransaction, /// Peer transaction has invalid amount. @@ -113,7 +113,7 @@ decl_module! { // reward submitter for providing valid message T::OnTransactionSubmitted::on_valid_transaction_submitted(submitter); - frame_support::debug::trace!( + log::trace!( target: "runtime", "Completed currency exchange: {:?}", deposit.transfer_id, @@ -125,20 +125,20 @@ decl_module! { } decl_storage! { - trait Store for Module, I: Instance = DefaultInstance> as Bridge { + trait Store for Pallet, I: Instance = DefaultInstance> as Bridge { /// All transfers that have already been claimed. Transfers: map hasher(blake2_128_concat) ::Id => (); } } -impl, I: Instance> Module { +impl, I: Instance> Pallet { /// Returns true if currency exchange module is able to import given transaction proof in /// its current state. 
pub fn filter_transaction_proof( proof: &::TransactionInclusionProof, ) -> bool { if let Err(err) = prepare_deposit_details::(proof) { - frame_support::debug::trace!( + log::trace!( target: "runtime", "Can't accept exchange transaction: {:?}", err, diff --git a/polkadot/bridges/modules/call-dispatch/Cargo.toml b/polkadot/bridges/modules/dispatch/Cargo.toml similarity index 93% rename from polkadot/bridges/modules/call-dispatch/Cargo.toml rename to polkadot/bridges/modules/dispatch/Cargo.toml index 64910df861..6170af272a 100644 --- a/polkadot/bridges/modules/call-dispatch/Cargo.toml +++ b/polkadot/bridges/modules/dispatch/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "pallet-bridge-call-dispatch" +name = "pallet-bridge-dispatch" description = "A Substrate Runtime module that dispatches a bridge message, treating it simply as encoded Call" version = "0.1.0" authors = ["Parity Technologies "] @@ -8,6 +8,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +log = { version = "0.4.14", default-features = false } # Bridge dependencies @@ -33,6 +34,7 @@ std = [ "bp-runtime/std", "frame-support/std", "frame-system/std", + "log/std", "sp-core/std", "sp-runtime/std", "sp-std/std", diff --git a/polkadot/bridges/modules/call-dispatch/README.md b/polkadot/bridges/modules/dispatch/README.md similarity index 89% rename from polkadot/bridges/modules/call-dispatch/README.md rename to polkadot/bridges/modules/dispatch/README.md index 0351aa9e50..f2ee04beaf 100644 --- a/polkadot/bridges/modules/call-dispatch/README.md +++ b/polkadot/bridges/modules/dispatch/README.md @@ -1,7 +1,7 @@ # Call Dispatch Module The call dispatch module has a single internal (only callable by other runtime modules) entry point -for dispatching encoded calls (`pallet_bridge_call_dispatch::Module::dispatch`). Every dispatch +for dispatching encoded calls (`pallet_bridge_dispatch::Module::dispatch`). 
Every dispatch (successful or not) emits a corresponding module event. The module doesn't have any call-related requirements - they may come from the bridged chain over some message lane, or they may be crafted locally. But in this document we'll mostly talk about this module in the context of bridges. @@ -11,9 +11,9 @@ Every message that is being dispatched has three main characteristics: identifier of the bridged chain (like `b"rlto"` for messages coming from `Rialto`), or the identifier of the bridge itself (`b"rimi"` for `Rialto` <-> `Millau` bridge); - `id` is the unique id of the message within the given bridge. For messages coming from the - [message lane module](../message-lane/README.md), it may worth to use a tuple + [messages module](../messages/README.md), it may worth to use a tuple `(LaneId, MessageNonce)` to identify a message; -- `message` is the `pallet_bridge_call_dispatch::MessagePayload` structure. The `call` field is set +- `message` is the `pallet_bridge_dispatch::MessagePayload` structure. The `call` field is set to the (potentially) encoded `Call` of this chain. The easiest way to understand what is happening when a `Call` is being dispatched, is to look at the @@ -33,7 +33,7 @@ module events set: chain storage has been corrupted. The `Call` is decoded after `spec_version` check, so we'll never try to decode `Call` from other runtime version; - `MessageSignatureMismatch` event is emitted if submitter has chose to dispatch message using - specified this chain account (`pallet_bridge_call_dispatch::CallOrigin::TargetAccount` origin), + specified this chain account (`pallet_bridge_dispatch::CallOrigin::TargetAccount` origin), but he has failed to prove that he owns the private key for this account; - `MessageCallRejected` event is emitted if the module has been deployed with some call filter and this filter has rejected the `Call`. 
In your bridge you may choose to reject all messages except @@ -52,7 +52,7 @@ When we talk about module in context of bridges, these events are helping in fol 1. when the message submitter has access to the state of both chains and wants to monitor what has happened with his message. Then he could use the message id (that he gets from the - [message lane module events](../message-lane/README.md#General-Information)) to filter events of + [messages module events](../messages/README.md#General-Information)) to filter events of call dispatch module at the target chain and actually see what has happened with his message; 1. when the message submitter only has access to the source chain state (for example, when sender is diff --git a/polkadot/bridges/modules/call-dispatch/src/lib.rs b/polkadot/bridges/modules/dispatch/src/lib.rs similarity index 93% rename from polkadot/bridges/modules/call-dispatch/src/lib.rs rename to polkadot/bridges/modules/dispatch/src/lib.rs index 1f0da20282..416d080b0c 100644 --- a/polkadot/bridges/modules/call-dispatch/src/lib.rs +++ b/polkadot/bridges/modules/dispatch/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -45,6 +45,7 @@ use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; /// Spec version type. pub type SpecVersion = u32; +// TODO [#895] move to primitives /// Origin of a Call when it is dispatched on the target chain. /// /// The source chain can (and should) verify that the message can be dispatched on the target chain @@ -56,7 +57,7 @@ pub enum CallOrigin { /// Runtime specification version. We only dispatch messages that have the same @@ -118,7 +120,7 @@ pub trait Config: frame_system::Config { type Event: From> + Into<::Event>; /// Id of the message. 
Whenever message is passed to the dispatch module, it emits /// event with this id + dispatch result. Could be e.g. (LaneId, MessageNonce) if - /// it comes from message-lane module. + /// it comes from the messages module. type MessageId: Parameter; /// Type of account ID on source chain. type SourceChainAccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord + Default; @@ -153,7 +155,7 @@ pub trait Config: frame_system::Config { } decl_storage! { - trait Store for Module, I: Instance = DefaultInstance> as CallDispatch {} + trait Store for Pallet, I: Instance = DefaultInstance> as Dispatch {} } decl_event!( @@ -189,7 +191,7 @@ decl_module! { } } -impl, I: Instance> MessageDispatch for Module { +impl, I: Instance> MessageDispatch for Pallet { type Message = MessagePayload; @@ -202,7 +204,7 @@ impl, I: Instance> MessageDispatch for Module { let message = match message { Ok(message) => message, Err(_) => { - frame_support::debug::trace!("Message {:?}/{:?}: rejected before actual dispatch", bridge, id); + log::trace!(target: "runtime::bridge-dispatch", "Message {:?}/{:?}: rejected before actual dispatch", bridge, id); Self::deposit_event(RawEvent::MessageRejected(bridge, id)); return; } @@ -212,7 +214,7 @@ impl, I: Instance> MessageDispatch for Module { // (we want it to be the same, because otherwise we may decode Call improperly) let expected_version = ::Version::get().spec_version; if message.spec_version != expected_version { - frame_support::debug::trace!( + log::trace!( "Message {:?}/{:?}: spec_version mismatch. 
Expected {:?}, got {:?}", bridge, id, @@ -232,7 +234,7 @@ impl, I: Instance> MessageDispatch for Module { let call = match message.call.into() { Ok(call) => call, Err(_) => { - frame_support::debug::trace!("Failed to decode Call from message {:?}/{:?}", bridge, id,); + log::trace!(target: "runtime::bridge-dispatch", "Failed to decode Call from message {:?}/{:?}", bridge, id,); Self::deposit_event(RawEvent::MessageCallDecodeFailed(bridge, id)); return; } @@ -243,7 +245,7 @@ impl, I: Instance> MessageDispatch for Module { CallOrigin::SourceRoot => { let hex_id = derive_account_id::(bridge, SourceAccount::Root); let target_id = T::AccountIdConverter::convert(hex_id); - frame_support::debug::trace!("Root Account: {:?}", &target_id); + log::trace!(target: "runtime::bridge-dispatch", "Root Account: {:?}", &target_id); target_id } CallOrigin::TargetAccount(source_account_id, target_public, target_signature) => { @@ -251,7 +253,8 @@ impl, I: Instance> MessageDispatch for Module { let target_account = target_public.into_account(); if !target_signature.verify(&digest[..], &target_account) { - frame_support::debug::trace!( + log::trace!( + target: "runtime::bridge-dispatch", "Message {:?}/{:?}: origin proof is invalid. 
Expected account: {:?} from signature: {:?}", bridge, id, @@ -262,20 +265,21 @@ impl, I: Instance> MessageDispatch for Module { return; } - frame_support::debug::trace!("Target Account: {:?}", &target_account); + log::trace!(target: "runtime::bridge-dispatch", "Target Account: {:?}", &target_account); target_account } CallOrigin::SourceAccount(source_account_id) => { let hex_id = derive_account_id(bridge, SourceAccount::Account(source_account_id)); let target_id = T::AccountIdConverter::convert(hex_id); - frame_support::debug::trace!("Source Account: {:?}", &target_id); + log::trace!(target: "runtime::bridge-dispatch", "Source Account: {:?}", &target_id); target_id } }; // filter the call if !T::CallFilter::filter(&call) { - frame_support::debug::trace!( + log::trace!( + target: "runtime::bridge-dispatch", "Message {:?}/{:?}: the call ({:?}) is rejected by filter", bridge, id, @@ -291,7 +295,8 @@ impl, I: Instance> MessageDispatch for Module { let dispatch_info = call.get_dispatch_info(); let expected_weight = dispatch_info.weight; if message.weight < expected_weight { - frame_support::debug::trace!( + log::trace!( + target: "runtime::bridge-dispatch", "Message {:?}/{:?}: passed weight is too low. Expected at least {:?}, got {:?}", bridge, id, @@ -310,11 +315,12 @@ impl, I: Instance> MessageDispatch for Module { // finally dispatch message let origin = RawOrigin::Signed(origin_account).into(); - frame_support::debug::trace!("Message being dispatched is: {:?}", &call); + log::trace!(target: "runtime::bridge-dispatch", "Message being dispatched is: {:?}", &call); let dispatch_result = call.dispatch(origin); let actual_call_weight = extract_actual_weight(&dispatch_result, &dispatch_info); - frame_support::debug::trace!( + log::trace!( + target: "runtime::bridge-dispatch", "Message {:?}/{:?} has been dispatched. Weight: {} of {}. 
Result: {:?}", bridge, id, @@ -452,7 +458,7 @@ mod tests { UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event}, - CallDispatch: call_dispatch::{Pallet, Call, Event}, + Dispatch: call_dispatch::{Pallet, Call, Event}, } } @@ -531,7 +537,7 @@ mod tests { fn prepare_message( origin: CallOrigin, call: Call, - ) -> as MessageDispatch<::MessageId>>::Message { + ) -> as MessageDispatch<::MessageId>>::Message { MessagePayload { spec_version: TEST_SPEC_VERSION, weight: TEST_WEIGHT, @@ -542,20 +548,20 @@ mod tests { fn prepare_root_message( call: Call, - ) -> as MessageDispatch<::MessageId>>::Message { + ) -> as MessageDispatch<::MessageId>>::Message { prepare_message(CallOrigin::SourceRoot, call) } fn prepare_target_message( call: Call, - ) -> as MessageDispatch<::MessageId>>::Message { + ) -> as MessageDispatch<::MessageId>>::Message { let origin = CallOrigin::TargetAccount(1, TestAccountPublic(1), TestSignature(1)); prepare_message(origin, call) } fn prepare_source_message( call: Call, - ) -> as MessageDispatch<::MessageId>>::Message { + ) -> as MessageDispatch<::MessageId>>::Message { let origin = CallOrigin::SourceAccount(1); prepare_message(origin, call) } @@ -572,7 +578,7 @@ mod tests { message.spec_version = BAD_SPEC_VERSION; System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Ok(message)); + Dispatch::dispatch(bridge, id, Ok(message)); assert_eq!( System::events(), @@ -600,14 +606,14 @@ mod tests { message.weight = 0; System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Ok(message)); + Dispatch::dispatch(bridge, id, Ok(message)); assert_eq!( System::events(), vec![EventRecord { phase: Phase::Initialization, event: Event::call_dispatch(call_dispatch::Event::::MessageWeightMismatch( - bridge, id, 1973000, 0, + bridge, id, 1345000, 0, )), topics: vec![], }], @@ -628,7 +634,7 @@ mod tests { ); System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Ok(message)); + 
Dispatch::dispatch(bridge, id, Ok(message)); assert_eq!( System::events(), @@ -650,7 +656,7 @@ mod tests { let id = [0; 4]; System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Err(())); + Dispatch::dispatch(bridge, id, Err(())); assert_eq!( System::events(), @@ -674,7 +680,7 @@ mod tests { message.call.0 = vec![]; System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Ok(message)); + Dispatch::dispatch(bridge, id, Ok(message)); assert_eq!( System::events(), @@ -701,7 +707,7 @@ mod tests { message.weight = weight; System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Ok(message)); + Dispatch::dispatch(bridge, id, Ok(message)); assert_eq!( System::events(), @@ -722,7 +728,7 @@ mod tests { let message = prepare_root_message(Call::System(>::remark(vec![1, 2, 3]))); System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Ok(message)); + Dispatch::dispatch(bridge, id, Ok(message)); assert_eq!( System::events(), @@ -749,7 +755,7 @@ mod tests { let message = prepare_target_message(call); System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Ok(message)); + Dispatch::dispatch(bridge, id, Ok(message)); assert_eq!( System::events(), @@ -776,7 +782,7 @@ mod tests { let message = prepare_source_message(call); System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Ok(message)); + Dispatch::dispatch(bridge, id, Ok(message)); assert_eq!( System::events(), diff --git a/polkadot/bridges/modules/ethereum-contract/builtin/Cargo.toml b/polkadot/bridges/modules/ethereum-contract-builtin/Cargo.toml similarity index 94% rename from polkadot/bridges/modules/ethereum-contract/builtin/Cargo.toml rename to polkadot/bridges/modules/ethereum-contract-builtin/Cargo.toml index d17b0ba7e1..82e287a3ab 100644 --- a/polkadot/bridges/modules/ethereum-contract/builtin/Cargo.toml +++ b/polkadot/bridges/modules/ethereum-contract-builtin/Cargo.toml @@ -15,7 +15,7 @@ log = "0.4.14" # Runtime/chain specific dependencies -rialto-runtime = 
{ path = "../../../bin/rialto/runtime" } +rialto-runtime = { path = "../../bin/rialto/runtime" } # Substrate Dependencies diff --git a/polkadot/bridges/modules/ethereum-contract/builtin/src/lib.rs b/polkadot/bridges/modules/ethereum-contract-builtin/src/lib.rs similarity index 99% rename from polkadot/bridges/modules/ethereum-contract/builtin/src/lib.rs rename to polkadot/bridges/modules/ethereum-contract-builtin/src/lib.rs index 5762d510b2..47c4452aee 100644 --- a/polkadot/bridges/modules/ethereum-contract/builtin/src/lib.rs +++ b/polkadot/bridges/modules/ethereum-contract-builtin/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/modules/ethereum/Cargo.toml b/polkadot/bridges/modules/ethereum/Cargo.toml index 1912f45bab..fdd93ed733 100644 --- a/polkadot/bridges/modules/ethereum/Cargo.toml +++ b/polkadot/bridges/modules/ethereum/Cargo.toml @@ -9,6 +9,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"], optional = true } +log = { version = "0.4.14", default-features = false } serde = { version = "1.0", optional = true } # Bridge dependencies @@ -36,6 +37,7 @@ std = [ "frame-benchmarking/std", "frame-support/std", "frame-system/std", + "log/std", "serde", "sp-io/std", "sp-runtime/std", diff --git a/polkadot/bridges/modules/ethereum/src/benchmarking.rs b/polkadot/bridges/modules/ethereum/src/benchmarking.rs index beb8ba2a0e..960dbe9afe 100644 --- a/polkadot/bridges/modules/ethereum/src/benchmarking.rs +++ b/polkadot/bridges/modules/ethereum/src/benchmarking.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. 
+// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,13 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use super::{ - BridgeStorage, - RawOrigin, - HeadersByNumber, - Instance, - Config, -}; +use super::*; use crate::test_utils::{ build_custom_header, build_genesis_header, insert_header, validator_utils::*, validators_change_receipt, diff --git a/polkadot/bridges/modules/ethereum/src/error.rs b/polkadot/bridges/modules/ethereum/src/error.rs index 50dccd6ea2..ad798379da 100644 --- a/polkadot/bridges/modules/ethereum/src/error.rs +++ b/polkadot/bridges/modules/ethereum/src/error.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/modules/ethereum/src/finality.rs b/polkadot/bridges/modules/ethereum/src/finality.rs index 608708a0c7..58987c6b29 100644 --- a/polkadot/bridges/modules/ethereum/src/finality.rs +++ b/polkadot/bridges/modules/ethereum/src/finality.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/modules/ethereum/src/import.rs b/polkadot/bridges/modules/ethereum/src/import.rs index 1b41c3a8b2..8cd4c8a17c 100644 --- a/polkadot/bridges/modules/ethereum/src/import.rs +++ b/polkadot/bridges/modules/ethereum/src/import.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/modules/ethereum/src/lib.rs b/polkadot/bridges/modules/ethereum/src/lib.rs index 05beb279a9..aeb7d69f76 100644 --- a/polkadot/bridges/modules/ethereum/src/lib.rs +++ b/polkadot/bridges/modules/ethereum/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -313,7 +313,7 @@ pub trait PruningStrategy: Default { /// Every value that is returned from this function, must be greater or equal to the /// previous value. Otherwise it will be ignored (we can't revert pruning). /// - /// Module may prune both finalized and unfinalized blocks. But it can't give any + /// Pallet may prune both finalized and unfinalized blocks. But it can't give any /// guarantees on when it will happen. Example: if some unfinalized block at height N /// has scheduled validators set change, then the module won't prune any blocks with /// number >= N even if strategy allows that. @@ -457,7 +457,7 @@ decl_module! { } decl_storage! { - trait Store for Module, I: Instance = DefaultInstance> as Bridge { + trait Store for Pallet, I: Instance = DefaultInstance> as Bridge { /// Best known block. BestBlock: (HeaderId, U256); /// Best finalized block. @@ -505,7 +505,7 @@ decl_storage! { } } -impl, I: Instance> Module { +impl, I: Instance> Pallet { /// Returns number and hash of the best block known to the bridge module. /// The caller should only submit `import_header` transaction that makes /// (or leads to making) other header the best one. 
@@ -542,7 +542,7 @@ impl, I: Instance> Module { } } -impl, I: Instance> frame_support::unsigned::ValidateUnsigned for Module { +impl, I: Instance> frame_support::unsigned::ValidateUnsigned for Pallet { type Call = Call; fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { @@ -603,7 +603,7 @@ impl, I: Instance> BridgeStorage { // start pruning blocks let begin = new_pruning_range.oldest_unpruned_block; let end = new_pruning_range.oldest_block_to_keep; - frame_support::debug::trace!(target: "runtime", "Pruning blocks in range [{}..{})", begin, end); + log::trace!(target: "runtime", "Pruning blocks in range [{}..{})", begin, end); for number in begin..end { // if we can't prune anything => break if max_blocks_to_prune == 0 { @@ -629,7 +629,7 @@ impl, I: Instance> BridgeStorage { // we have pruned all headers at number new_pruning_range.oldest_unpruned_block = number + 1; - frame_support::debug::trace!( + log::trace!( target: "runtime", "Oldest unpruned PoA header is now: {}", new_pruning_range.oldest_unpruned_block, @@ -658,7 +658,7 @@ impl, I: Instance> BridgeStorage { // physically remove headers and (probably) obsolete validators sets while let Some(hash) = blocks_at_number.pop() { let header = Headers::::take(&hash); - frame_support::debug::trace!( + log::trace!( target: "runtime", "Pruning PoA header: ({}, {})", number, @@ -818,7 +818,7 @@ impl, I: Instance> Storage for BridgeStorage { } } - frame_support::debug::trace!( + log::trace!( target: "runtime", "Inserting PoA header: ({}, {})", header.header.number, @@ -846,7 +846,7 @@ impl, I: Instance> Storage for BridgeStorage { .map(|f| f.number) .unwrap_or_else(|| FinalizedBlock::::get().number); if let Some(finalized) = finalized { - frame_support::debug::trace!( + log::trace!( target: "runtime", "Finalizing PoA header: ({}, {})", finalized.number, @@ -869,7 +869,7 @@ pub(crate) fn initialize_storage, I: Instance>( initial_validators: &[Address], ) { let initial_hash = 
initial_header.compute_hash(); - frame_support::debug::trace!( + log::trace!( target: "runtime", "Initializing bridge with PoA header: ({}, {})", initial_header.number, @@ -917,7 +917,7 @@ pub fn verify_transaction_finalized( proof: &[(RawTransaction, RawTransactionReceipt)], ) -> bool { if tx_index >= proof.len() as _ { - frame_support::debug::trace!( + log::trace!( target: "runtime", "Tx finality check failed: transaction index ({}) is larger than number of transactions ({})", tx_index, @@ -930,7 +930,7 @@ pub fn verify_transaction_finalized( let header = match storage.header(&block) { Some((header, _)) => header, None => { - frame_support::debug::trace!( + log::trace!( target: "runtime", "Tx finality check failed: can't find header in the storage: {}", block, @@ -943,7 +943,7 @@ pub fn verify_transaction_finalized( // if header is not yet finalized => return if header.number > finalized.number { - frame_support::debug::trace!( + log::trace!( target: "runtime", "Tx finality check failed: header {}/{} is not finalized. Best finalized: {}", header.number, @@ -962,7 +962,7 @@ pub fn verify_transaction_finalized( false => block == finalized.hash, }; if !is_finalized { - frame_support::debug::trace!( + log::trace!( target: "runtime", "Tx finality check failed: header {} is not finalized: no canonical path to best finalized block {}", block, @@ -974,7 +974,7 @@ pub fn verify_transaction_finalized( // verify that transaction is included in the block if let Err(computed_root) = header.check_transactions_root(proof.iter().map(|(tx, _)| tx)) { - frame_support::debug::trace!( + log::trace!( target: "runtime", "Tx finality check failed: transactions root mismatch. 
Expected: {}, computed: {}", header.transactions_root, @@ -986,7 +986,7 @@ pub fn verify_transaction_finalized( // verify that transaction receipt is included in the block if let Err(computed_root) = header.check_raw_receipts_root(proof.iter().map(|(_, r)| r)) { - frame_support::debug::trace!( + log::trace!( target: "runtime", "Tx finality check failed: receipts root mismatch. Expected: {}, computed: {}", header.receipts_root, @@ -1001,7 +1001,7 @@ pub fn verify_transaction_finalized( match is_successful_raw_receipt { Ok(true) => true, Ok(false) => { - frame_support::debug::trace!( + log::trace!( target: "runtime", "Tx finality check failed: receipt shows that transaction has failed", ); @@ -1009,7 +1009,7 @@ pub fn verify_transaction_finalized( false } Err(err) => { - frame_support::debug::trace!( + log::trace!( target: "runtime", "Tx finality check failed: receipt check has failed: {}", err, diff --git a/polkadot/bridges/modules/ethereum/src/mock.rs b/polkadot/bridges/modules/ethereum/src/mock.rs index e812b65f36..35c093f363 100644 --- a/polkadot/bridges/modules/ethereum/src/mock.rs +++ b/polkadot/bridges/modules/ethereum/src/mock.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/modules/ethereum/src/test_utils.rs b/polkadot/bridges/modules/ethereum/src/test_utils.rs index ad40194128..18ad6876d6 100644 --- a/polkadot/bridges/modules/ethereum/src/test_utils.rs +++ b/polkadot/bridges/modules/ethereum/src/test_utils.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/modules/ethereum/src/validators.rs b/polkadot/bridges/modules/ethereum/src/validators.rs index d4ddac66b7..7ec22a4439 100644 --- a/polkadot/bridges/modules/ethereum/src/validators.rs +++ b/polkadot/bridges/modules/ethereum/src/validators.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/modules/ethereum/src/verification.rs b/polkadot/bridges/modules/ethereum/src/verification.rs index 3882e6b529..c79242d1d4 100644 --- a/polkadot/bridges/modules/ethereum/src/verification.rs +++ b/polkadot/bridges/modules/ethereum/src/verification.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -144,7 +144,7 @@ pub fn accept_aura_header_into_pool( // the heaviest, but rare operation - we do not want invalid receipts in the pool if let Some(receipts) = receipts { - frame_support::debug::trace!(target: "runtime", "Got receipts! {:?}", receipts); + log::trace!(target: "runtime", "Got receipts! 
{:?}", receipts); if header.check_receipts_root(receipts).is_err() { return Err(Error::TransactionsReceiptsMismatch); } @@ -166,7 +166,7 @@ pub fn verify_aura_header( // the rest of checks requires access to the parent header let context = storage.import_context(submitter, &header.parent_hash).ok_or_else(|| { - frame_support::debug::warn!( + log::warn!( target: "runtime", "Missing parent PoA block: ({:?}, {})", header.number.checked_sub(1), diff --git a/polkadot/bridges/modules/finality-verifier/Cargo.toml b/polkadot/bridges/modules/finality-verifier/Cargo.toml deleted file mode 100644 index 35d4d6880a..0000000000 --- a/polkadot/bridges/modules/finality-verifier/Cargo.toml +++ /dev/null @@ -1,45 +0,0 @@ -[package] -name = "pallet-finality-verifier" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -finality-grandpa = { version = "0.14.0", default-features = false } -serde = { version = "1.0", optional = true } - -# Bridge Dependencies - -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[dev-dependencies] -bp-test-utils = {path = "../../primitives/test-utils" } -pallet-substrate-bridge = { path = 
"../../modules/substrate" } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } - - -[features] -default = ["std"] -std = [ - "bp-runtime/std", - "bp-header-chain/std", - "codec/std", - "finality-grandpa/std", - "frame-support/std", - "frame-system/std", - "serde", - "sp-runtime/std", - "sp-std/std", -] diff --git a/polkadot/bridges/modules/finality-verifier/src/lib.rs b/polkadot/bridges/modules/finality-verifier/src/lib.rs deleted file mode 100644 index d799cc27f3..0000000000 --- a/polkadot/bridges/modules/finality-verifier/src/lib.rs +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright 2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate Finality Verifier Pallet -//! -//! The goal of this pallet is to provide a safe interface for writing finalized headers to an -//! external pallet which tracks headers and finality proofs. By safe, we mean that only headers -//! whose finality has been verified will be written to the underlying pallet. -//! -//! By verifying the finality of headers before writing them to storage we prevent DoS vectors in -//! which unfinalized headers get written to storage even if they don't have a chance of being -//! finalized in the future (such as in the case where a different fork gets finalized). -//! -//! 
The underlying pallet used for storage is assumed to be a pallet which tracks headers and -//! GRANDPA authority set changes. This information is used during the verification of GRANDPA -//! finality proofs. - -#![cfg_attr(not(feature = "std"), no_std)] -// Runtime-generated enums -#![allow(clippy::large_enum_variant)] - -use bp_header_chain::{justification::verify_justification, AncestryChecker, HeaderChain}; -use bp_runtime::{Chain, HeaderOf}; -use finality_grandpa::voter_set::VoterSet; -use frame_support::{dispatch::DispatchError, ensure}; -use frame_system::ensure_signed; -use sp_runtime::traits::Header as HeaderT; -use sp_std::vec::Vec; - -#[cfg(test)] -mod mock; - -// Re-export in crate namespace for `construct_runtime!` -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - /// Header of the bridged chain. - pub(crate) type BridgedHeader = HeaderOf<::BridgedChain>; - - #[pallet::config] - pub trait Config: frame_system::Config { - /// The chain we are bridging to here. - type BridgedChain: Chain; - - /// The pallet which we will use as our underlying storage mechanism. - type HeaderChain: HeaderChain<::Header, DispatchError>; - - /// The type of ancestry proof used by the pallet. - /// - /// Will be used by the ancestry checker to verify that the header being finalized is - /// related to the best finalized header in storage. - type AncestryProof: Parameter; - - /// The type through which we will verify that a given header is related to the last - /// finalized header in our storage pallet. - type AncestryChecker: AncestryChecker<::Header, Self::AncestryProof>; - - /// The upper bound on the number of requests allowed by the pallet. - /// - /// Once this bound is reached the pallet will not allow any dispatchables to be called - /// until the request count has decreased. 
- #[pallet::constant] - type MaxRequests: Get; - } - - #[pallet::pallet] - pub struct Pallet(PhantomData); - - #[pallet::hooks] - impl Hooks> for Pallet { - fn on_initialize(_n: T::BlockNumber) -> frame_support::weights::Weight { - >::mutate(|count| *count = count.saturating_sub(1)); - - (0_u64) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - } - - #[pallet::call] - impl Pallet { - /// Verify a target header is finalized according to the given finality proof. - /// - /// It will use the underlying storage pallet to fetch information about the current - /// authorities and best finalized header in order to verify that the header is finalized. - /// - /// If successful in verification, it will write the target header to the underlying storage - /// pallet. - #[pallet::weight(0)] - pub fn submit_finality_proof( - origin: OriginFor, - finality_target: BridgedHeader, - justification: Vec, - ancestry_proof: T::AncestryProof, - ) -> DispatchResultWithPostInfo { - let _ = ensure_signed(origin)?; - - ensure!( - Self::request_count() < T::MaxRequests::get(), - >::TooManyRequests - ); - >::mutate(|count| *count += 1); - - frame_support::debug::trace!("Going to try and finalize header {:?}", finality_target); - - let authority_set = T::HeaderChain::authority_set(); - let voter_set = VoterSet::new(authority_set.authorities).ok_or(>::InvalidAuthoritySet)?; - let set_id = authority_set.set_id; - - let (hash, number) = (finality_target.hash(), *finality_target.number()); - verify_justification::>((hash, number), set_id, voter_set, &justification).map_err( - |e| { - frame_support::debug::error!("Received invalid justification for {:?}: {:?}", finality_target, e); - >::InvalidJustification - }, - )?; - - let best_finalized = T::HeaderChain::best_finalized(); - frame_support::debug::trace!("Checking ancestry against best finalized header: {:?}", &best_finalized); - - ensure!( - T::AncestryChecker::are_ancestors(&best_finalized, 
&finality_target, &ancestry_proof), - >::InvalidAncestryProof - ); - - T::HeaderChain::append_header(finality_target); - frame_support::debug::info!("Succesfully imported finalized header with hash {:?}!", hash); - - Ok(().into()) - } - } - - /// The current number of requests for calling dispatchables. - /// - /// If the `RequestCount` hits `MaxRequests`, no more calls will be allowed to the pallet until - /// the request capacity is increased. - /// - /// The `RequestCount` is decreased by one at the beginning of every block. This is to ensure - /// that the pallet can always make progress. - #[pallet::storage] - #[pallet::getter(fn request_count)] - pub(super) type RequestCount = StorageValue<_, u32, ValueQuery>; - - #[pallet::error] - pub enum Error { - /// The given justification is invalid for the given header. - InvalidJustification, - /// The given ancestry proof is unable to verify that the child and ancestor headers are - /// related. - InvalidAncestryProof, - /// The authority set from the underlying header chain is invalid. - InvalidAuthoritySet, - /// Failed to write a header to the underlying header chain. - FailedToWriteHeader, - /// There are too many requests for the current window to handle. 
- TooManyRequests, - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{run_test, test_header, Origin, TestRuntime}; - use bp_test_utils::{authority_list, make_justification_for_header}; - use codec::Encode; - use frame_support::{assert_err, assert_ok}; - - fn initialize_substrate_bridge() { - let genesis = test_header(0); - - let init_data = pallet_substrate_bridge::InitializationData { - header: genesis, - authority_list: authority_list(), - set_id: 1, - scheduled_change: None, - is_halted: false, - }; - - assert_ok!(pallet_substrate_bridge::Module::::initialize( - Origin::root(), - init_data - )); - } - - fn submit_finality_proof() -> frame_support::dispatch::DispatchResultWithPostInfo { - let child = test_header(1); - let header = test_header(2); - - let set_id = 1; - let grandpa_round = 1; - let justification = make_justification_for_header(&header, grandpa_round, set_id, &authority_list()).encode(); - let ancestry_proof = vec![child, header.clone()]; - - Module::::submit_finality_proof(Origin::signed(1), header, justification, ancestry_proof) - } - - fn next_block() { - use frame_support::traits::OnInitialize; - - let current_number = frame_system::Pallet::::block_number(); - frame_system::Pallet::::set_block_number(current_number + 1); - let _ = Module::::on_initialize(current_number); - } - - #[test] - fn succesfully_imports_header_with_valid_finality_and_ancestry_proofs() { - run_test(|| { - initialize_substrate_bridge(); - - assert_ok!(submit_finality_proof()); - - let header = test_header(2); - assert_eq!( - pallet_substrate_bridge::Module::::best_headers(), - vec![(*header.number(), header.hash())] - ); - - assert_eq!(pallet_substrate_bridge::Module::::best_finalized(), header); - }) - } - - #[test] - fn rejects_justification_that_skips_authority_set_transition() { - run_test(|| { - initialize_substrate_bridge(); - - let child = test_header(1); - let header = test_header(2); - - let set_id = 2; - let grandpa_round = 1; - let 
justification = - make_justification_for_header(&header, grandpa_round, set_id, &authority_list()).encode(); - let ancestry_proof = vec![child, header.clone()]; - - assert_err!( - Module::::submit_finality_proof(Origin::signed(1), header, justification, ancestry_proof,), - >::InvalidJustification - ); - }) - } - - #[test] - fn does_not_import_header_with_invalid_finality_proof() { - run_test(|| { - initialize_substrate_bridge(); - - let child = test_header(1); - let header = test_header(2); - - let justification = [1u8; 32].encode(); - let ancestry_proof = vec![child, header.clone()]; - - assert_err!( - Module::::submit_finality_proof(Origin::signed(1), header, justification, ancestry_proof,), - >::InvalidJustification - ); - }) - } - - #[test] - fn does_not_import_header_with_invalid_ancestry_proof() { - run_test(|| { - initialize_substrate_bridge(); - - let header = test_header(2); - - let set_id = 1; - let grandpa_round = 1; - let justification = - make_justification_for_header(&header, grandpa_round, set_id, &authority_list()).encode(); - - // For testing, we've made it so that an empty ancestry proof is invalid - let ancestry_proof = vec![]; - - assert_err!( - Module::::submit_finality_proof(Origin::signed(1), header, justification, ancestry_proof,), - >::InvalidAncestryProof - ); - }) - } - - #[test] - fn disallows_invalid_authority_set() { - run_test(|| { - use bp_test_utils::{alice, bob}; - - let genesis = test_header(0); - - let invalid_authority_list = vec![(alice(), u64::MAX), (bob(), u64::MAX)]; - let init_data = pallet_substrate_bridge::InitializationData { - header: genesis, - authority_list: invalid_authority_list, - set_id: 1, - scheduled_change: None, - is_halted: false, - }; - - assert_ok!(pallet_substrate_bridge::Module::::initialize( - Origin::root(), - init_data - )); - - let header = test_header(1); - let justification = [1u8; 32].encode(); - let ancestry_proof = vec![]; - - assert_err!( - Module::::submit_finality_proof(Origin::signed(1), 
header, justification, ancestry_proof,), - >::InvalidAuthoritySet - ); - }) - } - - #[test] - fn disallows_imports_once_limit_is_hit_in_single_block() { - run_test(|| { - initialize_substrate_bridge(); - assert_ok!(submit_finality_proof()); - assert_ok!(submit_finality_proof()); - assert_err!(submit_finality_proof(), >::TooManyRequests); - }) - } - - #[test] - fn allows_request_after_new_block_has_started() { - run_test(|| { - initialize_substrate_bridge(); - assert_ok!(submit_finality_proof()); - assert_ok!(submit_finality_proof()); - - next_block(); - assert_ok!(submit_finality_proof()); - }) - } - - #[test] - fn disallows_imports_once_limit_is_hit_across_different_blocks() { - run_test(|| { - initialize_substrate_bridge(); - assert_ok!(submit_finality_proof()); - assert_ok!(submit_finality_proof()); - - next_block(); - assert_ok!(submit_finality_proof()); - assert_err!(submit_finality_proof(), >::TooManyRequests); - }) - } - - #[test] - fn allows_max_requests_after_long_time_with_no_activity() { - run_test(|| { - initialize_substrate_bridge(); - assert_ok!(submit_finality_proof()); - assert_ok!(submit_finality_proof()); - - next_block(); - next_block(); - - next_block(); - assert_ok!(submit_finality_proof()); - assert_ok!(submit_finality_proof()); - }) - } -} diff --git a/polkadot/bridges/modules/finality-verifier/src/mock.rs b/polkadot/bridges/modules/finality-verifier/src/mock.rs deleted file mode 100644 index d87af92599..0000000000 --- a/polkadot/bridges/modules/finality-verifier/src/mock.rs +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -// From construct_runtime macro -#![allow(clippy::from_over_into)] - -use crate::pallet::{BridgedHeader, Config}; -use bp_runtime::{BlockNumberOf, Chain}; -use frame_support::{construct_runtime, parameter_types, weights::Weight}; -use sp_runtime::{ - testing::{Header, H256}, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, -}; - -pub type AccountId = u64; -pub type TestHeader = BridgedHeader; -pub type TestNumber = BlockNumberOf<::BridgedChain>; - -type Block = frame_system::mocking::MockBlock; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - -use crate as finality_verifier; - -construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Bridge: pallet_substrate_bridge::{Pallet}, - FinalityVerifier: finality_verifier::{Pallet}, - } -} - -parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - -impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = (); - type SystemWeightInfo = (); - type DbWeight = (); - type BlockWeights = (); - type BlockLength = (); - type SS58Prefix = (); - type OnSetCode = (); -} - -impl pallet_substrate_bridge::Config for TestRuntime { - type BridgedChain = TestBridgedChain; -} - -parameter_types! { - pub const MaxRequests: u32 = 2; -} - -impl finality_verifier::Config for TestRuntime { - type BridgedChain = TestBridgedChain; - type HeaderChain = pallet_substrate_bridge::Module; - type AncestryProof = Vec<::Header>; - type AncestryChecker = Checker<::Header, Self::AncestryProof>; - type MaxRequests = MaxRequests; -} - -#[derive(Debug)] -pub struct TestBridgedChain; - -impl Chain for TestBridgedChain { - type BlockNumber = ::BlockNumber; - type Hash = ::Hash; - type Hasher = ::Hashing; - type Header = ::Header; -} - -#[derive(Debug)] -pub struct Checker(std::marker::PhantomData<(H, P)>); - -impl bp_header_chain::AncestryChecker> for Checker> { - fn are_ancestors(_ancestor: &H, _child: &H, proof: &Vec) -> bool { - !proof.is_empty() - } -} - -pub fn run_test(test: impl FnOnce() -> T) -> T { - sp_io::TestExternalities::new(Default::default()).execute_with(test) -} - -pub fn test_header(num: TestNumber) -> TestHeader { - // We wrap the call to avoid explicit type annotations in our tests - 
bp_test_utils::test_header(num) -} diff --git a/polkadot/bridges/modules/substrate/Cargo.toml b/polkadot/bridges/modules/grandpa/Cargo.toml similarity index 76% rename from polkadot/bridges/modules/substrate/Cargo.toml rename to polkadot/bridges/modules/grandpa/Cargo.toml index 490aa2098b..810dce3dd5 100644 --- a/polkadot/bridges/modules/substrate/Cargo.toml +++ b/polkadot/bridges/modules/grandpa/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "pallet-substrate-bridge" +name = "pallet-bridge-grandpa" version = "0.1.0" authors = ["Parity Technologies "] edition = "2018" @@ -10,13 +10,14 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } finality-grandpa = { version = "0.14.0", default-features = false } -hash-db = { version = "0.15.2", default-features = false } +log = { version = "0.4.14", default-features = false } +num-traits = { version = "0.2", default-features = false } serde = { version = "1.0", optional = true } # Bridge Dependencies -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } # Substrate Dependencies @@ -27,27 +28,32 @@ sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +# Optional Benchmarking Dependencies +bp-test-utils = { path = "../../primitives/test-utils", default-features = false, optional = true } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } + [dev-dependencies] -bp-test-utils = {path = 
"../../primitives/test-utils" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" } [features] default = ["std"] std = [ "bp-header-chain/std", "bp-runtime/std", - "bp-header-chain/std", + "bp-test-utils/std", "codec/std", "finality-grandpa/std", "frame-support/std", "frame-system/std", - "hash-db/std", + "log/std", + "num-traits/std", "serde", "sp-finality-grandpa/std", "sp-runtime/std", "sp-std/std", "sp-trie/std", ] -runtime-benchmarks = [] +runtime-benchmarks = [ + "bp-test-utils", + "frame-benchmarking", +] diff --git a/polkadot/bridges/modules/grandpa/src/benchmarking.rs b/polkadot/bridges/modules/grandpa/src/benchmarking.rs new file mode 100644 index 0000000000..cb170fdc8b --- /dev/null +++ b/polkadot/bridges/modules/grandpa/src/benchmarking.rs @@ -0,0 +1,272 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Benchmarks for the GRANDPA Pallet. +//! +//! The main dispatchable for the GRANDPA pallet is `submit_finality_proof`, so these benchmarks are +//! based around that. There are to main factors which affect finality proof verification: +//! +//! 1. 
The number of `votes-ancestries` in the justification +//! 2. The number of `pre-commits` in the justification +//! +//! Vote ancestries are the headers between (`finality_target`, `head_of_chain`], where +//! `header_of_chain` is a decendant of `finality_target`. +//! +//! Pre-commits are messages which are signed by validators at the head of the chain they think is +//! the best. +//! +//! Consider the following: +//! +//! / [B'] <- [C'] +//! [A] <- [B] <- [C] +//! +//! The common ancestor of both forks is block A, so this is what GRANDPA will finalize. In order to +//! verify this we will have vote ancestries of [B, C, B', C'] and pre-commits [C, C']. +//! +//! Note that the worst case scenario here would be a justification where each validator has it's +//! own fork which is `SESSION_LENGTH` blocks long. +//! +//! As far as benchmarking results go, the only benchmark that should be used in +//! `pallet-bridge-grandpa` to annotate weights is the `submit_finality_proof` one. The others are +//! looking at the effects of specific code paths and do not actually reflect the overall worst case +//! scenario. + +use crate::*; + +use bp_test_utils::{ + accounts, authority_list, make_justification_for_header, test_keyring, JustificationGeneratorParams, ALICE, + TEST_GRANDPA_ROUND, TEST_GRANDPA_SET_ID, +}; +use frame_benchmarking::{benchmarks_instance_pallet, whitelisted_caller}; +use frame_system::RawOrigin; +use sp_finality_grandpa::AuthorityId; +use sp_runtime::traits::{One, Zero}; +use sp_std::{vec, vec::Vec}; + +// The maximum number of vote ancestries to include in a justification. +// +// In practice this would be limited by the session length (number of blocks a single authority set +// can produce) of a given chain. +const MAX_VOTE_ANCESTRIES: u32 = 1000; + +// The maximum number of pre-commits to include in a justification. In practice this scales with the +// number of validators. +const MAX_VALIDATOR_SET_SIZE: u32 = 1024; + +benchmarks_instance_pallet! 
{ + // This is the "gold standard" benchmark for this extrinsic, and it's what should be used to + // annotate the weight in the pallet. + // + // The other benchmarks related to `submit_finality_proof` are looking at the effect of specific + // parameters and are there mostly for seeing how specific codepaths behave. + submit_finality_proof { + let v in 1..MAX_VOTE_ANCESTRIES; + let p in 1..MAX_VALIDATOR_SET_SIZE; + + let caller: T::AccountId = whitelisted_caller(); + + let authority_list = accounts(p as u16) + .iter() + .map(|id| (AuthorityId::from(*id), 1)) + .collect::>(); + + let init_data = InitializationData { + header: bp_test_utils::test_header(Zero::zero()), + authority_list, + set_id: TEST_GRANDPA_SET_ID, + is_halted: false, + }; + + initialize_bridge::(init_data); + let header: BridgedHeader = bp_test_utils::test_header(One::one()); + + let params = JustificationGeneratorParams { + header: header.clone(), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: accounts(p as u16).iter().map(|k| (*k, 1)).collect::>(), + votes: v, + forks: 1, + }; + + let justification = make_justification_for_header(params); + + }: _(RawOrigin::Signed(caller), header, justification) + verify { + let header: BridgedHeader = bp_test_utils::test_header(One::one()); + let expected_hash = header.hash(); + + assert_eq!(>::get(), expected_hash); + assert!(>::contains_key(expected_hash)); + } + + // What we want to check here is the effect of vote ancestries on justification verification + // do this by varying the number of headers between `finality_target` and `header_of_chain`. 
+ submit_finality_proof_on_single_fork { + let v in 1..MAX_VOTE_ANCESTRIES; + + let caller: T::AccountId = whitelisted_caller(); + + let init_data = InitializationData { + header: bp_test_utils::test_header(Zero::zero()), + authority_list: authority_list(), + set_id: TEST_GRANDPA_SET_ID, + is_halted: false, + }; + + initialize_bridge::(init_data); + let header: BridgedHeader = bp_test_utils::test_header(One::one()); + + let params = JustificationGeneratorParams { + header: header.clone(), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: test_keyring(), + votes: v, + forks: 1, + }; + + let justification = make_justification_for_header(params); + + }: submit_finality_proof(RawOrigin::Signed(caller), header, justification) + verify { + let header: BridgedHeader = bp_test_utils::test_header(One::one()); + let expected_hash = header.hash(); + + assert_eq!(>::get(), expected_hash); + assert!(>::contains_key(expected_hash)); + } + + // What we want to check here is the effect of many pre-commits on justification verification. + // We do this by creating many forks, whose head will be used as a signed pre-commit in the + // final justification. 
+ submit_finality_proof_on_many_forks { + let p in 1..MAX_VALIDATOR_SET_SIZE; + + let caller: T::AccountId = whitelisted_caller(); + + let authority_list = accounts(p as u16) + .iter() + .map(|id| (AuthorityId::from(*id), 1)) + .collect::>(); + + let init_data = InitializationData { + header: bp_test_utils::test_header(Zero::zero()), + authority_list, + set_id: TEST_GRANDPA_SET_ID, + is_halted: false, + }; + + initialize_bridge::(init_data); + let header: BridgedHeader = bp_test_utils::test_header(One::one()); + + let params = JustificationGeneratorParams { + header: header.clone(), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: accounts(p as u16).iter().map(|k| (*k, 1)).collect::>(), + votes: p, + forks: p, + }; + + let justification = make_justification_for_header(params); + + }: submit_finality_proof(RawOrigin::Signed(caller), header, justification) + verify { + let header: BridgedHeader = bp_test_utils::test_header(One::one()); + let expected_hash = header.hash(); + + assert_eq!(>::get(), expected_hash); + assert!(>::contains_key(expected_hash)); + } + + // Here we want to find out the overheaded of looking through consensus digests found in a + // header. As the number of logs in a header grows, how much more work do we require to look + // through them? + // + // Note that this should be the same for looking through scheduled changes and forces changes, + // which is why we only have one benchmark for this. + find_scheduled_change { + // Not really sure what a good bound for this is. 
+ let n in 1..1000; + + let mut logs = vec![]; + for i in 0..n { + // We chose a non-consensus log on purpose since that way we have to look through all + // the logs in the header + logs.push(sp_runtime::DigestItem::Other(vec![])); + } + + let mut header: BridgedHeader = bp_test_utils::test_header(Zero::zero()); + let digest = header.digest_mut(); + *digest = sp_runtime::Digest { + logs, + }; + + }: { + crate::find_scheduled_change(&header) + } + + // What we want to check here is how long it takes to read and write the authority set tracked + // by the pallet as the number of authorities grows. + read_write_authority_sets { + // The current max target number of validators on Polkadot/Kusama + let n in 1..1000; + + let mut authorities = vec![]; + for i in 0..n { + authorities.push((ALICE, 1)); + } + + let authority_set = bp_header_chain::AuthoritySet { + authorities: authorities.iter().map(|(id, w)| (AuthorityId::from(*id), *w)).collect(), + set_id: 0 + }; + + >::put(&authority_set); + + }: { + let authority_set = >::get(); + >::put(&authority_set); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use frame_support::assert_ok; + + #[test] + fn finality_proof_is_valid() { + mock::run_test(|| { + assert_ok!(test_benchmark_submit_finality_proof::()); + }); + } + + #[test] + fn single_fork_finality_proof_is_valid() { + mock::run_test(|| { + assert_ok!(test_benchmark_submit_finality_proof_on_single_fork::()); + }); + } + + #[test] + fn multi_fork_finality_proof_is_valid() { + mock::run_test(|| { + assert_ok!(test_benchmark_submit_finality_proof_on_many_forks::()); + }); + } +} diff --git a/polkadot/bridges/modules/grandpa/src/lib.rs b/polkadot/bridges/modules/grandpa/src/lib.rs new file mode 100644 index 0000000000..9fb7372b02 --- /dev/null +++ b/polkadot/bridges/modules/grandpa/src/lib.rs @@ -0,0 +1,1036 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Substrate GRANDPA Pallet +//! +//! This pallet is an on-chain GRANDPA light client for Substrate based chains. +//! +//! This pallet achieves this by trustlessly verifying GRANDPA finality proofs on-chain. Once +//! verified, finalized headers are stored in the pallet, thereby creating a sparse header chain. +//! This sparse header chain can be used as a source of truth for other higher-level applications. +//! +//! The pallet is responsible for tracking GRANDPA validator set hand-offs. We only import headers +//! with justifications signed by the current validator set we know of. The header is inspected for +//! a `ScheduledChanges` digest item, which is then used to update to next validator set. +//! +//! Since this pallet only tracks finalized headers it does not deal with forks. Forks can only +//! occur if the GRANDPA validator set on the bridged chain is either colluding or there is a severe +//! bug causing resulting in an equivocation. Such events are outside of the scope of this pallet. +//! Shall the fork occur on the bridged chain governance intervention will be required to +//! re-initialize the bridge and track the right fork. 
+ +#![cfg_attr(not(feature = "std"), no_std)] +// Runtime-generated enums +#![allow(clippy::large_enum_variant)] + +use crate::weights::WeightInfo; + +use bp_header_chain::justification::GrandpaJustification; +use bp_header_chain::InitializationData; +use bp_runtime::{BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf}; +use finality_grandpa::voter_set::VoterSet; +use frame_support::ensure; +use frame_system::{ensure_signed, RawOrigin}; +use sp_finality_grandpa::{ConsensusLog, GRANDPA_ENGINE_ID}; +use sp_runtime::traits::{BadOrigin, Header as HeaderT, Zero}; + +#[cfg(test)] +mod mock; + +/// Pallet containing weights for this pallet. +pub mod weights; + +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarking; + +// Re-export in crate namespace for `construct_runtime!` +pub use pallet::*; + +/// Block number of the bridged chain. +pub type BridgedBlockNumber = BlockNumberOf<>::BridgedChain>; +/// Block hash of the bridged chain. +pub type BridgedBlockHash = HashOf<>::BridgedChain>; +/// Hasher of the bridged chain. +pub type BridgedBlockHasher = HasherOf<>::BridgedChain>; +/// Header of the bridged chain. +pub type BridgedHeader = HeaderOf<>::BridgedChain>; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The chain we are bridging to here. + type BridgedChain: Chain; + + /// The upper bound on the number of requests allowed by the pallet. + /// + /// A request refers to an action which writes a header to storage. + /// + /// Once this bound is reached the pallet will not allow any dispatchables to be called + /// until the request count has decreased. + #[pallet::constant] + type MaxRequests: Get; + + /// Maximal number of finalized headers to keep in the storage. + /// + /// The setting is there to prevent growing the on-chain state indefinitely. 
Note + /// the setting does not relate to block numbers - we will simply keep as much items + /// in the storage, so it doesn't guarantee any fixed timeframe for finality headers. + #[pallet::constant] + type HeadersToKeep: Get; + + /// Weights gathered through benchmarking. + type WeightInfo: WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(PhantomData<(T, I)>); + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet { + fn on_initialize(_n: T::BlockNumber) -> frame_support::weights::Weight { + >::mutate(|count| *count = count.saturating_sub(1)); + + (0_u64) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + } + + #[pallet::call] + impl, I: 'static> Pallet { + /// Verify a target header is finalized according to the given finality proof. + /// + /// It will use the underlying storage pallet to fetch information about the current + /// authorities and best finalized header in order to verify that the header is finalized. + /// + /// If successful in verification, it will write the target header to the underlying storage + /// pallet. + #[pallet::weight(T::WeightInfo::submit_finality_proof( + justification.votes_ancestries.len() as u32, + justification.commit.precommits.len() as u32, + ))] + pub fn submit_finality_proof( + origin: OriginFor, + finality_target: BridgedHeader, + justification: GrandpaJustification>, + ) -> DispatchResultWithPostInfo { + ensure_operational::()?; + let _ = ensure_signed(origin)?; + + ensure!( + Self::request_count() < T::MaxRequests::get(), + >::TooManyRequests + ); + + let (hash, number) = (finality_target.hash(), finality_target.number()); + log::trace!(target: "runtime::bridge-grandpa", "Going to try and finalize header {:?}", finality_target); + + let best_finalized = >::get(>::get()).expect( + "In order to reach this point the bridge must have been initialized. Afterwards, + every time `BestFinalized` is updated `ImportedHeaders` is also updated. 
Therefore + `ImportedHeaders` must contain an entry for `BestFinalized`.", + ); + + // We do a quick check here to ensure that our header chain is making progress and isn't + // "travelling back in time" (which could be indicative of something bad, e.g a hard-fork). + ensure!(best_finalized.number() < number, >::OldHeader); + + let authority_set = >::get(); + let set_id = authority_set.set_id; + verify_justification::(&justification, hash, *number, authority_set)?; + + let _enacted = try_enact_authority_change::(&finality_target, set_id)?; + let index = >::get(); + let pruning = >::try_get(index); + >::put(hash); + >::insert(hash, finality_target); + >::insert(index, hash); + >::mutate(|count| *count += 1); + + // Update ring buffer pointer and remove old header. + >::put((index + 1) % T::HeadersToKeep::get()); + if let Ok(hash) = pruning { + log::debug!(target: "runtime::bridge-grandpa", "Pruning old header: {:?}.", hash); + >::remove(hash); + } + + log::info!(target: "runtime::bridge-grandpa", "Succesfully imported finalized header with hash {:?}!", hash); + + Ok(().into()) + } + + /// Bootstrap the bridge pallet with an initial header and authority set from which to sync. + /// + /// The initial configuration provided does not need to be the genesis header of the bridged + /// chain, it can be any arbirary header. You can also provide the next scheduled set change + /// if it is already know. + /// + /// This function is only allowed to be called from a trusted origin and writes to storage + /// with practically no checks in terms of the validity of the data. It is important that + /// you ensure that valid data is being passed in. 
+ #[pallet::weight((T::DbWeight::get().reads_writes(2, 5), DispatchClass::Operational))] + pub fn initialize( + origin: OriginFor, + init_data: super::InitializationData>, + ) -> DispatchResultWithPostInfo { + ensure_owner_or_root::(origin)?; + + let init_allowed = !>::exists(); + ensure!(init_allowed, >::AlreadyInitialized); + initialize_bridge::(init_data.clone()); + + log::info!( + target: "runtime::bridge-grandpa", + "Pallet has been initialized with the following parameters: {:?}", + init_data + ); + + Ok(().into()) + } + + /// Change `PalletOwner`. + /// + /// May only be called either by root, or by `PalletOwner`. + #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] + pub fn set_owner(origin: OriginFor, new_owner: Option) -> DispatchResultWithPostInfo { + ensure_owner_or_root::(origin)?; + match new_owner { + Some(new_owner) => { + PalletOwner::::put(&new_owner); + log::info!(target: "runtime::bridge-grandpa", "Setting pallet Owner to: {:?}", new_owner); + } + None => { + PalletOwner::::kill(); + log::info!(target: "runtime::bridge-grandpa", "Removed Owner of pallet."); + } + } + + Ok(().into()) + } + + /// Halt or resume all pallet operations. + /// + /// May only be called either by root, or by `PalletOwner`. + #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] + pub fn set_operational(origin: OriginFor, operational: bool) -> DispatchResultWithPostInfo { + ensure_owner_or_root::(origin)?; + >::put(operational); + + if operational { + log::info!(target: "runtime::bridge-grandpa", "Resuming pallet operations."); + } else { + log::warn!(target: "runtime::bridge-grandpa", "Stopping pallet operations."); + } + + Ok(().into()) + } + } + + /// The current number of requests which have written to storage. + /// + /// If the `RequestCount` hits `MaxRequests`, no more calls will be allowed to the pallet until + /// the request capacity is increased. 
+ /// + /// The `RequestCount` is decreased by one at the beginning of every block. This is to ensure + /// that the pallet can always make progress. + #[pallet::storage] + #[pallet::getter(fn request_count)] + pub(super) type RequestCount, I: 'static = ()> = StorageValue<_, u32, ValueQuery>; + + /// Hash of the header used to bootstrap the pallet. + #[pallet::storage] + pub(super) type InitialHash, I: 'static = ()> = StorageValue<_, BridgedBlockHash, ValueQuery>; + + /// Hash of the best finalized header. + #[pallet::storage] + pub(super) type BestFinalized, I: 'static = ()> = StorageValue<_, BridgedBlockHash, ValueQuery>; + + /// A ring buffer of imported hashes. Ordered by the insertion time. + #[pallet::storage] + pub(super) type ImportedHashes, I: 'static = ()> = + StorageMap<_, Identity, u32, BridgedBlockHash>; + + /// Current ring buffer position. + #[pallet::storage] + pub(super) type ImportedHashesPointer, I: 'static = ()> = StorageValue<_, u32, ValueQuery>; + + /// Headers which have been imported into the pallet. + #[pallet::storage] + pub(super) type ImportedHeaders, I: 'static = ()> = + StorageMap<_, Identity, BridgedBlockHash, BridgedHeader>; + + /// The current GRANDPA Authority set. + #[pallet::storage] + pub(super) type CurrentAuthoritySet, I: 'static = ()> = + StorageValue<_, bp_header_chain::AuthoritySet, ValueQuery>; + + /// Optional pallet owner. + /// + /// Pallet owner has a right to halt all pallet operations and then resume it. If it is + /// `None`, then there are no direct ways to halt/resume pallet operations, but other + /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt + /// flag directly or call the `halt_operations`). + #[pallet::storage] + pub(super) type PalletOwner, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; + + /// If true, all pallet transactions are failed immediately. 
+ #[pallet::storage] + pub(super) type IsHalted, I: 'static = ()> = StorageValue<_, bool, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig, I: 'static = ()> { + /// Optional module owner account. + pub owner: Option, + /// Optional module initialization data. + pub init_data: Option>>, + } + + #[cfg(feature = "std")] + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + Self { + owner: None, + init_data: None, + } + } + } + + #[pallet::genesis_build] + impl, I: 'static> GenesisBuild for GenesisConfig { + fn build(&self) { + if let Some(ref owner) = self.owner { + >::put(owner); + } + + if let Some(init_data) = self.init_data.clone() { + initialize_bridge::(init_data); + } else { + // Since the bridge hasn't been initialized we shouldn't allow anyone to perform + // transactions. + >::put(true); + } + } + } + + #[pallet::error] + pub enum Error { + /// The given justification is invalid for the given header. + InvalidJustification, + /// The authority set from the underlying header chain is invalid. + InvalidAuthoritySet, + /// There are too many requests for the current window to handle. + TooManyRequests, + /// The header being imported is older than the best finalized header known to the pallet. + OldHeader, + /// The header is unknown to the pallet. + UnknownHeader, + /// The scheduled authority set change found in the header is unsupported by the pallet. + /// + /// This is the case for non-standard (e.g forced) authority set changes. + UnsupportedScheduledChange, + /// The pallet has already been initialized. + AlreadyInitialized, + /// All pallet operations are halted. + Halted, + /// The storage proof doesn't contains storage root. So it is invalid for given header. + StorageRootMismatch, + } + + /// Check the given header for a GRANDPA scheduled authority set change. If a change + /// is found it will be enacted immediately. 
+ /// + /// This function does not support forced changes, or scheduled changes with delays + /// since these types of changes are indicitive of abnormal behaviour from GRANDPA. + /// + /// Returned value will indicate if a change was enacted or not. + pub(crate) fn try_enact_authority_change, I: 'static>( + header: &BridgedHeader, + current_set_id: sp_finality_grandpa::SetId, + ) -> Result { + let mut change_enacted = false; + + // We don't support forced changes - at that point governance intervention is required. + ensure!( + super::find_forced_change(header).is_none(), + >::UnsupportedScheduledChange + ); + + if let Some(change) = super::find_scheduled_change(header) { + // GRANDPA only includes a `delay` for forced changes, so this isn't valid. + ensure!(change.delay == Zero::zero(), >::UnsupportedScheduledChange); + + // TODO [#788]: Stop manually increasing the `set_id` here. + let next_authorities = bp_header_chain::AuthoritySet { + authorities: change.next_authorities, + set_id: current_set_id + 1, + }; + + // Since our header schedules a change and we know the delay is 0, it must also enact + // the change. + >::put(&next_authorities); + change_enacted = true; + + log::info!( + target: "runtime::bridge-grandpa", + "Transitioned from authority set {} to {}! New authorities are: {:?}", + current_set_id, + current_set_id + 1, + next_authorities, + ); + }; + + Ok(change_enacted) + } + + /// Verify a GRANDPA justification (finality proof) for a given header. + /// + /// Will use the GRANDPA current authorities known to the pallet. + /// + /// If succesful it returns the decoded GRANDPA justification so we can refund any weight which + /// was overcharged in the initial call. 
+ pub(crate) fn verify_justification, I: 'static>( + justification: &GrandpaJustification>, + hash: BridgedBlockHash, + number: BridgedBlockNumber, + authority_set: bp_header_chain::AuthoritySet, + ) -> Result<(), sp_runtime::DispatchError> { + use bp_header_chain::justification::verify_justification; + + let voter_set = VoterSet::new(authority_set.authorities).ok_or(>::InvalidAuthoritySet)?; + let set_id = authority_set.set_id; + + Ok( + verify_justification::>((hash, number), set_id, &voter_set, &justification).map_err( + |e| { + log::error!(target: "runtime::bridge-grandpa", "Received invalid justification for {:?}: {:?}", hash, e); + >::InvalidJustification + }, + )?, + ) + } + + /// Since this writes to storage with no real checks this should only be used in functions that + /// were called by a trusted origin. + pub(crate) fn initialize_bridge, I: 'static>( + init_params: super::InitializationData>, + ) { + let super::InitializationData { + header, + authority_list, + set_id, + is_halted, + } = init_params; + + let initial_hash = header.hash(); + >::put(initial_hash); + >::put(initial_hash); + >::insert(initial_hash, header); + + let authority_set = bp_header_chain::AuthoritySet::new(authority_list, set_id); + >::put(authority_set); + + >::put(is_halted); + } + + /// Ensure that the origin is either root, or `PalletOwner`. + fn ensure_owner_or_root, I: 'static>(origin: T::Origin) -> Result<(), BadOrigin> { + match origin.into() { + Ok(RawOrigin::Root) => Ok(()), + Ok(RawOrigin::Signed(ref signer)) if Some(signer) == >::get().as_ref() => Ok(()), + _ => Err(BadOrigin), + } + } + + /// Ensure that the pallet is in operational mode (not halted). + fn ensure_operational, I: 'static>() -> Result<(), Error> { + if >::get() { + Err(>::Halted) + } else { + Ok(()) + } + } +} + +impl, I: 'static> Pallet { + /// Get the best finalized header the pallet knows of. + /// + /// Returns a dummy header if there is no best header. 
This can only happen + /// if the pallet has not been initialized yet. + pub fn best_finalized() -> BridgedHeader { + let hash = >::get(); + >::get(hash).unwrap_or_else(|| { + >::new( + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ) + }) + } + + /// Check if a particular header is known to the bridge pallet. + pub fn is_known_header(hash: BridgedBlockHash) -> bool { + >::contains_key(hash) + } + + /// Verify that the passed storage proof is valid, given it is crafted using + /// known finalized header. If the proof is valid, then the `parse` callback + /// is called and the function returns its result. + pub fn parse_finalized_storage_proof( + hash: BridgedBlockHash, + storage_proof: sp_trie::StorageProof, + parse: impl FnOnce(bp_runtime::StorageProofChecker>) -> R, + ) -> Result { + let header = >::get(hash).ok_or(Error::::UnknownHeader)?; + let storage_proof_checker = bp_runtime::StorageProofChecker::new(*header.state_root(), storage_proof) + .map_err(|_| Error::::StorageRootMismatch)?; + + Ok(parse(storage_proof_checker)) + } +} + +pub(crate) fn find_scheduled_change(header: &H) -> Option> { + use sp_runtime::generic::OpaqueDigestItemId; + + let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); + + let filter_log = |log: ConsensusLog| match log { + ConsensusLog::ScheduledChange(change) => Some(change), + _ => None, + }; + + // find the first consensus digest with the right ID which converts to + // the right kind of consensus log. + header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) +} + +/// Checks the given header for a consensus digest signalling a **forced** scheduled change and +/// extracts it. 
+pub(crate) fn find_forced_change( + header: &H, +) -> Option<(H::Number, sp_finality_grandpa::ScheduledChange)> { + use sp_runtime::generic::OpaqueDigestItemId; + + let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); + + let filter_log = |log: ConsensusLog| match log { + ConsensusLog::ForcedChange(delay, change) => Some((delay, change)), + _ => None, + }; + + // find the first consensus digest with the right ID which converts to + // the right kind of consensus log. + header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) +} + +/// (Re)initialize bridge with given header for using it in `pallet-bridge-messages` benchmarks. +#[cfg(feature = "runtime-benchmarks")] +pub fn initialize_for_benchmarks, I: 'static>(header: BridgedHeader) { + initialize_bridge::(InitializationData { + header, + authority_list: sp_std::vec::Vec::new(), // we don't verify any proofs in external benchmarks + set_id: 0, + is_halted: false, + }); +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{run_test, test_header, Origin, TestHash, TestHeader, TestNumber, TestRuntime}; + use bp_test_utils::{ + authority_list, make_default_justification, make_justification_for_header, JustificationGeneratorParams, ALICE, + BOB, + }; + use codec::Encode; + use frame_support::weights::PostDispatchInfo; + use frame_support::{assert_err, assert_noop, assert_ok}; + use sp_runtime::{Digest, DigestItem, DispatchError}; + + fn initialize_substrate_bridge() { + assert_ok!(init_with_origin(Origin::root())); + } + + fn init_with_origin( + origin: Origin, + ) -> Result, sp_runtime::DispatchErrorWithPostInfo> { + let genesis = test_header(0); + + let init_data = InitializationData { + header: genesis, + authority_list: authority_list(), + set_id: 1, + is_halted: false, + }; + + Pallet::::initialize(origin, init_data.clone()).map(|_| init_data) + } + + fn submit_finality_proof(header: u8) -> frame_support::dispatch::DispatchResultWithPostInfo { + let header = 
test_header(header.into()); + let justification = make_default_justification(&header); + + Pallet::::submit_finality_proof(Origin::signed(1), header, justification) + } + + fn next_block() { + use frame_support::traits::OnInitialize; + + let current_number = frame_system::Pallet::::block_number(); + frame_system::Pallet::::set_block_number(current_number + 1); + let _ = Pallet::::on_initialize(current_number); + } + + fn change_log(delay: u64) -> Digest { + let consensus_log = ConsensusLog::::ScheduledChange(sp_finality_grandpa::ScheduledChange { + next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)], + delay, + }); + + Digest:: { + logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())], + } + } + + fn forced_change_log(delay: u64) -> Digest { + let consensus_log = ConsensusLog::::ForcedChange( + delay, + sp_finality_grandpa::ScheduledChange { + next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)], + delay, + }, + ); + + Digest:: { + logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())], + } + } + + #[test] + fn init_root_or_owner_origin_can_initialize_pallet() { + run_test(|| { + assert_noop!(init_with_origin(Origin::signed(1)), DispatchError::BadOrigin); + assert_ok!(init_with_origin(Origin::root())); + + // Reset storage so we can initialize the pallet again + BestFinalized::::kill(); + PalletOwner::::put(2); + assert_ok!(init_with_origin(Origin::signed(2))); + }) + } + + #[test] + fn init_storage_entries_are_correctly_initialized() { + run_test(|| { + assert_eq!( + BestFinalized::::get(), + BridgedBlockHash::::default() + ); + assert_eq!(Pallet::::best_finalized(), test_header(0)); + + let init_data = init_with_origin(Origin::root()).unwrap(); + + assert!(>::contains_key(init_data.header.hash())); + assert_eq!(BestFinalized::::get(), init_data.header.hash()); + assert_eq!( + CurrentAuthoritySet::::get().authorities, + init_data.authority_list + ); + assert_eq!(IsHalted::::get(), false); + }) + } + + 
#[test] + fn init_can_only_initialize_pallet_once() { + run_test(|| { + initialize_substrate_bridge(); + assert_noop!( + init_with_origin(Origin::root()), + >::AlreadyInitialized + ); + }) + } + + #[test] + fn pallet_owner_may_change_owner() { + run_test(|| { + PalletOwner::::put(2); + + assert_ok!(Pallet::::set_owner(Origin::root(), Some(1))); + assert_noop!( + Pallet::::set_operational(Origin::signed(2), false), + DispatchError::BadOrigin, + ); + assert_ok!(Pallet::::set_operational(Origin::root(), false)); + + assert_ok!(Pallet::::set_owner(Origin::signed(1), None)); + assert_noop!( + Pallet::::set_operational(Origin::signed(1), true), + DispatchError::BadOrigin, + ); + assert_noop!( + Pallet::::set_operational(Origin::signed(2), true), + DispatchError::BadOrigin, + ); + assert_ok!(Pallet::::set_operational(Origin::root(), true)); + }); + } + + #[test] + fn pallet_may_be_halted_by_root() { + run_test(|| { + assert_ok!(Pallet::::set_operational(Origin::root(), false)); + assert_ok!(Pallet::::set_operational(Origin::root(), true)); + }); + } + + #[test] + fn pallet_may_be_halted_by_owner() { + run_test(|| { + PalletOwner::::put(2); + + assert_ok!(Pallet::::set_operational(Origin::signed(2), false)); + assert_ok!(Pallet::::set_operational(Origin::signed(2), true)); + + assert_noop!( + Pallet::::set_operational(Origin::signed(1), false), + DispatchError::BadOrigin, + ); + assert_noop!( + Pallet::::set_operational(Origin::signed(1), true), + DispatchError::BadOrigin, + ); + + assert_ok!(Pallet::::set_operational(Origin::signed(2), false)); + assert_noop!( + Pallet::::set_operational(Origin::signed(1), true), + DispatchError::BadOrigin, + ); + }); + } + + #[test] + fn pallet_rejects_transactions_if_halted() { + run_test(|| { + >::put(true); + + assert_noop!(submit_finality_proof(1), Error::::Halted,); + }) + } + + #[test] + fn succesfully_imports_header_with_valid_finality() { + run_test(|| { + initialize_substrate_bridge(); + assert_ok!(submit_finality_proof(1)); + + 
let header = test_header(1); + assert_eq!(>::get(), header.hash()); + assert!(>::contains_key(header.hash())); + }) + } + + #[test] + fn rejects_justification_that_skips_authority_set_transition() { + run_test(|| { + initialize_substrate_bridge(); + + let header = test_header(1); + + let params = JustificationGeneratorParams:: { + set_id: 2, + ..Default::default() + }; + let justification = make_justification_for_header(params); + + assert_err!( + Pallet::::submit_finality_proof(Origin::signed(1), header, justification,), + >::InvalidJustification + ); + }) + } + + #[test] + fn does_not_import_header_with_invalid_finality_proof() { + run_test(|| { + initialize_substrate_bridge(); + + let header = test_header(1); + let mut justification = make_default_justification(&header); + justification.round = 42; + + assert_err!( + Pallet::::submit_finality_proof(Origin::signed(1), header, justification,), + >::InvalidJustification + ); + }) + } + + #[test] + fn disallows_invalid_authority_set() { + run_test(|| { + let genesis = test_header(0); + + let invalid_authority_list = vec![(ALICE.into(), u64::MAX), (BOB.into(), u64::MAX)]; + let init_data = InitializationData { + header: genesis, + authority_list: invalid_authority_list, + set_id: 1, + is_halted: false, + }; + + assert_ok!(Pallet::::initialize(Origin::root(), init_data)); + + let header = test_header(1); + let justification = make_default_justification(&header); + + assert_err!( + Pallet::::submit_finality_proof(Origin::signed(1), header, justification,), + >::InvalidAuthoritySet + ); + }) + } + + #[test] + fn importing_header_ensures_that_chain_is_extended() { + run_test(|| { + initialize_substrate_bridge(); + + assert_ok!(submit_finality_proof(4)); + assert_err!(submit_finality_proof(3), Error::::OldHeader); + assert_ok!(submit_finality_proof(5)); + }) + } + + #[test] + fn importing_header_enacts_new_authority_set() { + run_test(|| { + initialize_substrate_bridge(); + + let next_set_id = 2; + let next_authorities = 
vec![(ALICE.into(), 1), (BOB.into(), 1)]; + + // Need to update the header digest to indicate that our header signals an authority set + // change. The change will be enacted when we import our header. + let mut header = test_header(2); + header.digest = change_log(0); + + // Create a valid justification for the header + let justification = make_default_justification(&header); + + // Let's import our test header + assert_ok!(Pallet::::submit_finality_proof( + Origin::signed(1), + header.clone(), + justification + )); + + // Make sure that our header is the best finalized + assert_eq!(>::get(), header.hash()); + assert!(>::contains_key(header.hash())); + + // Make sure that the authority set actually changed upon importing our header + assert_eq!( + >::get(), + bp_header_chain::AuthoritySet::new(next_authorities, next_set_id), + ); + }) + } + + #[test] + fn importing_header_rejects_header_with_scheduled_change_delay() { + run_test(|| { + initialize_substrate_bridge(); + + // Need to update the header digest to indicate that our header signals an authority set + // change. However, the change doesn't happen until the next block. + let mut header = test_header(2); + header.digest = change_log(1); + + // Create a valid justification for the header + let justification = make_default_justification(&header); + + // Should not be allowed to import this header + assert_err!( + Pallet::::submit_finality_proof(Origin::signed(1), header, justification), + >::UnsupportedScheduledChange + ); + }) + } + + #[test] + fn importing_header_rejects_header_with_forced_changes() { + run_test(|| { + initialize_substrate_bridge(); + + // Need to update the header digest to indicate that it signals a forced authority set + // change. 
+ let mut header = test_header(2); + header.digest = forced_change_log(0); + + // Create a valid justification for the header + let justification = make_default_justification(&header); + + // Should not be allowed to import this header + assert_err!( + Pallet::::submit_finality_proof(Origin::signed(1), header, justification), + >::UnsupportedScheduledChange + ); + }) + } + + #[test] + fn parse_finalized_storage_proof_rejects_proof_on_unknown_header() { + run_test(|| { + assert_noop!( + Pallet::::parse_finalized_storage_proof( + Default::default(), + sp_trie::StorageProof::new(vec![]), + |_| (), + ), + Error::::UnknownHeader, + ); + }); + } + + #[test] + fn parse_finalized_storage_accepts_valid_proof() { + run_test(|| { + let (state_root, storage_proof) = bp_runtime::craft_valid_storage_proof(); + + let mut header = test_header(2); + header.set_state_root(state_root); + + let hash = header.hash(); + >::put(hash); + >::insert(hash, header); + + assert_ok!( + Pallet::::parse_finalized_storage_proof(hash, storage_proof, |_| (),), + (), + ); + }); + } + + #[test] + fn rate_limiter_disallows_imports_once_limit_is_hit_in_single_block() { + run_test(|| { + initialize_substrate_bridge(); + + assert_ok!(submit_finality_proof(1)); + assert_ok!(submit_finality_proof(2)); + assert_err!(submit_finality_proof(3), >::TooManyRequests); + }) + } + + #[test] + fn rate_limiter_invalid_requests_do_not_count_towards_request_count() { + run_test(|| { + let submit_invalid_request = || { + let header = test_header(1); + let mut invalid_justification = make_default_justification(&header); + invalid_justification.round = 42; + + Pallet::::submit_finality_proof(Origin::signed(1), header, invalid_justification) + }; + + initialize_substrate_bridge(); + + for _ in 0..::MaxRequests::get() + 1 { + // Notice that the error here *isn't* `TooManyRequests` + assert_err!(submit_invalid_request(), >::InvalidJustification); + } + + // Can still submit `MaxRequests` requests afterwards + 
assert_ok!(submit_finality_proof(1)); + assert_ok!(submit_finality_proof(2)); + assert_err!(submit_finality_proof(3), >::TooManyRequests); + }) + } + + #[test] + fn rate_limiter_allows_request_after_new_block_has_started() { + run_test(|| { + initialize_substrate_bridge(); + assert_ok!(submit_finality_proof(1)); + assert_ok!(submit_finality_proof(2)); + + next_block(); + assert_ok!(submit_finality_proof(3)); + }) + } + + #[test] + fn rate_limiter_disallows_imports_once_limit_is_hit_across_different_blocks() { + run_test(|| { + initialize_substrate_bridge(); + assert_ok!(submit_finality_proof(1)); + assert_ok!(submit_finality_proof(2)); + + next_block(); + assert_ok!(submit_finality_proof(3)); + assert_err!(submit_finality_proof(4), >::TooManyRequests); + }) + } + + #[test] + fn rate_limiter_allows_max_requests_after_long_time_with_no_activity() { + run_test(|| { + initialize_substrate_bridge(); + assert_ok!(submit_finality_proof(1)); + assert_ok!(submit_finality_proof(2)); + + next_block(); + next_block(); + + next_block(); + assert_ok!(submit_finality_proof(5)); + assert_ok!(submit_finality_proof(7)); + }) + } + + #[test] + fn should_prune_headers_over_headers_to_keep_parameter() { + run_test(|| { + initialize_substrate_bridge(); + assert_ok!(submit_finality_proof(1)); + let first_header = Pallet::::best_finalized(); + next_block(); + + assert_ok!(submit_finality_proof(2)); + next_block(); + assert_ok!(submit_finality_proof(3)); + next_block(); + assert_ok!(submit_finality_proof(4)); + next_block(); + assert_ok!(submit_finality_proof(5)); + next_block(); + + assert_ok!(submit_finality_proof(6)); + + assert!( + !Pallet::::is_known_header(first_header.hash()), + "First header should be pruned." 
+ ); + }) + } +} diff --git a/polkadot/bridges/modules/substrate/src/mock.rs b/polkadot/bridges/modules/grandpa/src/mock.rs similarity index 77% rename from polkadot/bridges/modules/substrate/src/mock.rs rename to polkadot/bridges/modules/grandpa/src/mock.rs index a205c09e83..20f5ea7bdf 100644 --- a/polkadot/bridges/modules/substrate/src/mock.rs +++ b/polkadot/bridges/modules/grandpa/src/mock.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,17 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Mock Runtime for Substrate Pallet Testing. -//! -//! Includes some useful testing types and functions. - -#![cfg(test)] // From construct_runtime macro #![allow(clippy::from_over_into)] -use crate::{BridgedBlockHash, BridgedBlockNumber, BridgedHeader, Config}; use bp_runtime::Chain; -use frame_support::{parameter_types, weights::Weight}; +use frame_support::{construct_runtime, parameter_types, weights::Weight}; use sp_runtime::{ testing::{Header, H256}, traits::{BlakeTwo256, IdentityLookup}, @@ -32,23 +26,23 @@ use sp_runtime::{ }; pub type AccountId = u64; -pub type TestHeader = BridgedHeader; -pub type TestNumber = BridgedBlockNumber; -pub type TestHash = BridgedBlockHash; +pub type TestHeader = crate::BridgedHeader; +pub type TestNumber = crate::BridgedBlockNumber; +pub type TestHash = crate::BridgedBlockHash; type Block = frame_system::mocking::MockBlock; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -use crate as pallet_substrate; +use crate as grandpa; -frame_support::construct_runtime! { +construct_runtime! 
{ pub enum TestRuntime where Block = Block, NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event}, - Substrate: pallet_substrate::{Pallet, Call}, + Grandpa: grandpa::{Pallet}, } } @@ -85,8 +79,18 @@ impl frame_system::Config for TestRuntime { type OnSetCode = (); } -impl Config for TestRuntime { +parameter_types! { + pub const MaxRequests: u32 = 2; + pub const HeadersToKeep: u32 = 5; + pub const SessionLength: u64 = 5; + pub const NumValidators: u32 = 5; +} + +impl grandpa::Config for TestRuntime { type BridgedChain = TestBridgedChain; + type MaxRequests = MaxRequests; + type HeadersToKeep = HeadersToKeep; + type WeightInfo = (); } #[derive(Debug)] @@ -107,12 +111,3 @@ pub fn test_header(num: TestNumber) -> TestHeader { // We wrap the call to avoid explicit type annotations in our tests bp_test_utils::test_header(num) } - -pub fn unfinalized_header(num: u64) -> crate::storage::ImportedHeader { - crate::storage::ImportedHeader { - header: test_header(num), - requires_justification: false, - is_finalized: false, - signal_hash: None, - } -} diff --git a/polkadot/bridges/modules/grandpa/src/weights.rs b/polkadot/bridges/modules/grandpa/src/weights.rs new file mode 100644 index 0000000000..a548534a20 --- /dev/null +++ b/polkadot/bridges/modules/grandpa/src/weights.rs @@ -0,0 +1,121 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Autogenerated weights for pallet_bridge_grandpa +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-04-14, STEPS: [50, ], REPEAT: 20 +//! LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled +//! CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/rialto-bridge-node +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_bridge_grandpa +// --extrinsic=* +// --execution=wasm +// --wasm-execution=Compiled +// --heap-pages=4096 +// --output=./modules/grandpa/src/weights.rs +// --template=./.maintain/rialto-weight-template.hbs + +#![allow(clippy::all)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{ + traits::Get, + weights::{constants::RocksDbWeight, Weight}, +}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_bridge_grandpa. +pub trait WeightInfo { + fn submit_finality_proof(v: u32, p: u32) -> Weight; + fn submit_finality_proof_on_single_fork(v: u32) -> Weight; + fn submit_finality_proof_on_many_forks(p: u32) -> Weight; + fn find_scheduled_change(n: u32) -> Weight; + fn read_write_authority_sets(n: u32) -> Weight; +} + +/// Weights for pallet_bridge_grandpa using the Rialto node and recommended hardware. 
+pub struct RialtoWeight(PhantomData); +impl WeightInfo for RialtoWeight { + fn submit_finality_proof(v: u32, p: u32) -> Weight { + (0 as Weight) + .saturating_add((837_084_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((874_929_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } + fn submit_finality_proof_on_single_fork(v: u32) -> Weight { + (276_463_000 as Weight) + .saturating_add((14_149_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } + fn submit_finality_proof_on_many_forks(p: u32) -> Weight { + (10_676_019_000 as Weight) + .saturating_add((97_598_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } + fn find_scheduled_change(n: u32) -> Weight { + (618_000 as Weight).saturating_add((8_000 as Weight).saturating_mul(n as Weight)) + } + fn read_write_authority_sets(n: u32) -> Weight { + (8_582_000 as Weight) + .saturating_add((234_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn submit_finality_proof(v: u32, p: u32) -> Weight { + (0 as Weight) + .saturating_add((837_084_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((874_929_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } + fn submit_finality_proof_on_single_fork(v: u32) -> Weight { + (276_463_000 as Weight) + .saturating_add((14_149_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + 
.saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } + fn submit_finality_proof_on_many_forks(p: u32) -> Weight { + (10_676_019_000 as Weight) + .saturating_add((97_598_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } + fn find_scheduled_change(n: u32) -> Weight { + (618_000 as Weight).saturating_add((8_000 as Weight).saturating_mul(n as Weight)) + } + fn read_write_authority_sets(n: u32) -> Weight { + (8_582_000 as Weight) + .saturating_add((234_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } +} diff --git a/polkadot/bridges/modules/message-lane/rpc/Cargo.toml b/polkadot/bridges/modules/message-lane/rpc/Cargo.toml deleted file mode 100644 index 23dac80b40..0000000000 --- a/polkadot/bridges/modules/message-lane/rpc/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -[package] -name = "pallet-message-lane-rpc" -description = "Module that provides RPC methods specific to message-lane pallet." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -derive_more = "0.99.2" -futures = { version = "0.3.5", features = ["compat"] } -jsonrpc-core = "15.1.0" -jsonrpc-core-client = "15.1.0" -jsonrpc-derive = "15.1.0" -log = "0.4.11" - -# Bridge dependencies - -bp-runtime = { path = "../../../primitives/runtime" } -bp-message-lane = { path = "../../../primitives/message-lane" } - -# Substrate Dependencies - -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/modules/message-lane/rpc/src/error.rs b/polkadot/bridges/modules/message-lane/rpc/src/error.rs deleted file mode 100644 index 74fd829fcd..0000000000 --- a/polkadot/bridges/modules/message-lane/rpc/src/error.rs +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Possible errors and results of message-lane RPC calls. - -/// Future Result type. -pub type FutureResult = jsonrpc_core::BoxFuture; - -/// State RPC errors. -#[derive(Debug, derive_more::Display, derive_more::From)] -pub enum Error { - /// When unknown instance id is passed. - #[display(fmt = "Message lane instance is unknown")] - UnknownInstance, - /// Client error. - #[display(fmt = "Client error: {}", _0)] - Client(Box), -} - -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::UnknownInstance => None, - Error::Client(ref err) => Some(&**err), - } - } -} - -impl From for jsonrpc_core::Error { - fn from(e: Error) -> Self { - const UNKNOW_INSTANCE_CODE: i64 = 1; - - match e { - Error::UnknownInstance => jsonrpc_core::Error { - code: jsonrpc_core::ErrorCode::ServerError(UNKNOW_INSTANCE_CODE), - message: "Unknown instance passed".into(), - data: None, - }, - Error::Client(e) => jsonrpc_core::Error { - code: jsonrpc_core::ErrorCode::InternalError, - message: format!("Unknown error occured: {}", e), - data: Some(format!("{:?}", e).into()), - }, - } - } -} diff --git a/polkadot/bridges/modules/message-lane/rpc/src/lib.rs b/polkadot/bridges/modules/message-lane/rpc/src/lib.rs deleted file mode 100644 index 8532ed0c5d..0000000000 --- a/polkadot/bridges/modules/message-lane/rpc/src/lib.rs +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module that provides RPC methods specific to message-lane pallet. - -use crate::error::{Error, FutureResult}; - -use bp_message_lane::{LaneId, MessageNonce}; -use bp_runtime::InstanceId; -use futures::{FutureExt, TryFutureExt}; -use jsonrpc_core::futures::Future as _; -use jsonrpc_derive::rpc; -use sc_client_api::Backend as BackendT; -use sp_blockchain::{Error as BlockchainError, HeaderBackend}; -use sp_core::{storage::StorageKey, Bytes}; -use sp_runtime::{codec::Encode, generic::BlockId, traits::Block as BlockT}; -use sp_state_machine::prove_read; -use sp_trie::StorageProof; -use std::sync::Arc; - -mod error; - -/// Trie-based storage proof that the message(s) with given key(s) have been sent by the bridged chain. -/// SCALE-encoded trie nodes array `Vec>`. -pub type MessagesProof = Bytes; - -/// Trie-based storage proof that the message(s) with given key(s) have been received by the bridged chain. -/// SCALE-encoded trie nodes array `Vec>`. -pub type MessagesDeliveryProof = Bytes; - -/// Runtime adapter. -pub trait Runtime: Send + Sync + 'static { - /// Return runtime storage key for given message. May return None if instance is unknown. - fn message_key(&self, instance: &InstanceId, lane: &LaneId, nonce: MessageNonce) -> Option; - /// Return runtime storage key for outbound lane state. May return None if instance is unknown. - fn outbound_lane_data_key(&self, instance: &InstanceId, lane: &LaneId) -> Option; - /// Return runtime storage key for inbound lane state. May return None if instance is unknown. 
- fn inbound_lane_data_key(&self, instance: &InstanceId, lane: &LaneId) -> Option; -} - -/// Provides RPC methods for interacting with message-lane pallet. -#[rpc] -pub trait MessageLaneApi { - /// Returns storage proof of messages in given inclusive range. The state of outbound - /// lane is included in the proof if `include_outbound_lane_state` is true. - #[rpc(name = "messageLane_proveMessages")] - fn prove_messages( - &self, - instance: InstanceId, - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - include_outbound_lane_state: bool, - block: Option, - ) -> FutureResult; - - /// Returns proof-of-message(s) delivery. - #[rpc(name = "messageLane_proveMessagesDelivery")] - fn prove_messages_delivery( - &self, - instance: InstanceId, - lane: LaneId, - block: Option, - ) -> FutureResult; -} - -/// Implements the MessageLaneApi trait for interacting with message lanes. -pub struct MessageLaneRpcHandler { - backend: Arc, - runtime: Arc, - _phantom: std::marker::PhantomData, -} - -impl MessageLaneRpcHandler { - /// Creates new mesage lane RPC handler. 
- pub fn new(backend: Arc, runtime: Arc) -> Self { - Self { - backend, - runtime, - _phantom: Default::default(), - } - } -} - -impl MessageLaneApi for MessageLaneRpcHandler -where - Block: BlockT, - Backend: BackendT + 'static, - R: Runtime, -{ - fn prove_messages( - &self, - instance: InstanceId, - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - include_outbound_lane_state: bool, - block: Option, - ) -> FutureResult { - let runtime = self.runtime.clone(); - let outbound_lane_data_key = if include_outbound_lane_state { - Some(runtime.outbound_lane_data_key(&instance, &lane)) - } else { - None - }; - let messages_count = if end >= begin { end - begin + 1 } else { 0 }; - Box::new( - prove_keys_read( - self.backend.clone(), - block, - (begin..=end) - .map(move |nonce| runtime.message_key(&instance, &lane, nonce)) - .chain(outbound_lane_data_key.into_iter()), - ) - .boxed() - .compat() - .map(move |proof| { - let serialized_proof = serialize_storage_proof(proof); - log::trace!( - "Generated proof of {} messages. Size: {}", - messages_count, - serialized_proof.len() - ); - serialized_proof - }) - .map_err(Into::into), - ) - } - - fn prove_messages_delivery( - &self, - instance: InstanceId, - lane: LaneId, - block: Option, - ) -> FutureResult { - Box::new( - prove_keys_read( - self.backend.clone(), - block, - vec![self.runtime.inbound_lane_data_key(&instance, &lane)], - ) - .boxed() - .compat() - .map(|proof| { - let serialized_proof = serialize_storage_proof(proof); - log::trace!("Generated message delivery proof. 
Size: {}", serialized_proof.len()); - serialized_proof - }) - .map_err(Into::into), - ) - } -} - -async fn prove_keys_read( - backend: Arc, - block: Option, - keys: impl IntoIterator>, -) -> Result -where - Block: BlockT, - Backend: BackendT + 'static, -{ - let block = unwrap_or_best(&*backend, block); - let state = backend.state_at(BlockId::Hash(block)).map_err(blockchain_err)?; - let keys = keys - .into_iter() - .map(|key| key.ok_or(Error::UnknownInstance).map(|key| key.0)) - .collect::, _>>()?; - let storage_proof = prove_read(state, keys) - .map_err(BlockchainError::Execution) - .map_err(blockchain_err)?; - Ok(storage_proof) -} - -fn serialize_storage_proof(proof: StorageProof) -> Bytes { - let raw_nodes: Vec> = proof.iter_nodes().map(Into::into).collect(); - raw_nodes.encode().into() -} - -fn unwrap_or_best(backend: &impl BackendT, block: Option) -> Block::Hash { - match block { - Some(block) => block, - None => backend.blockchain().info().best_hash, - } -} - -fn blockchain_err(err: BlockchainError) -> Error { - Error::Client(Box::new(err)) -} diff --git a/polkadot/bridges/modules/message-lane/Cargo.toml b/polkadot/bridges/modules/messages/Cargo.toml similarity index 85% rename from polkadot/bridges/modules/message-lane/Cargo.toml rename to polkadot/bridges/modules/messages/Cargo.toml index abbfb60761..4a75fa8181 100644 --- a/polkadot/bridges/modules/message-lane/Cargo.toml +++ b/polkadot/bridges/modules/messages/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "pallet-message-lane" +name = "pallet-bridge-messages" description = "Module that allows bridged chains to exchange messages using lane concept." 
version = "0.1.0" authors = ["Parity Technologies "] @@ -8,13 +8,14 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +log = { version = "0.4.14", default-features = false } num-traits = { version = "0.2", default-features = false } serde = { version = "1.0.101", optional = true, features = ["derive"] } # Bridge dependencies -bp-message-lane = { path = "../../primitives/message-lane", default-features = false } -bp-rialto = { path = "../../primitives/rialto", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } +bp-rialto = { path = "../../primitives/chain-rialto", default-features = false } bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Dependencies @@ -27,6 +28,7 @@ sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } [dev-dependencies] +hex = "0.4" hex-literal = "0.3" sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -34,12 +36,13 @@ pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "m [features] default = ["std"] std = [ - "bp-message-lane/std", + "bp-messages/std", "bp-runtime/std", "bp-rialto/std", "codec/std", "frame-support/std", "frame-system/std", + "log/std", "num-traits/std", "serde", "sp-core/std", diff --git a/polkadot/bridges/modules/message-lane/README.md b/polkadot/bridges/modules/messages/README.md similarity index 88% rename from polkadot/bridges/modules/message-lane/README.md rename to polkadot/bridges/modules/messages/README.md index a732042bd0..eda5e28a6c 100644 --- a/polkadot/bridges/modules/message-lane/README.md +++ 
b/polkadot/bridges/modules/messages/README.md @@ -1,25 +1,25 @@ -# Message Lane Module +# Messages Module -The message lane module is used to deliver messages from source chain to target chain. Message is +The messages module is used to deliver messages from source chain to target chain. Message is (almost) opaque to the module and the final goal is to hand message to the message dispatch mechanism. ## Contents - [Overview](#overview) - [Message Workflow](#message-workflow) -- [Integrating Message Lane Module into Runtime](#integrating-message-lane-module-into-runtime) +- [Integrating Message Lane Module into Runtime](#integrating-messages-module-into-runtime) - [Non-Essential Functionality](#non-essential-functionality) - [Weights of Module Extrinsics](#weights-of-module-extrinsics) ## Overview Message lane is an unidirectional channel, where messages are sent from source chain to the target -chain. At the same time, a single instance of message lane module supports both outbound lanes and +chain. At the same time, a single instance of messages module supports both outbound lanes and inbound lanes. So the chain where the module is deployed (this chain), may act as a source chain for outbound messages (heading to a bridged chain) and as a target chain for inbound messages (coming from a bridged chain). -Message lane module supports multiple message lanes. Every message lane is identified with a 4-byte +Messages module supports multiple message lanes. Every message lane is identified with a 4-byte identifier. Messages sent through the lane are assigned unique (for this lane) increasing integer value that is known as nonce ("number that can only be used once"). Messages that are sent over the same lane are guaranteed to be delivered to the target chain in the same order they're sent from @@ -41,12 +41,12 @@ now. We assume that there are external, offchain actors, called relayers, that are submitting module related transactions to both target and source chains. 
The pallet itself has no assumptions about relayers incentivization scheme, but it has some callbacks for paying rewards. See -[Integrating Message Lane Module into runtime](#Integrating-Message-Lane-Module-into-runtime) +[Integrating Messages Module into runtime](#Integrating-Messages-Module-into-runtime) for details. Eventually, some relayer would notice this message in the "undelivered" state and it would decide to deliver this message. Relayer then crafts `receive_messages_proof()` transaction (aka delivery -transaction) for the message lane module instance, deployed at the target chain. Relayer provides +transaction) for the messages module instance, deployed at the target chain. Relayer provides his account id at the source chain, the proof of message (or several messages), the number of messages in the transaction and their cumulative dispatch weight. Once a transaction is mined, the message is considered "delivered". @@ -54,12 +54,12 @@ message is considered "delivered". Once a message is delivered, the relayer may want to confirm delivery back to the source chain. There are two reasons why he would want to do that. The first is that we intentionally limit number of "delivered", but not yet "confirmed" messages at inbound lanes -(see [What about other Constants in the Message Lane Module Configuration Trait](#What-about-other-Constants-in-the-Message-Lane-Module-Configuration-Trait) for explanation). +(see [What about other Constants in the Messages Module Configuration Trait](#What-about-other-Constants-in-the-Messages-Module-Configuration-Trait) for explanation). So at some point, the target chain may stop accepting new messages until relayers confirm some of these. The second is that if the relayer wants to be rewarded for delivery, he must prove the fact that he has actually delivered the message. And this proof may only be generated after the delivery transaction is mined. 
So relayer crafts the `receive_messages_delivery_proof()` transaction (aka -confirmation transaction) for the message lane module instance, deployed at the source chain. Once +confirmation transaction) for the messages module instance, deployed at the source chain. Once this transaction is mined, the message is considered "confirmed". The "confirmed" state is the final state of the message. But there's one last thing related to the @@ -69,21 +69,21 @@ the limit of "unconfirmed" messages at the target chain and it will stop accepti relayer sometimes includes a nonce of the latest "confirmed" message in the next `receive_messages_proof()` transaction, proving that some messages have been confirmed. -## Integrating Message Lane Module into Runtime +## Integrating Messages Module into Runtime -As it has been said above, the message lane module supports both outbound and inbound message lanes. +As it has been said above, the messages module supports both outbound and inbound message lanes. So if we will integrate a module in some runtime, it may act as the source chain runtime for outbound messages and as the target chain runtime for inbound messages. In this section, we'll sometimes refer to the chain we're currently integrating with, as this chain and the other chain as bridged chain. -Message lane module doesn't simply accept transactions that are claiming that the bridged chain has +Messages module doesn't simply accept transactions that are claiming that the bridged chain has some updated data for us. Instead of this, the module assumes that the bridged chain is able to prove that updated data in some way. The proof is abstracted from the module and may be of any kind. In our Substrate-to-Substrate bridge we're using runtime storage proofs. Other bridges may use transaction proofs, Substrate header digests or anything else that may be proved. 
-**IMPORTANT NOTE**: everything below in this chapter describes details of the message lane module +**IMPORTANT NOTE**: everything below in this chapter describes details of the messages module configuration. But if you interested in well-probed and relatively easy integration of two Substrate-based chains, you may want to look at the [bridge-runtime-common](../../bin/runtime-common/README.md) crate. This crate is providing a lot of @@ -92,7 +92,7 @@ to change something in this scheme, get back here for detailed information. ### General Information -The message lane module supports instances. Every module instance is supposed to bridge this chain +The messages module supports instances. Every module instance is supposed to bridge this chain and some bridged chain. To bridge with another chain, using another instance is suggested (this isn't forced anywhere in the code, though). @@ -103,10 +103,10 @@ to the target chain, the `MessagesDelivered` event is emitted from the `receive_messages_delivery_proof()` transaction. The `MessagesDelivered` contains the message lane identifier and inclusive range of delivered message nonces. -### How to plug-in Message Lane Module to Send Messages to the Bridged Chain? +### How to plug-in Messages Module to Send Messages to the Bridged Chain? -The `pallet_message_lane::Config` trait has 3 main associated types that are used to work with -outbound messages. The `pallet_message_lane::Config::TargetHeaderChain` defines how we see the +The `pallet_bridge_messages::Config` trait has 3 main associated types that are used to work with +outbound messages. The `pallet_bridge_messages::Config::TargetHeaderChain` defines how we see the bridged chain as the target for our outbound messages. It must be able to check that the bridged chain may accept our message - like that the message has size below maximal possible transaction size of the chain and so on. 
And when the relayer sends us a confirmation transaction, this @@ -114,7 +114,7 @@ implementation must be able to parse and verify the proof of messages delivery. reuse the same (configurable) type on all chains that are sending messages to the same bridged chain. -The `pallet_message_lane::Config::LaneMessageVerifier` defines a single callback to verify outbound +The `pallet_bridge_messages::Config::LaneMessageVerifier` defines a single callback to verify outbound messages. The simplest callback may just accept all messages. But in this case you'll need to answer many questions first. Who will pay for the delivery and confirmation transaction? Are we sure that someone will ever deliver this message to the bridged chain? Are we sure that we don't bloat our @@ -123,15 +123,15 @@ fields set to invalid values? Answering all those (and similar) questions would implementation. There's another thing to consider when implementing type for use in -`pallet_message_lane::Config::LaneMessageVerifier`. It is whether we treat all message lanes +`pallet_bridge_messages::Config::LaneMessageVerifier`. It is whether we treat all message lanes identically, or they'll have different sets of verification rules? For example, you may reserve lane#1 for messages coming from some 'wrapped-token' pallet - then you may verify in your implementation that the origin is associated with this pallet. Lane#2 may be reserved for 'system' messages and you may charge zero fee for such messages. You may have some rate limiting for messages sent over the lane#3. Or you may just verify the same rules set for all outbound messages - it is -all up to the `pallet_message_lane::Config::LaneMessageVerifier` implementation. +all up to the `pallet_bridge_messages::Config::LaneMessageVerifier` implementation. -The last type is the `pallet_message_lane::Config::MessageDeliveryAndDispatchPayment`. When all +The last type is the `pallet_bridge_messages::Config::MessageDeliveryAndDispatchPayment`. 
When all checks are made and we have decided to accept the message, we're calling the `pay_delivery_and_dispatch_fee()` callback, passing the corresponding argument of the `send_message` function. Later, when message delivery is confirmed, we're calling `pay_relayers_rewards()` @@ -140,41 +140,41 @@ implementation of this trait is in the [`instant_payments.rs`](./src/instant_pay simply calls `Currency::transfer()` when those callbacks are called. So `Currency` units are transferred between submitter, 'relayers fund' and relayers accounts. Other implementations may use more or less sophisticated techniques - the whole relayers incentivization scheme is not a part of -the message lane module. +the messages module. -### I have a Message Lane Module in my Runtime, but I Want to Reject all Outbound Messages. What shall I do? +### I have a Messages Module in my Runtime, but I Want to Reject all Outbound Messages. What shall I do? -You should be looking at the `bp_message_lane::source_chain::ForbidOutboundMessages` structure -[`bp_message_lane::source_chain`](../../primitives/message-lane/src/source_chain.rs). It implements +You should be looking at the `bp_messages::source_chain::ForbidOutboundMessages` structure +[`bp_messages::source_chain`](../../primitives/messages/src/source_chain.rs). It implements all required traits and will simply reject all transactions, related to outbound messages. -### How to plug-in Message Lane Module to Receive Messages from the Bridged Chain? +### How to plug-in Messages Module to Receive Messages from the Bridged Chain? -The `pallet_message_lane::Config` trait has 2 main associated types that are used to work with -inbound messages. The `pallet_message_lane::Config::SourceHeaderChain` defines how we see the +The `pallet_bridge_messages::Config` trait has 2 main associated types that are used to work with +inbound messages. 
The `pallet_bridge_messages::Config::SourceHeaderChain` defines how we see the bridged chain as the source or our inbound messages. When relayer sends us a delivery transaction, this implementation must be able to parse and verify the proof of messages wrapped in this transaction. Normally, you would reuse the same (configurable) type on all chains that are sending messages to the same bridged chain. -The `pallet_message_lane::Config::MessageDispatch` defines a way on how to dispatch delivered +The `pallet_bridge_messages::Config::MessageDispatch` defines a way on how to dispatch delivered messages. Apart from actually dispatching the message, the implementation must return the correct dispatch weight of the message before dispatch is called. -### I have a Message Lane Module in my Runtime, but I Want to Reject all Inbound Messages. What +### I have a Messages Module in my Runtime, but I Want to Reject all Inbound Messages. What shall I do? -You should be looking at the `bp_message_lane::target_chain::ForbidInboundMessages` structure from -the [`bp_message_lane::target_chain`](../../primitives/message-lane/src/target_chain.rs) module. It +You should be looking at the `bp_messages::target_chain::ForbidInboundMessages` structure from +the [`bp_messages::target_chain`](../../primitives/messages/src/target_chain.rs) module. It implements all required traits and will simply reject all transactions, related to inbound messages. -### What about other Constants in the Message Lane Module Configuration Trait? +### What about other Constants in the Messages Module Configuration Trait? Message is being stored in the source chain storage until its delivery will be confirmed. After that, we may safely remove the message from the storage. Lane messages are removed (pruned) when someone sends a new message using the same lane. So the message submitter pays for that pruning. 
To avoid pruning too many messages in a single transaction, there's -`pallet_message_lane::Config::MaxMessagesToPruneAtOnce` configuration parameter. We will never prune +`pallet_bridge_messages::Config::MaxMessagesToPruneAtOnce` configuration parameter. We will never prune more than this number of messages in the single transaction. That said, the value should not be too big to avoid waste of resources when there are no messages to prune. @@ -186,10 +186,10 @@ chain to confirm delivery and pay rewards. So to make sure we are able to craft transaction, we need to: (1) keep the size of this map below a certain limit and (2) make sure that the weight of processing this map is below a certain limit. Both size and processing weight mostly depend on the number of entries. The number of entries is limited with the -`pallet_message_lane::ConfigMaxUnrewardedRelayerEntriesAtInboundLane` parameter. Processing weight +`pallet_bridge_messages::ConfigMaxUnrewardedRelayerEntriesAtInboundLane` parameter. Processing weight also depends on the total number of messages that are being confirmed, because every confirmed message needs to be read. So there's another -`pallet_message_lane::Config::MaxUnconfirmedMessagesAtInboundLane` parameter for that. +`pallet_bridge_messages::Config::MaxUnconfirmedMessagesAtInboundLane` parameter for that. When choosing values for these parameters, you must also keep in mind that if proof in your scheme is based on finality of headers (and it is the most obvious option for Substrate-based chains with @@ -215,7 +215,7 @@ large maps, at the same time keeping reserve for future source chain upgrades. Apart from the message related calls, the module exposes a set of auxiliary calls. They fall in two groups, described in the next two paragraphs. -There may be a special account in every runtime where the message lane module is deployed. This +There may be a special account in every runtime where the messages module is deployed. 
This account, named 'module owner', is like a module-level sudo account - he's able to halt all and result all module operations without requiring runtime upgrade. The module may have no message owner, but we suggest to use it at least for initial deployment. To calls that are related to this @@ -242,7 +242,7 @@ The main assumptions behind weight formulas is: - all possible costs are paid in advance by the message submitter; - whenever possible, relayer tries to minimize cost of its transactions. So e.g. even though sender always pays for delivering outbound lane state proof, relayer may not include it in the delivery - transaction (unless message lane module on target chain requires that); + transaction (unless messages module on target chain requires that); - weight formula should incentivize relayer to not to submit any redundant data in the extrinsics arguments; - the extrinsic shall never be executing slower (i.e. has larger actual weight) than defined by the @@ -343,8 +343,8 @@ hardcoded into runtime. Adequate relayer would only include required trie nodes if message size would be maximal (`2/3` of `MaximalExtrinsicSize`), then the extra proof size would be `MaximalExtrinsicSize / 3 * 2 - EXPECTED_DEFAULT_MESSAGE_LENGTH`. -Both conditions are verified by `pallet_message_lane::ensure_weights_are_correct` and -`pallet_message_lane::ensure_able_to_receive_messages` functions, which must be called from every +Both conditions are verified by `pallet_bridge_messages::ensure_weights_are_correct` and +`pallet_bridge_messages::ensure_able_to_receive_messages` functions, which must be called from every runtime's tests. ### Weight of `receive_messages_delivery_proof` call @@ -381,11 +381,11 @@ Where: #### Why we're always able to craft `receive_messages_delivery_proof` transaction? 
-There can be at most `::MaxUnconfirmedMessagesAtInboundLane` +There can be at most `::MaxUnconfirmedMessagesAtInboundLane` messages and at most -`::MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded +`::MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded relayers in the single delivery confirmation transaction. We're checking that this transaction may be crafted in the -`pallet_message_lane::ensure_able_to_receive_confirmation` function, which must be called from every +`pallet_bridge_messages::ensure_able_to_receive_confirmation` function, which must be called from every runtime' tests. diff --git a/polkadot/bridges/modules/message-lane/src/benchmarking.rs b/polkadot/bridges/modules/messages/src/benchmarking.rs similarity index 95% rename from polkadot/bridges/modules/message-lane/src/benchmarking.rs rename to polkadot/bridges/modules/messages/src/benchmarking.rs index cd59d1347f..d1ecf77500 100644 --- a/polkadot/bridges/modules/message-lane/src/benchmarking.rs +++ b/polkadot/bridges/modules/messages/src/benchmarking.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Message lane pallet benchmarking. +//! Messages pallet benchmarking. use crate::weights_ext::EXPECTED_DEFAULT_MESSAGE_LENGTH; use crate::{inbound_lane::InboundLaneStorage, inbound_lane_storage, outbound_lane, Call, Instance}; -use bp_message_lane::{ +use bp_messages::{ source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, InboundLaneData, LaneId, MessageData, MessageNonce, OutboundLaneData, UnrewardedRelayersState, }; @@ -33,8 +33,8 @@ pub const MESSAGE_FEE: u64 = 10_000_000_000; const SEED: u32 = 0; -/// Module we're benchmarking here. 
-pub struct Module, I: crate::Instance>(crate::Module); +/// Pallet we're benchmarking here. +pub struct Pallet, I: crate::Instance>(crate::Pallet); /// Proof size requirements. pub enum ProofSize { @@ -142,7 +142,7 @@ benchmarks_instance! { }: send_message(RawOrigin::Signed(sender), lane_id, payload, fee) verify { assert_eq!( - crate::Module::::outbound_latest_generated_nonce(T::bench_lane_id()), + crate::Pallet::::outbound_latest_generated_nonce(T::bench_lane_id()), T::MaxMessagesToPruneAtOnce::get() + 1, ); } @@ -179,7 +179,7 @@ benchmarks_instance! { }: send_message(RawOrigin::Signed(sender), lane_id, payload, fee) verify { assert_eq!( - crate::Module::::outbound_latest_generated_nonce(T::bench_lane_id()), + crate::Pallet::::outbound_latest_generated_nonce(T::bench_lane_id()), T::MaxMessagesToPruneAtOnce::get() + 1, ); } @@ -216,7 +216,7 @@ benchmarks_instance! { }: send_message(RawOrigin::Signed(sender), lane_id, payload, fee) verify { assert_eq!( - crate::Module::::outbound_latest_generated_nonce(T::bench_lane_id()), + crate::Pallet::::outbound_latest_generated_nonce(T::bench_lane_id()), T::MaxMessagesToPruneAtOnce::get() + 1, ); } @@ -261,7 +261,7 @@ benchmarks_instance! { }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) verify { assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), 21, ); } @@ -292,7 +292,7 @@ benchmarks_instance! { }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 2, dispatch_weight) verify { assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), 22, ); } @@ -327,11 +327,11 @@ benchmarks_instance! 
{ }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) verify { assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), 21, ); assert_eq!( - crate::Module::::inbound_latest_confirmed_nonce(T::bench_lane_id()), + crate::Pallet::::inbound_latest_confirmed_nonce(T::bench_lane_id()), 20, ); } @@ -361,7 +361,7 @@ benchmarks_instance! { }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) verify { assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), 21, ); } @@ -393,7 +393,7 @@ benchmarks_instance! { }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) verify { assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), 21, ); } @@ -404,7 +404,7 @@ benchmarks_instance! { // // This is base benchmark for all other confirmations delivery benchmarks. receive_delivery_proof_for_single_message { - let relayers_fund_id = crate::Module::::relayer_fund_account_id(); + let relayers_fund_id = crate::Pallet::::relayer_fund_account_id(); let relayer_id: T::AccountId = account("relayer", 0, SEED); let relayer_balance = T::account_balance(&relayer_id); T::endow_account(&relayers_fund_id); @@ -441,7 +441,7 @@ benchmarks_instance! { // as `weight(receive_delivery_proof_for_two_messages_by_single_relayer) // - weight(receive_delivery_proof_for_single_message)`. 
receive_delivery_proof_for_two_messages_by_single_relayer { - let relayers_fund_id = crate::Module::::relayer_fund_account_id(); + let relayers_fund_id = crate::Pallet::::relayer_fund_account_id(); let relayer_id: T::AccountId = account("relayer", 0, SEED); let relayer_balance = T::account_balance(&relayer_id); T::endow_account(&relayers_fund_id); @@ -476,7 +476,7 @@ benchmarks_instance! { // as `weight(receive_delivery_proof_for_two_messages_by_two_relayers) // - weight(receive_delivery_proof_for_two_messages_by_single_relayer)`. receive_delivery_proof_for_two_messages_by_two_relayers { - let relayers_fund_id = crate::Module::::relayer_fund_account_id(); + let relayers_fund_id = crate::Pallet::::relayer_fund_account_id(); let relayer1_id: T::AccountId = account("relayer1", 1, SEED); let relayer1_balance = T::account_balance(&relayer1_id); let relayer2_id: T::AccountId = account("relayer2", 2, SEED); @@ -540,7 +540,7 @@ benchmarks_instance! { }: send_message(RawOrigin::Signed(sender), lane_id, payload, fee) verify { assert_eq!( - crate::Module::::outbound_latest_generated_nonce(T::bench_lane_id()), + crate::Pallet::::outbound_latest_generated_nonce(T::bench_lane_id()), T::MaxMessagesToPruneAtOnce::get() + 1, ); } @@ -579,7 +579,7 @@ benchmarks_instance! { ) verify { assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), 20 + i as MessageNonce, ); } @@ -616,7 +616,7 @@ benchmarks_instance! { ) verify { assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), 21, ); } @@ -653,7 +653,7 @@ benchmarks_instance! { ) verify { assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), 21, ); } @@ -696,11 +696,11 @@ benchmarks_instance! 
{ ) verify { assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), 20 + i as MessageNonce, ); assert_eq!( - crate::Module::::inbound_latest_confirmed_nonce(T::bench_lane_id()), + crate::Pallet::::inbound_latest_confirmed_nonce(T::bench_lane_id()), 20, ); } @@ -713,7 +713,7 @@ benchmarks_instance! { .try_into() .expect("Value of MaxUnrewardedRelayerEntriesAtInboundLane is too large"); - let relayers_fund_id = crate::Module::::relayer_fund_account_id(); + let relayers_fund_id = crate::Pallet::::relayer_fund_account_id(); let relayer_id: T::AccountId = account("relayer", 0, SEED); let relayer_balance = T::account_balance(&relayer_id); T::endow_account(&relayers_fund_id); @@ -749,7 +749,7 @@ benchmarks_instance! { .try_into() .expect("Value of MaxUnconfirmedMessagesAtInboundLane is too large "); - let relayers_fund_id = crate::Module::::relayer_fund_account_id(); + let relayers_fund_id = crate::Pallet::::relayer_fund_account_id(); let confirmation_relayer_id = account("relayer", 0, SEED); let relayers: BTreeMap = (1..=i) .map(|j| { diff --git a/polkadot/bridges/modules/message-lane/src/inbound_lane.rs b/polkadot/bridges/modules/messages/src/inbound_lane.rs similarity index 99% rename from polkadot/bridges/modules/message-lane/src/inbound_lane.rs rename to polkadot/bridges/modules/messages/src/inbound_lane.rs index 7359aa4ed1..b5576bc30a 100644 --- a/polkadot/bridges/modules/message-lane/src/inbound_lane.rs +++ b/polkadot/bridges/modules/messages/src/inbound_lane.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -16,7 +16,7 @@ //! Everything about incoming messages receival. 
-use bp_message_lane::{ +use bp_messages::{ target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, }; diff --git a/polkadot/bridges/modules/message-lane/src/instant_payments.rs b/polkadot/bridges/modules/messages/src/instant_payments.rs similarity index 96% rename from polkadot/bridges/modules/message-lane/src/instant_payments.rs rename to polkadot/bridges/modules/messages/src/instant_payments.rs index af5d2cdc41..524a3765d6 100644 --- a/polkadot/bridges/modules/message-lane/src/instant_payments.rs +++ b/polkadot/bridges/modules/messages/src/instant_payments.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -19,7 +19,7 @@ //! The payment is first transferred to a special `relayers-fund` account and only transferred //! to the actual relayer in case confirmation is received. 
-use bp_message_lane::{ +use bp_messages::{ source_chain::{MessageDeliveryAndDispatchPayment, RelayersRewards, Sender}, MessageNonce, }; @@ -167,14 +167,14 @@ fn pay_relayer_reward( ); match pay_result { - Ok(_) => frame_support::debug::trace!( - target: "runtime", + Ok(_) => log::trace!( + target: "runtime::bridge-messages", "Rewarded relayer {:?} with {:?}", relayer_account, reward, ), - Err(error) => frame_support::debug::trace!( - target: "runtime", + Err(error) => log::trace!( + target: "runtime::bridge-messages", "Failed to pay relayer {:?} reward {:?}: {:?}", relayer_account, reward, @@ -187,7 +187,7 @@ fn pay_relayer_reward( mod tests { use super::*; use crate::mock::{run_test, AccountId as TestAccountId, Balance as TestBalance, TestRuntime}; - use bp_message_lane::source_chain::RelayerRewards; + use bp_messages::source_chain::RelayerRewards; type Balances = pallet_balances::Pallet; diff --git a/polkadot/bridges/modules/message-lane/src/lib.rs b/polkadot/bridges/modules/messages/src/lib.rs similarity index 86% rename from polkadot/bridges/modules/message-lane/src/lib.rs rename to polkadot/bridges/modules/messages/src/lib.rs index 45da09eba0..9e2563498f 100644 --- a/polkadot/bridges/modules/message-lane/src/lib.rs +++ b/polkadot/bridges/modules/messages/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify @@ -44,11 +44,11 @@ use crate::inbound_lane::{InboundLane, InboundLaneStorage}; use crate::outbound_lane::{OutboundLane, OutboundLaneStorage}; use crate::weights::WeightInfo; -use bp_message_lane::{ +use bp_messages::{ source_chain::{LaneMessageVerifier, MessageDeliveryAndDispatchPayment, RelayersRewards, TargetHeaderChain}, target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain}, total_unrewarded_messages, InboundLaneData, LaneId, MessageData, MessageKey, MessageNonce, MessagePayload, - OutboundLaneData, Parameter as MessageLaneParameter, UnrewardedRelayersState, + OutboundLaneData, Parameter as MessagesParameter, UnrewardedRelayersState, }; use bp_runtime::Size; use codec::{Decode, Encode}; @@ -88,7 +88,7 @@ pub trait Config: frame_system::Config { /// for integrating the pallet. /// /// All pallet parameters may only be updated either by the root, or by the pallet owner. - type Parameter: MessageLaneParameter; + type Parameter: MessagesParameter; /// Maximal number of messages that may be pruned during maintenance. Maintenance occurs /// whenever new message is sent. The reason is that if you want to use lane, you should @@ -161,7 +161,7 @@ type MessagesDeliveryProofOf = <>::TargetHeaderChain as Tar >>::MessagesDeliveryProof; decl_error! { - pub enum Error for Module, I: Instance> { + pub enum Error for Pallet, I: Instance> { /// All pallet operations are halted. Halted, /// Message has been treated as invalid by chain verifier. @@ -188,14 +188,14 @@ decl_error! { } decl_storage! { - trait Store for Module, I: Instance = DefaultInstance> as MessageLane { + trait Store for Pallet, I: Instance = DefaultInstance> as BridgeMessages { /// Optional pallet owner. /// /// Pallet owner has a right to halt all pallet operations and then resume it. 
If it is /// `None`, then there are no direct ways to halt/resume pallet operations, but other /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt /// flag directly or call the `halt_operations`). - pub ModuleOwner get(fn module_owner): Option; + pub PalletOwner get(fn module_owner): Option; /// If true, all pallet transactions are failed immediately. pub IsHalted get(fn is_halted) config(): bool; /// Map of lane id => inbound lane data. @@ -210,7 +210,7 @@ decl_storage! { config(owner): Option; build(|config| { if let Some(ref owner) = config.owner { - >::put(owner); + >::put(owner); } }) } @@ -246,47 +246,42 @@ decl_module! { T::DbWeight::get().reads(reads as u64) } - /// Change `ModuleOwner`. + /// Change `PalletOwner`. /// - /// May only be called either by root, or by `ModuleOwner`. + /// May only be called either by root, or by `PalletOwner`. #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] pub fn set_owner(origin, new_owner: Option) { ensure_owner_or_root::(origin)?; match new_owner { Some(new_owner) => { - ModuleOwner::::put(&new_owner); - frame_support::debug::info!("Setting pallet Owner to: {:?}", new_owner); + PalletOwner::::put(&new_owner); + log::info!(target: "runtime::bridge-messages", "Setting pallet Owner to: {:?}", new_owner); }, None => { - ModuleOwner::::kill(); - frame_support::debug::info!("Removed Owner of pallet."); + PalletOwner::::kill(); + log::info!(target: "runtime::bridge-messages", "Removed Owner of pallet."); }, } } - /// Halt all pallet operations. Operations may be resumed using `resume_operations` call. + /// Halt or resume all pallet operations. /// - /// May only be called either by root, or by `ModuleOwner`. + /// May only be called either by root, or by `PalletOwner`. 
#[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] - pub fn halt_operations(origin) { + pub fn set_operational(origin, operational: bool) { ensure_owner_or_root::(origin)?; - IsHalted::::put(true); - frame_support::debug::warn!("Stopping pallet operations."); - } + >::put(operational); - /// Resume all pallet operations. May be called even if pallet is halted. - /// - /// May only be called either by root, or by `ModuleOwner`. - #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] - pub fn resume_operations(origin) { - ensure_owner_or_root::(origin)?; - IsHalted::::put(false); - frame_support::debug::info!("Resuming pallet operations."); + if operational { + log::info!(target: "runtime::bridge-messages", "Resuming pallet operations."); + } else { + log::warn!(target: "runtime::bridge-messages", "Stopping pallet operations."); + } } /// Update pallet parameter. /// - /// May only be called either by root, or by `ModuleOwner`. + /// May only be called either by root, or by `PalletOwner`. /// /// The weight is: single read for permissions check + 2 writes for parameter value and event. #[weight = (T::DbWeight::get().reads_writes(1, 2), DispatchClass::Operational)] @@ -310,7 +305,8 @@ decl_module! { // let's first check if message can be delivered to target chain T::TargetHeaderChain::verify_message(&payload) .map_err(|err| { - frame_support::debug::trace!( + log::trace!( + target: "runtime::bridge-messages", "Message to lane {:?} is rejected by target chain: {:?}", lane_id, err, @@ -328,7 +324,8 @@ decl_module! { &lane.data(), &payload, ).map_err(|err| { - frame_support::debug::trace!( + log::trace!( + target: "runtime::bridge-messages", "Message to lane {:?} is rejected by lane verifier: {:?}", lane_id, err, @@ -343,7 +340,8 @@ decl_module! 
{ &delivery_and_dispatch_fee, &Self::relayer_fund_account_id(), ).map_err(|err| { - frame_support::debug::trace!( + log::trace!( + target: "runtime::bridge-messages", "Message to lane {:?} is rejected because submitter {:?} is unable to pay fee {:?}: {:?}", lane_id, submitter, @@ -363,7 +361,8 @@ decl_module! { }); lane.prune_messages(T::MaxMessagesToPruneAtOnce::get()); - frame_support::debug::trace!( + log::trace!( + target: "runtime::bridge-messages", "Accepted message {} to lane {:?}. Message size: {:?}", nonce, lane_id, @@ -399,7 +398,8 @@ decl_module! { &additional_fee, &Self::relayer_fund_account_id(), ).map_err(|err| { - frame_support::debug::trace!( + log::trace!( + target: "runtime::bridge-messages", "Submitter {:?} can't pay additional fee {:?} for the message {:?}/{:?}: {:?}", submitter, additional_fee, @@ -455,7 +455,8 @@ decl_module! { T::InboundPayload, >(proof, messages_count) .map_err(|err| { - frame_support::debug::trace!( + log::trace!( + target: "runtime::bridge-messages", "Rejecting invalid messages proof: {:?}", err, ); @@ -474,7 +475,8 @@ decl_module! { ) .fold(0, |sum, weight| sum.saturating_add(weight)); if dispatch_weight < actual_dispatch_weight { - frame_support::debug::trace!( + log::trace!( + target: "runtime::bridge-messages", "Rejecting messages proof because of dispatch weight mismatch: declared={}, expected={}", dispatch_weight, actual_dispatch_weight, @@ -492,7 +494,8 @@ decl_module! { if let Some(lane_state) = lane_data.lane_state { let updated_latest_confirmed_nonce = lane.receive_state_update(lane_state); if let Some(updated_latest_confirmed_nonce) = updated_latest_confirmed_nonce { - frame_support::debug::trace!( + log::trace!( + target: "runtime::bridge-messages", "Received lane {:?} state update: latest_confirmed_nonce={}", lane_id, updated_latest_confirmed_nonce, @@ -510,7 +513,8 @@ decl_module! 
{ } } - frame_support::debug::trace!( + log::trace!( + target: "runtime::bridge-messages", "Received messages: total={}, valid={}", total_messages, valid_messages, @@ -530,7 +534,8 @@ decl_module! { let confirmation_relayer = ensure_signed(origin)?; let (lane_id, lane_data) = T::TargetHeaderChain::verify_messages_delivery_proof(proof).map_err(|err| { - frame_support::debug::trace!( + log::trace!( + target: "runtime::bridge-messages", "Rejecting invalid messages delivery proof: {:?}", err, ); @@ -585,7 +590,8 @@ decl_module! { ); } - frame_support::debug::trace!( + log::trace!( + target: "runtime::bridge-messages", "Received messages delivery proof up to (and including) {} at lane {:?}", last_delivered_nonce, lane_id, @@ -596,7 +602,7 @@ decl_module! { } } -impl, I: Instance> Module { +impl, I: Instance> Pallet { /// Get payload of given outbound message. pub fn outbound_message_payload(lane: LaneId, nonce: MessageNonce) -> Option { OutboundMessages::::get(MessageKey { lane_id: lane, nonce }).map(|message_data| message_data.payload) @@ -623,11 +629,9 @@ impl, I: Instance> Module { } /// Get state of unrewarded relayers set. - pub fn inbound_unrewarded_relayers_state( - lane: bp_message_lane::LaneId, - ) -> bp_message_lane::UnrewardedRelayersState { + pub fn inbound_unrewarded_relayers_state(lane: bp_messages::LaneId) -> bp_messages::UnrewardedRelayersState { let relayers = InboundLanes::::get(&lane).relayers; - bp_message_lane::UnrewardedRelayersState { + bp_messages::UnrewardedRelayersState { unrewarded_relayer_entries: relayers.len() as _, messages_in_oldest_entry: relayers.front().map(|(begin, end, _)| 1 + end - begin).unwrap_or(0), total_messages: total_unrewarded_messages(&relayers).unwrap_or(MessageNonce::MAX), @@ -682,11 +686,11 @@ pub mod storage_keys { } } -/// Ensure that the origin is either root, or `ModuleOwner`. +/// Ensure that the origin is either root, or `PalletOwner`. 
fn ensure_owner_or_root, I: Instance>(origin: T::Origin) -> Result<(), BadOrigin> { match origin.into() { Ok(RawOrigin::Root) => Ok(()), - Ok(RawOrigin::Signed(ref signer)) if Some(signer) == Module::::module_owner().as_ref() => Ok(()), + Ok(RawOrigin::Signed(ref signer)) if Some(signer) == Pallet::::module_owner().as_ref() => Ok(()), _ => Err(BadOrigin), } } @@ -843,13 +847,14 @@ fn verify_and_decode_messages_proof, Fee, Dispatch mod tests { use super::*; use crate::mock::{ - message, run_test, Event as TestEvent, Origin, TestMessageDeliveryAndDispatchPayment, TestMessageLaneParameter, - TestMessagesDeliveryProof, TestMessagesProof, TestPayload, TestRuntime, TokenConversionRate, - PAYLOAD_REJECTED_BY_TARGET_CHAIN, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B, + message, run_test, Event as TestEvent, Origin, TestMessageDeliveryAndDispatchPayment, + TestMessagesDeliveryProof, TestMessagesParameter, TestMessagesProof, TestPayload, TestRuntime, + TokenConversionRate, PAYLOAD_REJECTED_BY_TARGET_CHAIN, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, + TEST_RELAYER_B, }; - use bp_message_lane::UnrewardedRelayersState; + use bp_messages::UnrewardedRelayersState; use frame_support::{assert_noop, assert_ok}; - use frame_system::{EventRecord, Module as System, Phase}; + use frame_system::{EventRecord, Pallet as System, Phase}; use hex_literal::hex; use sp_runtime::DispatchError; @@ -861,7 +866,7 @@ mod tests { fn send_regular_message() { get_ready_for_events(); - assert_ok!(Module::::send_message( + assert_ok!(Pallet::::send_message( Origin::signed(1), TEST_LANE_ID, REGULAR_PAYLOAD, @@ -873,7 +878,7 @@ mod tests { System::::events(), vec![EventRecord { phase: Phase::Initialization, - event: TestEvent::pallet_message_lane(RawEvent::MessageAccepted(TEST_LANE_ID, 1)), + event: TestEvent::pallet_bridge_messages(RawEvent::MessageAccepted(TEST_LANE_ID, 1)), topics: vec![], }], ); @@ -886,7 +891,7 @@ mod tests { System::::set_block_number(1); 
System::::reset_events(); - assert_ok!(Module::::receive_messages_delivery_proof( + assert_ok!(Pallet::::receive_messages_delivery_proof( Origin::signed(1), TestMessagesDeliveryProof(Ok(( TEST_LANE_ID, @@ -902,7 +907,7 @@ mod tests { System::::events(), vec![EventRecord { phase: Phase::Initialization, - event: TestEvent::pallet_message_lane(RawEvent::MessagesDelivered(TEST_LANE_ID, 1, 1)), + event: TestEvent::pallet_bridge_messages(RawEvent::MessagesDelivered(TEST_LANE_ID, 1, 1)), topics: vec![], }], ); @@ -911,56 +916,56 @@ mod tests { #[test] fn pallet_owner_may_change_owner() { run_test(|| { - ModuleOwner::::put(2); + PalletOwner::::put(2); - assert_ok!(Module::::set_owner(Origin::root(), Some(1))); + assert_ok!(Pallet::::set_owner(Origin::root(), Some(1))); assert_noop!( - Module::::halt_operations(Origin::signed(2)), + Pallet::::set_operational(Origin::signed(2), false), DispatchError::BadOrigin, ); - assert_ok!(Module::::halt_operations(Origin::root())); + assert_ok!(Pallet::::set_operational(Origin::root(), false)); - assert_ok!(Module::::set_owner(Origin::signed(1), None)); + assert_ok!(Pallet::::set_owner(Origin::signed(1), None)); assert_noop!( - Module::::resume_operations(Origin::signed(1)), + Pallet::::set_operational(Origin::signed(1), true), DispatchError::BadOrigin, ); assert_noop!( - Module::::resume_operations(Origin::signed(2)), + Pallet::::set_operational(Origin::signed(2), true), DispatchError::BadOrigin, ); - assert_ok!(Module::::resume_operations(Origin::root())); + assert_ok!(Pallet::::set_operational(Origin::root(), true)); }); } #[test] fn pallet_may_be_halted_by_root() { run_test(|| { - assert_ok!(Module::::halt_operations(Origin::root())); - assert_ok!(Module::::resume_operations(Origin::root())); + assert_ok!(Pallet::::set_operational(Origin::root(), false)); + assert_ok!(Pallet::::set_operational(Origin::root(), true)); }); } #[test] fn pallet_may_be_halted_by_owner() { run_test(|| { - ModuleOwner::::put(2); + PalletOwner::::put(2); - 
assert_ok!(Module::::halt_operations(Origin::signed(2))); - assert_ok!(Module::::resume_operations(Origin::signed(2))); + assert_ok!(Pallet::::set_operational(Origin::signed(2), false)); + assert_ok!(Pallet::::set_operational(Origin::signed(2), true)); assert_noop!( - Module::::halt_operations(Origin::signed(1)), + Pallet::::set_operational(Origin::signed(1), false), DispatchError::BadOrigin, ); assert_noop!( - Module::::resume_operations(Origin::signed(1)), + Pallet::::set_operational(Origin::signed(1), true), DispatchError::BadOrigin, ); - assert_ok!(Module::::halt_operations(Origin::signed(2))); + assert_ok!(Pallet::::set_operational(Origin::signed(2), false)); assert_noop!( - Module::::resume_operations(Origin::signed(1)), + Pallet::::set_operational(Origin::signed(1), true), DispatchError::BadOrigin, ); }); @@ -971,8 +976,8 @@ mod tests { run_test(|| { get_ready_for_events(); - let parameter = TestMessageLaneParameter::TokenConversionRate(10.into()); - assert_ok!(Module::::update_pallet_parameter( + let parameter = TestMessagesParameter::TokenConversionRate(10.into()); + assert_ok!(Pallet::::update_pallet_parameter( Origin::root(), parameter.clone(), )); @@ -982,7 +987,7 @@ mod tests { System::::events(), vec![EventRecord { phase: Phase::Initialization, - event: TestEvent::pallet_message_lane(RawEvent::ParameterUpdated(parameter)), + event: TestEvent::pallet_bridge_messages(RawEvent::ParameterUpdated(parameter)), topics: vec![], }], ); @@ -992,11 +997,11 @@ mod tests { #[test] fn pallet_parameter_may_be_updated_by_owner() { run_test(|| { - ModuleOwner::::put(2); + PalletOwner::::put(2); get_ready_for_events(); - let parameter = TestMessageLaneParameter::TokenConversionRate(10.into()); - assert_ok!(Module::::update_pallet_parameter( + let parameter = TestMessagesParameter::TokenConversionRate(10.into()); + assert_ok!(Pallet::::update_pallet_parameter( Origin::signed(2), parameter.clone(), )); @@ -1006,7 +1011,7 @@ mod tests { System::::events(), 
vec![EventRecord { phase: Phase::Initialization, - event: TestEvent::pallet_message_lane(RawEvent::ParameterUpdated(parameter)), + event: TestEvent::pallet_bridge_messages(RawEvent::ParameterUpdated(parameter)), topics: vec![], }], ); @@ -1017,19 +1022,19 @@ mod tests { fn pallet_parameter_cant_be_updated_by_arbitrary_submitter() { run_test(|| { assert_noop!( - Module::::update_pallet_parameter( + Pallet::::update_pallet_parameter( Origin::signed(2), - TestMessageLaneParameter::TokenConversionRate(10.into()), + TestMessagesParameter::TokenConversionRate(10.into()), ), DispatchError::BadOrigin, ); - ModuleOwner::::put(2); + PalletOwner::::put(2); assert_noop!( - Module::::update_pallet_parameter( + Pallet::::update_pallet_parameter( Origin::signed(1), - TestMessageLaneParameter::TokenConversionRate(10.into()), + TestMessagesParameter::TokenConversionRate(10.into()), ), DispatchError::BadOrigin, ); @@ -1070,7 +1075,7 @@ mod tests { IsHalted::::put(true); assert_noop!( - Module::::send_message( + Pallet::::send_message( Origin::signed(1), TEST_LANE_ID, REGULAR_PAYLOAD, @@ -1080,7 +1085,7 @@ mod tests { ); assert_noop!( - Module::::receive_messages_proof( + Pallet::::receive_messages_proof( Origin::signed(1), TEST_RELAYER_A, Ok(vec![message(2, REGULAR_PAYLOAD)]).into(), @@ -1091,7 +1096,7 @@ mod tests { ); assert_noop!( - Module::::receive_messages_delivery_proof( + Pallet::::receive_messages_delivery_proof( Origin::signed(1), TestMessagesDeliveryProof(Ok(( TEST_LANE_ID, @@ -1119,7 +1124,7 @@ mod tests { run_test(|| { // messages with this payload are rejected by target chain verifier assert_noop!( - Module::::send_message( + Pallet::::send_message( Origin::signed(1), TEST_LANE_ID, PAYLOAD_REJECTED_BY_TARGET_CHAIN, @@ -1135,7 +1140,7 @@ mod tests { run_test(|| { // messages with zero fee are rejected by lane verifier assert_noop!( - Module::::send_message(Origin::signed(1), TEST_LANE_ID, REGULAR_PAYLOAD, 0), + Pallet::::send_message(Origin::signed(1), TEST_LANE_ID, 
REGULAR_PAYLOAD, 0), Error::::MessageRejectedByLaneVerifier, ); }); @@ -1146,7 +1151,7 @@ mod tests { run_test(|| { TestMessageDeliveryAndDispatchPayment::reject_payments(); assert_noop!( - Module::::send_message( + Pallet::::send_message( Origin::signed(1), TEST_LANE_ID, REGULAR_PAYLOAD, @@ -1160,7 +1165,7 @@ mod tests { #[test] fn receive_messages_proof_works() { run_test(|| { - assert_ok!(Module::::receive_messages_proof( + assert_ok!(Pallet::::receive_messages_proof( Origin::signed(1), TEST_RELAYER_A, Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), @@ -1186,7 +1191,7 @@ mod tests { }, ); assert_eq!( - Module::::inbound_unrewarded_relayers_state(TEST_LANE_ID), + Pallet::::inbound_unrewarded_relayers_state(TEST_LANE_ID), UnrewardedRelayersState { unrewarded_relayer_entries: 2, messages_in_oldest_entry: 1, @@ -1201,7 +1206,7 @@ mod tests { ..Default::default() }); - assert_ok!(Module::::receive_messages_proof( + assert_ok!(Pallet::::receive_messages_proof( Origin::signed(1), TEST_RELAYER_A, message_proof, @@ -1219,7 +1224,7 @@ mod tests { }, ); assert_eq!( - Module::::inbound_unrewarded_relayers_state(TEST_LANE_ID), + Pallet::::inbound_unrewarded_relayers_state(TEST_LANE_ID), UnrewardedRelayersState { unrewarded_relayer_entries: 2, messages_in_oldest_entry: 1, @@ -1233,7 +1238,7 @@ mod tests { fn receive_messages_proof_rejects_invalid_dispatch_weight() { run_test(|| { assert_noop!( - Module::::receive_messages_proof( + Pallet::::receive_messages_proof( Origin::signed(1), TEST_RELAYER_A, Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), @@ -1249,7 +1254,7 @@ mod tests { fn receive_messages_proof_rejects_invalid_proof() { run_test(|| { assert_noop!( - Module::::receive_messages_proof( + Pallet::::receive_messages_proof( Origin::signed(1), TEST_RELAYER_A, Err(()).into(), @@ -1265,7 +1270,7 @@ mod tests { fn receive_messages_proof_rejects_proof_with_too_many_messages() { run_test(|| { assert_noop!( - Module::::receive_messages_proof( + Pallet::::receive_messages_proof( 
Origin::signed(1), TEST_RELAYER_A, Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), @@ -1293,13 +1298,13 @@ mod tests { #[test] fn receive_messages_delivery_proof_rewards_relayers() { run_test(|| { - assert_ok!(Module::::send_message( + assert_ok!(Pallet::::send_message( Origin::signed(1), TEST_LANE_ID, REGULAR_PAYLOAD, 1000, )); - assert_ok!(Module::::send_message( + assert_ok!(Pallet::::send_message( Origin::signed(1), TEST_LANE_ID, REGULAR_PAYLOAD, @@ -1307,7 +1312,7 @@ mod tests { )); // this reports delivery of message 1 => reward is paid to TEST_RELAYER_A - assert_ok!(Module::::receive_messages_delivery_proof( + assert_ok!(Pallet::::receive_messages_delivery_proof( Origin::signed(1), TestMessagesDeliveryProof(Ok(( TEST_LANE_ID, @@ -1332,7 +1337,7 @@ mod tests { )); // this reports delivery of both message 1 and message 2 => reward is paid only to TEST_RELAYER_B - assert_ok!(Module::::receive_messages_delivery_proof( + assert_ok!(Pallet::::receive_messages_delivery_proof( Origin::signed(1), TestMessagesDeliveryProof(Ok(( TEST_LANE_ID, @@ -1364,7 +1369,7 @@ mod tests { fn receive_messages_delivery_proof_rejects_invalid_proof() { run_test(|| { assert_noop!( - Module::::receive_messages_delivery_proof( + Pallet::::receive_messages_delivery_proof( Origin::signed(1), TestMessagesDeliveryProof(Err(())), Default::default(), @@ -1379,7 +1384,7 @@ mod tests { run_test(|| { // when number of relayers entires is invalid assert_noop!( - Module::::receive_messages_delivery_proof( + Pallet::::receive_messages_delivery_proof( Origin::signed(1), TestMessagesDeliveryProof(Ok(( TEST_LANE_ID, @@ -1401,7 +1406,7 @@ mod tests { // when number of messages is invalid assert_noop!( - Module::::receive_messages_delivery_proof( + Pallet::::receive_messages_delivery_proof( Origin::signed(1), TestMessagesDeliveryProof(Ok(( TEST_LANE_ID, @@ -1429,7 +1434,7 @@ mod tests { let mut invalid_message = message(1, REGULAR_PAYLOAD); invalid_message.data.payload = Vec::new(); - 
assert_ok!(Module::::receive_messages_proof( + assert_ok!(Pallet::::receive_messages_proof( Origin::signed(1), TEST_RELAYER_A, Ok(vec![invalid_message]).into(), @@ -1450,7 +1455,7 @@ mod tests { let mut invalid_message = message(2, REGULAR_PAYLOAD); invalid_message.data.payload = Vec::new(); - assert_ok!(Module::::receive_messages_proof( + assert_ok!(Pallet::::receive_messages_proof( Origin::signed(1), TEST_RELAYER_A, Ok(vec![ @@ -1474,9 +1479,12 @@ mod tests { fn storage_message_key_computed_properly() { // If this test fails, then something has been changed in module storage that is breaking all // previously crafted messages proofs. + let storage_key = storage_keys::message_key::(&*b"test", 42).0; assert_eq!( - storage_keys::message_key::(&*b"test", 42).0, - hex!("87f1ffe31b52878f09495ca7482df1a48a395e6242c6813b196ca31ed0547ea79446af0e09063bd4a7874aef8a997cec746573742a00000000000000").to_vec(), + storage_key, + hex!("dd16c784ebd3390a9bc0357c7511ed018a395e6242c6813b196ca31ed0547ea79446af0e09063bd4a7874aef8a997cec746573742a00000000000000").to_vec(), + "Unexpected storage key: {}", + hex::encode(&storage_key), ); } @@ -1484,9 +1492,12 @@ mod tests { fn outbound_lane_data_key_computed_properly() { // If this test fails, then something has been changed in module storage that is breaking all // previously crafted outbound lane state proofs. 
+ let storage_key = storage_keys::outbound_lane_data_key::(&*b"test").0; assert_eq!( - storage_keys::outbound_lane_data_key::(&*b"test").0, - hex!("87f1ffe31b52878f09495ca7482df1a496c246acb9b55077390e3ca723a0ca1f44a8995dd50b6657a037a7839304535b74657374").to_vec(), + storage_key, + hex!("dd16c784ebd3390a9bc0357c7511ed0196c246acb9b55077390e3ca723a0ca1f44a8995dd50b6657a037a7839304535b74657374").to_vec(), + "Unexpected storage key: {}", + hex::encode(&storage_key), ); } @@ -1494,9 +1505,12 @@ mod tests { fn inbound_lane_data_key_computed_properly() { // If this test fails, then something has been changed in module storage that is breaking all // previously crafted inbound lane state proofs. + let storage_key = storage_keys::inbound_lane_data_key::(&*b"test").0; assert_eq!( - storage_keys::inbound_lane_data_key::(&*b"test").0, - hex!("87f1ffe31b52878f09495ca7482df1a4e5f83cf83f2127eb47afdc35d6e43fab44a8995dd50b6657a037a7839304535b74657374").to_vec(), + storage_key, + hex!("dd16c784ebd3390a9bc0357c7511ed01e5f83cf83f2127eb47afdc35d6e43fab44a8995dd50b6657a037a7839304535b74657374").to_vec(), + "Unexpected storage key: {}", + hex::encode(&storage_key), ); } @@ -1508,7 +1522,7 @@ mod tests { let message3 = message(2, TestPayload(0, Weight::MAX / 2)); assert_noop!( - Module::::receive_messages_proof( + Pallet::::receive_messages_proof( Origin::signed(1), TEST_RELAYER_A, // this may cause overflow if source chain storage is invalid @@ -1528,7 +1542,7 @@ mod tests { receive_messages_delivery_proof(); assert_noop!( - Module::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,), + Pallet::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,), Error::::MessageIsAlreadyDelivered, ); }); @@ -1538,7 +1552,7 @@ mod tests { fn increase_message_fee_fails_if_message_is_not_yet_sent() { run_test(|| { assert_noop!( - Module::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,), + Pallet::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,), 
Error::::MessageIsNotYetSent, ); }); @@ -1552,7 +1566,7 @@ mod tests { TestMessageDeliveryAndDispatchPayment::reject_payments(); assert_noop!( - Module::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,), + Pallet::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,), Error::::FailedToWithdrawMessageFee, ); }); @@ -1563,7 +1577,7 @@ mod tests { run_test(|| { send_regular_message(); - assert_ok!(Module::::increase_message_fee( + assert_ok!(Pallet::::increase_message_fee( Origin::signed(1), TEST_LANE_ID, 1, diff --git a/polkadot/bridges/modules/message-lane/src/mock.rs b/polkadot/bridges/modules/messages/src/mock.rs similarity index 95% rename from polkadot/bridges/modules/message-lane/src/mock.rs rename to polkadot/bridges/modules/messages/src/mock.rs index 3fa10beef4..e640fa7805 100644 --- a/polkadot/bridges/modules/message-lane/src/mock.rs +++ b/polkadot/bridges/modules/messages/src/mock.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify @@ -19,13 +19,13 @@ use crate::Config; -use bp_message_lane::{ +use bp_messages::{ source_chain::{ LaneMessageVerifier, MessageDeliveryAndDispatchPayment, RelayersRewards, Sender, TargetHeaderChain, }, target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain}, InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, OutboundLaneData, - Parameter as MessageLaneParameter, + Parameter as MessagesParameter, }; use bp_runtime::Size; use codec::{Decode, Encode}; @@ -56,7 +56,7 @@ impl sp_runtime::traits::Convert for AccountIdConverter { type Block = frame_system::mocking::MockBlock; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -use crate as pallet_message_lane; +use crate as pallet_bridge_messages; frame_support::construct_runtime! { pub enum TestRuntime where @@ -66,7 +66,7 @@ frame_support::construct_runtime! { { System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Event}, - MessageLane: pallet_message_lane::{Pallet, Call, Event}, + Messages: pallet_bridge_messages::{Pallet, Call, Event}, } } @@ -125,16 +125,14 @@ parameter_types! 
{ } #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] -pub enum TestMessageLaneParameter { +pub enum TestMessagesParameter { TokenConversionRate(FixedU128), } -impl MessageLaneParameter for TestMessageLaneParameter { +impl MessagesParameter for TestMessagesParameter { fn save(&self) { match *self { - TestMessageLaneParameter::TokenConversionRate(conversion_rate) => { - TokenConversionRate::set(&conversion_rate) - } + TestMessagesParameter::TokenConversionRate(conversion_rate) => TokenConversionRate::set(&conversion_rate), } } } @@ -142,7 +140,7 @@ impl MessageLaneParameter for TestMessageLaneParameter { impl Config for TestRuntime { type Event = Event; type WeightInfo = (); - type Parameter = TestMessageLaneParameter; + type Parameter = TestMessagesParameter; type MaxMessagesToPruneAtOnce = MaxMessagesToPruneAtOnce; type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; @@ -391,7 +389,7 @@ pub fn message_data(payload: TestPayload) -> MessageData { } } -/// Run message lane test. +/// Run pallet test. pub fn run_test(test: impl FnOnce() -> T) -> T { let mut t = frame_system::GenesisConfig::default() .build_storage::() diff --git a/polkadot/bridges/modules/message-lane/src/outbound_lane.rs b/polkadot/bridges/modules/messages/src/outbound_lane.rs similarity index 98% rename from polkadot/bridges/modules/message-lane/src/outbound_lane.rs rename to polkadot/bridges/modules/messages/src/outbound_lane.rs index 8496d7f8c0..47616c33ea 100644 --- a/polkadot/bridges/modules/message-lane/src/outbound_lane.rs +++ b/polkadot/bridges/modules/messages/src/outbound_lane.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -16,7 +16,7 @@ //! 
Everything about outgoing messages sending. -use bp_message_lane::{LaneId, MessageData, MessageNonce, OutboundLaneData}; +use bp_messages::{LaneId, MessageData, MessageNonce, OutboundLaneData}; /// Outbound lane storage. pub trait OutboundLaneStorage { diff --git a/polkadot/bridges/modules/message-lane/src/weights.rs b/polkadot/bridges/modules/messages/src/weights.rs similarity index 78% rename from polkadot/bridges/modules/message-lane/src/weights.rs rename to polkadot/bridges/modules/messages/src/weights.rs index b0ec6522b2..0eecd0d846 100644 --- a/polkadot/bridges/modules/message-lane/src/weights.rs +++ b/polkadot/bridges/modules/messages/src/weights.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,26 +14,26 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Autogenerated weights for pallet_message_lane +//! Autogenerated weights for pallet_bridge_messages //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.1 -//! DATE: 2021-02-11, STEPS: [50, ], REPEAT: 20 +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-04-14, STEPS: [50, ], REPEAT: 20 //! LOW RANGE: [], HIGH RANGE: [] //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled -//! CHAIN: Some("local"), DB CACHE: 128 +//! 
CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: // target/release/rialto-bridge-node // benchmark -// --chain=local +// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_message_lane +// --pallet=pallet_bridge_messages // --extrinsic=* // --execution=wasm // --wasm-execution=Compiled // --heap-pages=4096 -// --output=./modules/message-lane/src/weights.rs +// --output=./modules/messages/src/weights.rs // --template=./.maintain/rialto-weight-template.hbs #![allow(clippy::all)] @@ -46,7 +46,7 @@ use frame_support::{ }; use sp_std::marker::PhantomData; -/// Weight functions needed for pallet_message_lane. +/// Weight functions needed for pallet_bridge_messages. pub trait WeightInfo { fn send_minimal_message_worst_case() -> Weight; fn send_1_kb_message_worst_case() -> Weight; @@ -69,109 +69,109 @@ pub trait WeightInfo { fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight; } -/// Weights for pallet_message_lane using the Rialto node and recommended hardware. +/// Weights for pallet_bridge_messages using the Rialto node and recommended hardware. 
pub struct RialtoWeight(PhantomData); impl WeightInfo for RialtoWeight { fn send_minimal_message_worst_case() -> Weight { - (140_645_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) + (149_497_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(12 as Weight)) } fn send_1_kb_message_worst_case() -> Weight { - (146_434_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) + (154_339_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(12 as Weight)) } fn send_16_kb_message_worst_case() -> Weight { - (214_721_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) + (200_066_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(12 as Weight)) } fn increase_message_fee() -> Weight { - (8_395_221_000 as Weight) + (6_432_637_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_single_message_proof() -> Weight { - (156_390_000 as Weight) + (141_671_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_two_messages_proof() -> Weight { - (269_316_000 as Weight) + (247_393_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - (174_342_000 as Weight) + (159_312_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_single_message_proof_1_kb() -> Weight { - (186_621_000 as Weight) + (167_935_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn 
receive_single_message_proof_16_kb() -> Weight { - (487_028_000 as Weight) + (449_846_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_delivery_proof_for_single_message() -> Weight { - (144_893_000 as Weight) + (127_322_000 as Weight) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - (151_134_000 as Weight) + (134_120_000 as Weight) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - (212_650_000 as Weight) + (191_193_000 as Weight) .saturating_add(T::DbWeight::get().reads(8 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn send_messages_of_various_lengths(i: u32) -> Weight { - (88_670_000 as Weight) - .saturating_add((5_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) + (115_699_000 as Weight) + .saturating_add((3_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(12 as Weight)) } fn receive_multiple_messages_proof(i: u32) -> Weight { (0 as Weight) - .saturating_add((125_956_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((113_551_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_message_proofs_with_extra_nodes(i: u32) -> Weight { - (462_389_000 as Weight) - .saturating_add((11_000 as Weight).saturating_mul(i as Weight)) + (458_731_000 as Weight) + .saturating_add((9_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as 
Weight)) } fn receive_message_proofs_with_large_leaf(i: u32) -> Weight { - (120_744_000 as Weight) - .saturating_add((8_000 as Weight).saturating_mul(i as Weight)) + (82_314_000 as Weight) + .saturating_add((7_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_multiple_messages_proof_with_outbound_lane_state(i: u32) -> Weight { - (0 as Weight) - .saturating_add((130_087_000 as Weight).saturating_mul(i as Weight)) + (16_766_000 as Weight) + .saturating_add((115_533_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn receive_delivery_proof_for_multiple_messages_by_single_relayer(i: u32) -> Weight { - (126_833_000 as Weight) - .saturating_add((7_793_000 as Weight).saturating_mul(i as Weight)) + (122_146_000 as Weight) + .saturating_add((6_789_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight { - (71_269_000 as Weight) - .saturating_add((72_377_000 as Weight).saturating_mul(i as Weight)) + (155_671_000 as Weight) + .saturating_add((63_020_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(i as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) @@ -182,105 +182,105 @@ impl WeightInfo for RialtoWeight { // For backwards compatibility and tests impl WeightInfo for () { fn send_minimal_message_worst_case() -> Weight { - (140_645_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + (149_497_000 as Weight) + 
.saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } fn send_1_kb_message_worst_case() -> Weight { - (146_434_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + (154_339_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } fn send_16_kb_message_worst_case() -> Weight { - (214_721_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + (200_066_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } fn increase_message_fee() -> Weight { - (8_395_221_000 as Weight) + (6_432_637_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_single_message_proof() -> Weight { - (156_390_000 as Weight) + (141_671_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_two_messages_proof() -> Weight { - (269_316_000 as Weight) + (247_393_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - (174_342_000 as Weight) + (159_312_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_single_message_proof_1_kb() -> Weight { - (186_621_000 as Weight) + (167_935_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_single_message_proof_16_kb() -> Weight { - (487_028_000 as Weight) + (449_846_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn 
receive_delivery_proof_for_single_message() -> Weight { - (144_893_000 as Weight) + (127_322_000 as Weight) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - (151_134_000 as Weight) + (134_120_000 as Weight) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - (212_650_000 as Weight) + (191_193_000 as Weight) .saturating_add(RocksDbWeight::get().reads(8 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn send_messages_of_various_lengths(i: u32) -> Weight { - (88_670_000 as Weight) - .saturating_add((5_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + (115_699_000 as Weight) + .saturating_add((3_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } fn receive_multiple_messages_proof(i: u32) -> Weight { (0 as Weight) - .saturating_add((125_956_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((113_551_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_message_proofs_with_extra_nodes(i: u32) -> Weight { - (462_389_000 as Weight) - .saturating_add((11_000 as Weight).saturating_mul(i as Weight)) + (458_731_000 as Weight) + .saturating_add((9_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_message_proofs_with_large_leaf(i: u32) -> Weight { - (120_744_000 as Weight) - .saturating_add((8_000 as Weight).saturating_mul(i as Weight)) + (82_314_000 as Weight) + 
.saturating_add((7_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_multiple_messages_proof_with_outbound_lane_state(i: u32) -> Weight { - (0 as Weight) - .saturating_add((130_087_000 as Weight).saturating_mul(i as Weight)) + (16_766_000 as Weight) + .saturating_add((115_533_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn receive_delivery_proof_for_multiple_messages_by_single_relayer(i: u32) -> Weight { - (126_833_000 as Weight) - .saturating_add((7_793_000 as Weight).saturating_mul(i as Weight)) + (122_146_000 as Weight) + .saturating_add((6_789_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight { - (71_269_000 as Weight) - .saturating_add((72_377_000 as Weight).saturating_mul(i as Weight)) + (155_671_000 as Weight) + .saturating_add((63_020_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(i as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) diff --git a/polkadot/bridges/modules/message-lane/src/weights_ext.rs b/polkadot/bridges/modules/messages/src/weights_ext.rs similarity index 91% rename from polkadot/bridges/modules/message-lane/src/weights_ext.rs rename to polkadot/bridges/modules/messages/src/weights_ext.rs index d99a20007d..cb754a1023 100644 --- a/polkadot/bridges/modules/message-lane/src/weights_ext.rs +++ b/polkadot/bridges/modules/messages/src/weights_ext.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 
Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -18,7 +18,7 @@ use crate::weights::WeightInfo; -use bp_message_lane::{MessageNonce, UnrewardedRelayersState}; +use bp_messages::{MessageNonce, UnrewardedRelayersState}; use bp_runtime::{PreComputedSize, Size}; use frame_support::weights::Weight; @@ -95,9 +95,6 @@ pub fn ensure_able_to_receive_message( max_extrinsic_size: u32, max_extrinsic_weight: Weight, max_incoming_message_proof_size: u32, - // This is a base weight (which includes cost of tx itself, per-byte cost, adjusted per-byte cost) of single - // message delivery transaction that brings `max_incoming_message_proof_size` proof. - max_incoming_message_proof_base_weight: Weight, max_incoming_message_dispatch_weight: Weight, ) { // verify that we're able to receive proof of maximal-size message @@ -116,12 +113,9 @@ pub fn ensure_able_to_receive_message( 1, max_incoming_message_dispatch_weight, ); - let max_delivery_transaction_weight = - max_incoming_message_proof_base_weight.saturating_add(max_delivery_transaction_dispatch_weight); assert!( - max_delivery_transaction_weight <= max_extrinsic_weight, - "Weight of maximal message delivery transaction {} + {} is larger than maximal possible transaction weight {}", - max_delivery_transaction_weight, + max_delivery_transaction_dispatch_weight <= max_extrinsic_weight, + "Weight of maximal message delivery transaction + {} is larger than maximal possible transaction weight {}", max_delivery_transaction_dispatch_weight, max_extrinsic_weight, ); @@ -134,9 +128,6 @@ pub fn ensure_able_to_receive_confirmation( max_inbound_lane_data_proof_size_from_peer_chain: u32, max_unrewarded_relayer_entries_at_peer_inbound_lane: MessageNonce, max_unconfirmed_messages_at_inbound_lane: MessageNonce, - // This is a base weight (which includes cost of tx itself, per-byte 
cost, adjusted per-byte cost) of single - // confirmation transaction that brings `max_inbound_lane_data_proof_size_from_peer_chain` proof. - max_incoming_delivery_proof_base_weight: Weight, ) { // verify that we're able to receive confirmation of maximal-size let max_confirmation_transaction_size = @@ -158,12 +149,9 @@ pub fn ensure_able_to_receive_confirmation( ..Default::default() }, ); - let max_confirmation_transaction_weight = - max_incoming_delivery_proof_base_weight.saturating_add(max_confirmation_transaction_dispatch_weight); assert!( - max_confirmation_transaction_weight <= max_extrinsic_weight, - "Weight of maximal confirmation transaction {} + {} is larger than maximal possible transaction weight {}", - max_incoming_delivery_proof_base_weight, + max_confirmation_transaction_dispatch_weight <= max_extrinsic_weight, + "Weight of maximal confirmation transaction {} is larger than maximal possible transaction weight {}", max_confirmation_transaction_dispatch_weight, max_extrinsic_weight, ); diff --git a/polkadot/bridges/modules/shift-session-manager/src/lib.rs b/polkadot/bridges/modules/shift-session-manager/src/lib.rs index a463d868b1..0d867657af 100644 --- a/polkadot/bridges/modules/shift-session-manager/src/lib.rs +++ b/polkadot/bridges/modules/shift-session-manager/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -31,13 +31,13 @@ decl_module! { } decl_storage! { - trait Store for Module as ShiftSessionManager { + trait Store for Pallet as ShiftSessionManager { /// Validators of first two sessions. 
InitialValidators: Option>; } } -impl pallet_session::SessionManager for Module { +impl pallet_session::SessionManager for Pallet { fn end_session(_: sp_staking::SessionIndex) {} fn start_session(_: sp_staking::SessionIndex) {} fn new_session(session_index: sp_staking::SessionIndex) -> Option> { @@ -52,7 +52,7 @@ impl pallet_session::SessionManager for Module { // then for every session we select (deterministically) 2/3 of these initial // validators to serve validators of new session let available_validators = InitialValidators::::get().unwrap_or_else(|| { - let validators = >::validators(); + let validators = >::validators(); InitialValidators::::put(validators.clone()); validators }); @@ -61,7 +61,7 @@ impl pallet_session::SessionManager for Module { } } -impl Module { +impl Pallet { /// Select validators for session. fn select_validators( session_index: sp_staking::SessionIndex, @@ -210,19 +210,19 @@ mod tests { let all_accs = vec![1, 2, 3, 4, 5]; // at least 1 validator is selected - assert_eq!(Module::::select_validators(0, &[1]), vec![1],); + assert_eq!(Pallet::::select_validators(0, &[1]), vec![1],); // at session#0, shift is also 0 - assert_eq!(Module::::select_validators(0, &all_accs), vec![1, 2, 3],); + assert_eq!(Pallet::::select_validators(0, &all_accs), vec![1, 2, 3],); // at session#1, shift is also 1 - assert_eq!(Module::::select_validators(1, &all_accs), vec![2, 3, 4],); + assert_eq!(Pallet::::select_validators(1, &all_accs), vec![2, 3, 4],); // at session#3, we're wrapping - assert_eq!(Module::::select_validators(3, &all_accs), vec![4, 5, 1],); + assert_eq!(Pallet::::select_validators(3, &all_accs), vec![4, 5, 1],); // at session#5, we're starting from the beginning again - assert_eq!(Module::::select_validators(5, &all_accs), vec![1, 2, 3],); + assert_eq!(Pallet::::select_validators(5, &all_accs), vec![1, 2, 3],); }); } } diff --git a/polkadot/bridges/modules/substrate/src/fork_tests.rs b/polkadot/bridges/modules/substrate/src/fork_tests.rs 
deleted file mode 100644 index 445ffd8ce5..0000000000 --- a/polkadot/bridges/modules/substrate/src/fork_tests.rs +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tests for checking that behaviour of importing headers and finality proofs works correctly. -//! -//! The tests are built around the idea that we will be importing headers on different forks and we -//! should be able to check that we're correctly importing headers, scheduling changes, and -//! finalizing headers across different forks. -//! -//! Each test is depicted using beautiful ASCII art. The symbols used in the tests are the -//! following: -//! -//! - S|N: Schedules change in N blocks -//! - E: Enacts change -//! - F: Finalized -//! - FN: Finality proof imported for header N -//! -//! Each diagram also comes with an import order. This is important since we expect things to fail -//! when headers or proofs are imported in a certain order. -//! -//! Tests can be read as follows: -//! -//! ## Example Import 1 -//! -//! (Type::Header(2, 1, None, None), Ok(())) -//! -//! Import header 2 on fork 1. This does not create a fork, or schedule an authority set change. We -//! expect this header import to be succesful. -//! -//! ## Example Import 2 -//! -//! 
(Type::Header(4, 2, Some((3, 1)), Some(0)), Ok(())) -//! -//! Import header 4 on fork 2. This header starts a new fork from header 3 on fork 1. It also -//! schedules a change with a delay of 0 blocks. It should be succesfully imported. -//! -//! ## Example Import 3 -//! -//! (Type::Finality(2, 1), Err(FinalizationError::OldHeader.into())) -//! -//! Import a finality proof for header 2 on fork 1. This finalty proof should fail to be imported -//! because the header is an old header. - -use crate::mock::*; -use crate::storage::ImportedHeader; -use crate::verifier::*; -use crate::{BestFinalized, BestHeight, BridgeStorage, NextScheduledChange, PalletStorage}; -use bp_header_chain::AuthoritySet; -use bp_test_utils::{alice, authority_list, bob, make_justification_for_header}; -use codec::Encode; -use frame_support::{IterableStorageMap, StorageValue}; -use sp_finality_grandpa::{ConsensusLog, GRANDPA_ENGINE_ID}; -use sp_runtime::{Digest, DigestItem}; -use std::collections::BTreeMap; - -type ForkId = u64; -type Delay = u64; - -// Indicates when to start a new fork. The first item in the tuple -// will be the parent header of the header starting this fork. 
-type ForksAt = Option<(TestNumber, ForkId)>; -type ScheduledChangeAt = Option; - -#[derive(Debug)] -enum Type { - Header(TestNumber, ForkId, ForksAt, ScheduledChangeAt), - Finality(TestNumber, ForkId), -} - -// Order: 1, 2, 2', 3, 3'' -// -// / [3''] -// / [2'] -// [1] <- [2] <- [3] -#[test] -fn fork_can_import_headers_on_different_forks() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, None), Ok(())), - (Type::Header(2, 2, Some((1, 1)), None), Ok(())), - (Type::Header(3, 1, None, None), Ok(())), - (Type::Header(3, 3, Some((2, 2)), None), Ok(())), - ]; - - create_chain(&mut storage, &mut chain); - - let best_headers = storage.best_headers(); - assert_eq!(best_headers.len(), 2); - assert_eq!(>::get(), 3); - }) -} - -// Order: 1, 2, 2', F2, F2' -// -// [1] <- [2: F] -// \ [2'] -// -// Not allowed to finalize 2' -#[test] -fn fork_does_not_allow_competing_finality_proofs() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, None), Ok(())), - (Type::Header(2, 2, Some((1, 1)), None), Ok(())), - (Type::Finality(2, 1), Ok(())), - (Type::Finality(2, 2), Err(FinalizationError::OldHeader.into())), - ]; - - create_chain(&mut storage, &mut chain); - }) -} - -// Order: 1, 2, 3, F2, 3 -// -// [1] <- [2: S|0] <- [3] -// -// Not allowed to import 3 until we get F2 -// -// Note: GRANDPA would technically allow 3 to be imported as long as it didn't try and enact an -// authority set change. However, since we expect finality proofs to be imported quickly we've -// decided to simplify our import process and disallow header imports until we get a finality proof. 
-#[test] -fn fork_waits_for_finality_proof_before_importing_header_past_one_which_enacts_a_change() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, Some(0)), Ok(())), - ( - Type::Header(3, 1, None, None), - Err(ImportError::AwaitingFinalityProof.into()), - ), - (Type::Finality(2, 1), Ok(())), - (Type::Header(3, 1, None, None), Ok(())), - ]; - - create_chain(&mut storage, &mut chain); - }) -} - -// Order: 1, 2, F2, 3 -// -// [1] <- [2: S|1] <- [3: S|0] -// -// GRANDPA can have multiple authority set changes pending on the same fork. However, we've decided -// to introduce a limit of _one_ pending authority set change per fork in order to simplify pallet -// logic and to prevent DoS attacks if GRANDPA finality were to temporarily stall for a long time -// (we'd have to perform a lot of expensive ancestry checks to catch back up). -#[test] -fn fork_does_not_allow_multiple_scheduled_changes_on_the_same_fork() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, Some(1)), Ok(())), - ( - Type::Header(3, 1, None, Some(0)), - Err(ImportError::PendingAuthoritySetChange.into()), - ), - (Type::Finality(2, 1), Ok(())), - (Type::Header(3, 1, None, Some(0)), Ok(())), - ]; - - create_chain(&mut storage, &mut chain); - }) -} - -// Order: 1, 2, 2' -// -// / [2': S|0] -// [1] <- [2: S|0] -// -// Both 2 and 2' should be marked as needing justifications since they enact changes. 
-#[test] -fn fork_correctly_tracks_which_headers_require_finality_proofs() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, Some(0)), Ok(())), - (Type::Header(2, 2, Some((1, 1)), Some(0)), Ok(())), - ]; - - create_chain(&mut storage, &mut chain); - - let header_ids = storage.missing_justifications(); - assert_eq!(header_ids.len(), 2); - assert!(header_ids[0].hash != header_ids[1].hash); - assert_eq!(header_ids[0].number, 2); - assert_eq!(header_ids[1].number, 2); - }) -} - -// Order: 1, 2, 2', 3', F2, 3, 4' -// -// / [2': S|1] <- [3'] <- [4'] -// [1] <- [2: S|0] <- [3] -// -// -// Not allowed to import 3 or 4' -// Can only import 3 after we get the finality proof for 2 -#[test] -fn fork_does_not_allow_importing_past_header_that_enacts_changes_on_forks() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, Some(0)), Ok(())), - (Type::Header(2, 2, Some((1, 1)), Some(1)), Ok(())), - ( - Type::Header(3, 1, None, None), - Err(ImportError::AwaitingFinalityProof.into()), - ), - (Type::Header(3, 2, None, None), Ok(())), - (Type::Finality(2, 1), Ok(())), - (Type::Header(3, 1, None, None), Ok(())), - ( - Type::Header(4, 2, None, None), - Err(ImportError::AwaitingFinalityProof.into()), - ), - ]; - - create_chain(&mut storage, &mut chain); - - // Since we can't query the map directly to check if we applied the right authority set - // change (we don't know the header hash of 2) we need to get a little clever. 
- let mut next_change = >::iter(); - let (_, scheduled_change_on_fork) = next_change.next().unwrap(); - assert_eq!(scheduled_change_on_fork.height, 3); - - // Sanity check to make sure we enacted the change on the canonical change - assert_eq!(next_change.next(), None); - }) -} - -// Order: 1, 2, 3, 2', 3' -// -// / [2'] <- [3'] -// [1] <- [2: S|0] <- [3] -// -// Not allowed to import 3 -// Fine to import 2' and 3' -#[test] -fn fork_allows_importing_on_different_fork_while_waiting_for_finality_proof() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, Some(0)), Ok(())), - ( - Type::Header(3, 1, None, None), - Err(ImportError::AwaitingFinalityProof.into()), - ), - (Type::Header(2, 2, Some((1, 1)), None), Ok(())), - (Type::Header(3, 2, None, None), Ok(())), - ]; - - create_chain(&mut storage, &mut chain); - }) -} - -// Order: 1, 2, 2', F2, 3, 3' -// -// / [2'] <- [3'] -// [1] <- [2: F] <- [3] -// -// In our current implementation we're allowed to keep building on fork 2 for as long as our hearts' -// content. However, we'll never be able to finalize anything on that fork. We'd have to check for -// ancestry with `best_finalized` on every import which will get expensive. -// -// I think this is fine as long as we run pruning every so often to clean up these dead forks. 
-#[test] -fn fork_allows_importing_on_different_fork_past_finalized_header() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, Some(0)), Ok(())), - (Type::Header(2, 2, Some((1, 1)), None), Ok(())), - (Type::Finality(2, 1), Ok(())), - (Type::Header(3, 1, None, None), Ok(())), - (Type::Header(3, 2, None, None), Ok(())), - ]; - - create_chain(&mut storage, &mut chain); - }) -} - -// Order: 1, 2, 3, 4, 3', 4' -// -// / [3': E] <- [4'] -// [1] <- [2: S|1] <- [3: E] <- [4] -// -// Not allowed to import {4|4'} -#[test] -fn fork_can_track_scheduled_changes_across_forks() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, Some(1)), Ok(())), - (Type::Header(3, 1, None, None), Ok(())), - ( - Type::Header(4, 1, None, None), - Err(ImportError::AwaitingFinalityProof.into()), - ), - (Type::Header(3, 2, Some((2, 1)), None), Ok(())), - ( - Type::Header(4, 2, None, None), - Err(ImportError::AwaitingFinalityProof.into()), - ), - ]; - - create_chain(&mut storage, &mut chain); - }) -} - -#[derive(Debug, PartialEq)] -enum TestError { - Import(ImportError), - Finality(FinalizationError), -} - -impl From for TestError { - fn from(e: ImportError) -> Self { - TestError::Import(e) - } -} - -impl From for TestError { - fn from(e: FinalizationError) -> Self { - TestError::Finality(e) - } -} - -// Builds a fork-aware representation of a blockchain given a list of headers. -// -// Takes a list of headers and finality proof operations which will be applied in order. The -// expected outcome for each operation is also required. -// -// The first header in the list will be used as the genesis header and will be manually imported -// into storage. -fn create_chain(storage: &mut S, chain: &mut Vec<(Type, Result<(), TestError>)>) -where - S: BridgeStorage
+ Clone, -{ - let mut map = BTreeMap::new(); - let mut verifier = Verifier { - storage: storage.clone(), - }; - initialize_genesis(storage, &mut map, chain.remove(0).0); - - for h in chain { - match h { - (Type::Header(num, fork_id, does_fork, schedules_change), expected_result) => { - // If we've never seen this fork before - if !map.contains_key(&fork_id) { - // Let's get the info about where to start the fork - if let Some((parent_num, forked_from_id)) = does_fork { - let fork = &*map.get(&forked_from_id).unwrap(); - let parent = fork - .iter() - .find(|h| h.number == *parent_num) - .expect("Trying to fork on a parent which doesn't exist"); - - let mut header = test_header(*num); - header.parent_hash = parent.hash(); - header.state_root = [*fork_id as u8; 32].into(); - - if let Some(delay) = schedules_change { - header.digest = change_log(*delay); - } - - // Try and import into storage - let res = verifier - .import_header(header.hash(), header.clone()) - .map_err(TestError::Import); - assert_eq!( - res, *expected_result, - "Expected {:?} while importing header ({}, {}), got {:?}", - *expected_result, *num, *fork_id, res, - ); - - // Let's mark the header down in a new fork - if res.is_ok() { - map.insert(*fork_id, vec![header]); - } - } - } else { - // We've seen this fork before so let's append our new header to it - let parent_hash = { - let fork = &*map.get(&fork_id).unwrap(); - fork.last().unwrap().hash() - }; - - let mut header = test_header(*num); - header.parent_hash = parent_hash; - - // Doing this to make sure headers at the same height but on - // different forks have different hashes - header.state_root = [*fork_id as u8; 32].into(); - - if let Some(delay) = schedules_change { - header.digest = change_log(*delay); - } - - let res = verifier - .import_header(header.hash(), header.clone()) - .map_err(TestError::Import); - assert_eq!( - res, *expected_result, - "Expected {:?} while importing header ({}, {}), got {:?}", - *expected_result, *num, 
*fork_id, res, - ); - - if res.is_ok() { - map.get_mut(&fork_id).unwrap().push(header); - } - } - } - (Type::Finality(num, fork_id), expected_result) => { - let header = map[fork_id] - .iter() - .find(|h| h.number == *num) - .expect("Trying to finalize block that doesn't exist"); - - // This is technically equivocating (accepting the same justification on the same - // `grandpa_round`). - // - // See for more: https://github.com/paritytech/parity-bridges-common/issues/430 - let grandpa_round = 1; - let set_id = 1; - let authorities = authority_list(); - let justification = make_justification_for_header(header, grandpa_round, set_id, &authorities).encode(); - - let res = verifier - .import_finality_proof(header.hash(), justification.into()) - .map_err(TestError::Finality); - assert_eq!( - res, *expected_result, - "Expected {:?} while importing finality proof for header ({}, {}), got {:?}", - *expected_result, *num, *fork_id, res, - ); - } - } - } - - for (key, value) in map.iter() { - println!("{}: {:#?}", key, value); - } -} - -fn initialize_genesis(storage: &mut S, map: &mut BTreeMap>, genesis: Type) -where - S: BridgeStorage
, -{ - if let Type::Header(num, fork_id, None, None) = genesis { - let genesis = test_header(num); - map.insert(fork_id, vec![genesis.clone()]); - - let genesis = ImportedHeader { - header: genesis, - requires_justification: false, - is_finalized: true, - signal_hash: None, - }; - - >::put(genesis.hash()); - storage.write_header(&genesis); - } else { - panic!("Unexpected genesis block format {:#?}", genesis) - } - - let set_id = 1; - let authorities = authority_list(); - let authority_set = AuthoritySet::new(authorities, set_id); - storage.update_current_authority_set(authority_set); -} - -pub(crate) fn change_log(delay: u64) -> Digest { - let consensus_log = ConsensusLog::::ScheduledChange(sp_finality_grandpa::ScheduledChange { - next_authorities: vec![(alice(), 1), (bob(), 1)], - delay, - }); - - Digest:: { - logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())], - } -} diff --git a/polkadot/bridges/modules/substrate/src/lib.rs b/polkadot/bridges/modules/substrate/src/lib.rs deleted file mode 100644 index c14db8596f..0000000000 --- a/polkadot/bridges/modules/substrate/src/lib.rs +++ /dev/null @@ -1,1013 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate Bridge Pallet -//! -//! 
This pallet is an on-chain light client for chains which have a notion of finality. -//! -//! It has a simple interface for achieving this. First it can import headers to the runtime -//! storage. During this it will check the validity of the headers and ensure they don't conflict -//! with any existing headers (e.g they're on a different finalized chain). Secondly it can finalize -//! an already imported header (and its ancestors) given a valid GRANDPA justification. -//! -//! With these two functions the pallet is able to form a "source of truth" for what headers have -//! been finalized on a given Substrate chain. This can be a useful source of info for other -//! higher-level applications. - -#![cfg_attr(not(feature = "std"), no_std)] -// Runtime-generated enums -#![allow(clippy::large_enum_variant)] - -use crate::storage::ImportedHeader; -use bp_header_chain::AuthoritySet; -use bp_runtime::{BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf}; -use frame_support::{ - decl_error, decl_module, decl_storage, dispatch::DispatchResult, ensure, traits::Get, weights::DispatchClass, -}; -use frame_system::{ensure_signed, RawOrigin}; -use sp_runtime::traits::Header as HeaderT; -use sp_runtime::{traits::BadOrigin, RuntimeDebug}; -use sp_std::{marker::PhantomData, prelude::*}; -use sp_trie::StorageProof; - -// Re-export since the node uses these when configuring genesis -pub use storage::{InitializationData, ScheduledChange}; - -pub use storage_proof::StorageProofChecker; - -mod storage; -mod storage_proof; -mod verifier; - -#[cfg(test)] -mod mock; - -#[cfg(test)] -mod fork_tests; - -/// Block number of the bridged chain. -pub(crate) type BridgedBlockNumber = BlockNumberOf<::BridgedChain>; -/// Block hash of the bridged chain. -pub(crate) type BridgedBlockHash = HashOf<::BridgedChain>; -/// Hasher of the bridged chain. -pub(crate) type BridgedBlockHasher = HasherOf<::BridgedChain>; -/// Header of the bridged chain. 
-pub(crate) type BridgedHeader = HeaderOf<::BridgedChain>; - -/// A convenience type identifying headers. -#[derive(RuntimeDebug, PartialEq)] -pub struct HeaderId { - /// The block number of the header. - pub number: H::Number, - /// The hash of the header. - pub hash: H::Hash, -} - -pub trait Config: frame_system::Config { - /// Chain that we are bridging here. - type BridgedChain: Chain; -} - -decl_storage! { - trait Store for Module as SubstrateBridge { - /// Hash of the header used to bootstrap the pallet. - InitialHash: BridgedBlockHash; - /// The number of the highest block(s) we know of. - BestHeight: BridgedBlockNumber; - /// Hash of the header at the highest known height. - /// - /// If there are multiple headers at the same "best" height - /// this will contain all of their hashes. - BestHeaders: Vec>; - /// Hash of the best finalized header. - BestFinalized: BridgedBlockHash; - /// The set of header IDs (number, hash) which enact an authority set change and therefore - /// require a GRANDPA justification. - RequiresJustification: map hasher(identity) BridgedBlockHash => BridgedBlockNumber; - /// Headers which have been imported into the pallet. - ImportedHeaders: map hasher(identity) BridgedBlockHash => Option>>; - /// The current GRANDPA Authority set. - CurrentAuthoritySet: AuthoritySet; - /// The next scheduled authority set change for a given fork. - /// - /// The fork is indicated by the header which _signals_ the change (key in the mapping). - /// Note that this is different than a header which _enacts_ a change. - // GRANDPA doesn't require there to always be a pending change. In fact, most of the time - // there will be no pending change available. - NextScheduledChange: map hasher(identity) BridgedBlockHash => Option>>; - /// Optional pallet owner. - /// - /// Pallet owner has a right to halt all pallet operations and then resume it. 
If it is - /// `None`, then there are no direct ways to halt/resume pallet operations, but other - /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt - /// flag directly or call the `halt_operations`). - ModuleOwner get(fn module_owner): Option; - /// If true, all pallet transactions are failed immediately. - IsHalted get(fn is_halted): bool; - } - add_extra_genesis { - config(owner): Option; - config(init_data): Option>>; - build(|config| { - if let Some(ref owner) = config.owner { - >::put(owner); - } - - if let Some(init_data) = config.init_data.clone() { - initialize_bridge::(init_data); - } else { - // Since the bridge hasn't been initialized we shouldn't allow anyone to perform - // transactions. - IsHalted::put(true); - } - }) - } -} - -decl_error! { - pub enum Error for Module { - /// This header has failed basic verification. - InvalidHeader, - /// This header has not been finalized. - UnfinalizedHeader, - /// The header is unknown. - UnknownHeader, - /// The storage proof doesn't contains storage root. So it is invalid for given header. - StorageRootMismatch, - /// Error when trying to fetch storage value from the proof. - StorageValueUnavailable, - /// All pallet operations are halted. - Halted, - /// The pallet has already been initialized. - AlreadyInitialized, - /// The given header is not a descendant of a particular header. - NotDescendant, - } -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - /// Import a signed Substrate header into the runtime. - /// - /// This will perform some basic checks to make sure it is fine to - /// import into the runtime. However, it does not perform any checks - /// related to finality. 
- // TODO: Update weights [#78] - #[weight = 0] - pub fn import_signed_header( - origin, - header: BridgedHeader, - ) -> DispatchResult { - ensure_operational::()?; - let _ = ensure_signed(origin)?; - let hash = header.hash(); - frame_support::debug::trace!("Going to import header {:?}: {:?}", hash, header); - - let mut verifier = verifier::Verifier { - storage: PalletStorage::::new(), - }; - - let _ = verifier - .import_header(hash, header) - .map_err(|e| { - frame_support::debug::error!("Failed to import header {:?}: {:?}", hash, e); - >::InvalidHeader - })?; - - frame_support::debug::trace!("Successfully imported header: {:?}", hash); - - Ok(()) - } - - /// Import a finalty proof for a particular header. - /// - /// This will take care of finalizing any already imported headers - /// which get finalized when importing this particular proof, as well - /// as updating the current and next validator sets. - // TODO: Update weights [#78] - #[weight = 0] - pub fn finalize_header( - origin, - hash: BridgedBlockHash, - finality_proof: Vec, - ) -> DispatchResult { - ensure_operational::()?; - let _ = ensure_signed(origin)?; - frame_support::debug::trace!("Going to finalize header: {:?}", hash); - - let mut verifier = verifier::Verifier { - storage: PalletStorage::::new(), - }; - - let _ = verifier - .import_finality_proof(hash, finality_proof.into()) - .map_err(|e| { - frame_support::debug::error!("Failed to finalize header {:?}: {:?}", hash, e); - >::UnfinalizedHeader - })?; - - frame_support::debug::trace!("Successfully finalized header: {:?}", hash); - - Ok(()) - } - - /// Bootstrap the bridge pallet with an initial header and authority set from which to sync. - /// - /// The initial configuration provided does not need to be the genesis header of the bridged - /// chain, it can be any arbirary header. You can also provide the next scheduled set change - /// if it is already know. 
- /// - /// This function is only allowed to be called from a trusted origin and writes to storage - /// with practically no checks in terms of the validity of the data. It is important that - /// you ensure that valid data is being passed in. - //TODO: Update weights [#78] - #[weight = 0] - pub fn initialize( - origin, - init_data: InitializationData>, - ) { - ensure_owner_or_root::(origin)?; - let init_allowed = !>::exists(); - ensure!(init_allowed, >::AlreadyInitialized); - initialize_bridge::(init_data.clone()); - - frame_support::debug::info!( - "Pallet has been initialized with the following parameters: {:?}", init_data - ); - } - - /// Change `ModuleOwner`. - /// - /// May only be called either by root, or by `ModuleOwner`. - #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] - pub fn set_owner(origin, new_owner: Option) { - ensure_owner_or_root::(origin)?; - match new_owner { - Some(new_owner) => { - ModuleOwner::::put(&new_owner); - frame_support::debug::info!("Setting pallet Owner to: {:?}", new_owner); - }, - None => { - ModuleOwner::::kill(); - frame_support::debug::info!("Removed Owner of pallet."); - }, - } - } - - /// Halt all pallet operations. Operations may be resumed using `resume_operations` call. - /// - /// May only be called either by root, or by `ModuleOwner`. - #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] - pub fn halt_operations(origin) { - ensure_owner_or_root::(origin)?; - IsHalted::put(true); - frame_support::debug::warn!("Stopping pallet operations."); - } - - /// Resume all pallet operations. May be called even if pallet is halted. - /// - /// May only be called either by root, or by `ModuleOwner`. 
- #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] - pub fn resume_operations(origin) { - ensure_owner_or_root::(origin)?; - IsHalted::put(false); - frame_support::debug::info!("Resuming pallet operations."); - } - } -} - -impl Module { - /// Get the highest header(s) that the pallet knows of. - pub fn best_headers() -> Vec<(BridgedBlockNumber, BridgedBlockHash)> { - PalletStorage::::new() - .best_headers() - .iter() - .map(|id| (id.number, id.hash)) - .collect() - } - - /// Get the best finalized header the pallet knows of. - /// - /// Returns a dummy header if there is no best header. This can only happen - /// if the pallet has not been initialized yet. - /// - /// Since this has been finalized correctly a user of the bridge - /// pallet should be confident that any transactions that were - /// included in this or any previous header will not be reverted. - pub fn best_finalized() -> BridgedHeader { - PalletStorage::::new().best_finalized_header().header - } - - /// Check if a particular header is known to the bridge pallet. - pub fn is_known_header(hash: BridgedBlockHash) -> bool { - PalletStorage::::new().header_exists(hash) - } - - /// Check if a particular header is finalized. - /// - /// Will return false if the header is not known to the pallet. - // One thing worth noting here is that this approach won't work well - // once we track forks since there could be an older header on a - // different fork which isn't an ancestor of our best finalized header. - pub fn is_finalized_header(hash: BridgedBlockHash) -> bool { - let storage = PalletStorage::::new(); - if let Some(header) = storage.header_by_hash(hash) { - header.is_finalized - } else { - false - } - } - - /// Returns a list of headers which require finality proofs. - /// - /// These headers require proofs because they enact authority set changes. 
- pub fn require_justifications() -> Vec<(BridgedBlockNumber, BridgedBlockHash)> { - PalletStorage::::new() - .missing_justifications() - .iter() - .map(|id| (id.number, id.hash)) - .collect() - } - - /// Verify that the passed storage proof is valid, given it is crafted using - /// known finalized header. If the proof is valid, then the `parse` callback - /// is called and the function returns its result. - pub fn parse_finalized_storage_proof( - finalized_header_hash: BridgedBlockHash, - storage_proof: StorageProof, - parse: impl FnOnce(StorageProofChecker>) -> R, - ) -> Result { - let storage = PalletStorage::::new(); - let header = storage - .header_by_hash(finalized_header_hash) - .ok_or(Error::::UnknownHeader)?; - if !header.is_finalized { - return Err(Error::::UnfinalizedHeader.into()); - } - - let storage_proof_checker = - StorageProofChecker::new(*header.state_root(), storage_proof).map_err(Error::::from)?; - Ok(parse(storage_proof_checker)) - } -} - -impl bp_header_chain::HeaderChain, sp_runtime::DispatchError> for Module { - fn best_finalized() -> BridgedHeader { - PalletStorage::::new().best_finalized_header().header - } - - fn authority_set() -> AuthoritySet { - PalletStorage::::new().current_authority_set() - } - - fn append_header(header: BridgedHeader) { - import_header_unchecked::<_, T>(&mut PalletStorage::::new(), header); - } -} - -/// Import a finalized header without checking if this is true. -/// -/// This function assumes that all the given header has already been proven to be valid and -/// finalized. Using this assumption it will write them to storage with minimal checks. That -/// means it's of great importance that this function *not* called with any headers whose -/// finality has not been checked, otherwise you risk bricking your bridge. -/// -/// One thing this function does do for you is GRANDPA authority set handoffs. 
However, since it -/// does not do verification on the incoming header it will assume that the authority set change -/// signals in the digest are well formed. -fn import_header_unchecked(storage: &mut S, header: BridgedHeader) -where - S: BridgeStorage
>, - T: Config, -{ - // Since we want to use the existing storage infrastructure we need to indicate the fork - // that we're on. We will assume that since we are using the unchecked import there are no - // forks, and can indicate that by using the first imported header's "fork". - let dummy_fork_hash = >::get(); - - // If we have a pending change in storage let's check if the current header enacts it. - let enact_change = if let Some(pending_change) = storage.scheduled_set_change(dummy_fork_hash) { - pending_change.height == *header.number() - } else { - // We don't have a scheduled change in storage at the moment. Let's check if the current - // header signals an authority set change. - if let Some(change) = verifier::find_scheduled_change(&header) { - let next_set = AuthoritySet { - authorities: change.next_authorities, - set_id: storage.current_authority_set().set_id + 1, - }; - - let height = *header.number() + change.delay; - let scheduled_change = ScheduledChange { - authority_set: next_set, - height, - }; - - storage.schedule_next_set_change(dummy_fork_hash, scheduled_change); - - // If the delay is 0 this header will enact the change it signaled - height == *header.number() - } else { - false - } - }; - - if enact_change { - const ENACT_SET_PROOF: &str = "We only set `enact_change` as `true` if we are sure that there is a scheduled - authority set change in storage. Therefore, it must exist."; - - // If we are unable to enact an authority set it means our storage entry for scheduled - // changes is missing. Best to crash since this is likely a bug. - let _ = storage.enact_authority_set(dummy_fork_hash).expect(ENACT_SET_PROOF); - } - - storage.update_best_finalized(header.hash()); - - storage.write_header(&ImportedHeader { - header, - requires_justification: false, - is_finalized: true, - signal_hash: None, - }); -} - -/// Ensure that the origin is either root, or `ModuleOwner`. 
-fn ensure_owner_or_root(origin: T::Origin) -> Result<(), BadOrigin> { - match origin.into() { - Ok(RawOrigin::Root) => Ok(()), - Ok(RawOrigin::Signed(ref signer)) if Some(signer) == >::module_owner().as_ref() => Ok(()), - _ => Err(BadOrigin), - } -} - -/// Ensure that the pallet is in operational mode (not halted). -fn ensure_operational() -> Result<(), Error> { - if IsHalted::get() { - Err(>::Halted) - } else { - Ok(()) - } -} - -/// (Re)initialize bridge with given header for using it in external benchmarks. -#[cfg(feature = "runtime-benchmarks")] -pub fn initialize_for_benchmarks(header: HeaderOf) { - initialize_bridge::(InitializationData { - header, - authority_list: Vec::new(), // we don't verify any proofs in external benchmarks - set_id: 0, - scheduled_change: None, - is_halted: false, - }); -} - -/// Since this writes to storage with no real checks this should only be used in functions that were -/// called by a trusted origin. -fn initialize_bridge(init_params: InitializationData>) { - let InitializationData { - header, - authority_list, - set_id, - scheduled_change, - is_halted, - } = init_params; - - let initial_hash = header.hash(); - - let mut signal_hash = None; - if let Some(ref change) = scheduled_change { - assert!( - change.height > *header.number(), - "Changes must be scheduled past initial header." - ); - - signal_hash = Some(initial_hash); - >::insert(initial_hash, change); - }; - - >::put(initial_hash); - >::put(header.number()); - >::put(vec![initial_hash]); - >::put(initial_hash); - - let authority_set = AuthoritySet::new(authority_list, set_id); - CurrentAuthoritySet::put(authority_set); - - >::insert( - initial_hash, - ImportedHeader { - header, - requires_justification: false, - is_finalized: true, - signal_hash, - }, - ); - - IsHalted::put(is_halted); -} - -/// Expected interface for interacting with bridge pallet storage. 
-// TODO: This should be split into its own less-Substrate-dependent crate -pub trait BridgeStorage { - /// The header type being used by the pallet. - type Header: HeaderT; - - /// Write a header to storage. - fn write_header(&mut self, header: &ImportedHeader); - - /// Get the header(s) at the highest known height. - fn best_headers(&self) -> Vec>; - - /// Get the best finalized header the pallet knows of. - /// - /// Returns None if there is no best header. This can only happen if the pallet - /// has not been initialized yet. - fn best_finalized_header(&self) -> ImportedHeader; - - /// Update the best finalized header the pallet knows of. - fn update_best_finalized(&self, hash: ::Hash); - - /// Check if a particular header is known to the pallet. - fn header_exists(&self, hash: ::Hash) -> bool; - - /// Returns a list of headers which require justifications. - /// - /// A header will require a justification if it enacts a new authority set. - fn missing_justifications(&self) -> Vec>; - - /// Get a specific header by its hash. - /// - /// Returns None if it is not known to the pallet. - fn header_by_hash(&self, hash: ::Hash) -> Option>; - - /// Get the current GRANDPA authority set. - fn current_authority_set(&self) -> AuthoritySet; - - /// Update the current GRANDPA authority set. - /// - /// Should only be updated when a scheduled change has been triggered. - fn update_current_authority_set(&self, new_set: AuthoritySet); - - /// Replace the current authority set with the next scheduled set. - /// - /// Returns an error if there is no scheduled authority set to enact. - #[allow(clippy::result_unit_err)] - fn enact_authority_set(&mut self, signal_hash: ::Hash) -> Result<(), ()>; - - /// Get the next scheduled GRANDPA authority set change. - fn scheduled_set_change( - &self, - signal_hash: ::Hash, - ) -> Option::Number>>; - - /// Schedule a GRANDPA authority set change in the future. - /// - /// Takes the hash of the header which scheduled this particular change. 
- fn schedule_next_set_change( - &mut self, - signal_hash: ::Hash, - next_change: ScheduledChange<::Number>, - ); -} - -/// Used to interact with the pallet storage in a more abstract way. -#[derive(Default, Clone)] -pub struct PalletStorage(PhantomData); - -impl PalletStorage { - fn new() -> Self { - Self(PhantomData::::default()) - } -} - -impl BridgeStorage for PalletStorage { - type Header = BridgedHeader; - - fn write_header(&mut self, header: &ImportedHeader>) { - use core::cmp::Ordering; - - let hash = header.hash(); - let current_height = header.number(); - let best_height = >::get(); - - match current_height.cmp(&best_height) { - Ordering::Equal => { - // Want to avoid duplicates in the case where we're writing a finalized header to - // storage which also happens to be at the best height the best height - let not_duplicate = !>::contains_key(hash); - if not_duplicate { - >::append(hash); - } - } - Ordering::Greater => { - >::kill(); - >::append(hash); - >::put(current_height); - } - Ordering::Less => { - // This is fine. We can still have a valid header, but it might just be on a - // different fork and at a lower height than the "best" overall header. - } - } - - if header.requires_justification { - >::insert(hash, current_height); - } else { - // If the key doesn't exist this is a no-op, so it's fine to call it often - >::remove(hash); - } - - >::insert(hash, header); - } - - fn best_headers(&self) -> Vec>> { - let number = >::get(); - >::get() - .iter() - .map(|hash| HeaderId { number, hash: *hash }) - .collect() - } - - fn best_finalized_header(&self) -> ImportedHeader> { - // We will only construct a dummy header if the pallet is not initialized and someone tries - // to use the public module interface (not dispatchables) to get the best finalized header. - // This is an edge case since this can only really happen when bootstrapping the bridge. 
- let hash = >::get(); - self.header_by_hash(hash).unwrap_or_else(|| ImportedHeader { - header: >::new( - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ), - requires_justification: false, - is_finalized: false, - signal_hash: None, - }) - } - - fn update_best_finalized(&self, hash: BridgedBlockHash) { - >::put(hash); - } - - fn header_exists(&self, hash: BridgedBlockHash) -> bool { - >::contains_key(hash) - } - - fn header_by_hash(&self, hash: BridgedBlockHash) -> Option>> { - >::get(hash) - } - - fn missing_justifications(&self) -> Vec>> { - >::iter() - .map(|(hash, number)| HeaderId { number, hash }) - .collect() - } - - fn current_authority_set(&self) -> AuthoritySet { - CurrentAuthoritySet::get() - } - - fn update_current_authority_set(&self, new_set: AuthoritySet) { - CurrentAuthoritySet::put(new_set) - } - - fn enact_authority_set(&mut self, signal_hash: BridgedBlockHash) -> Result<(), ()> { - let new_set = >::take(signal_hash).ok_or(())?.authority_set; - self.update_current_authority_set(new_set); - - Ok(()) - } - - fn scheduled_set_change(&self, signal_hash: BridgedBlockHash) -> Option>> { - >::get(signal_hash) - } - - fn schedule_next_set_change( - &mut self, - signal_hash: BridgedBlockHash, - next_change: ScheduledChange>, - ) { - >::insert(signal_hash, next_change) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{run_test, test_header, unfinalized_header, Origin, TestHeader, TestRuntime}; - use bp_header_chain::HeaderChain; - use bp_test_utils::{alice, authority_list, bob}; - use frame_support::{assert_noop, assert_ok}; - use sp_runtime::DispatchError; - - fn init_with_origin(origin: Origin) -> Result, DispatchError> { - let init_data = InitializationData { - header: test_header(1), - authority_list: authority_list(), - set_id: 1, - scheduled_change: None, - is_halted: false, - }; - - Module::::initialize(origin, init_data.clone()).map(|_| init_data) - } - - #[test] - 
fn init_root_or_owner_origin_can_initialize_pallet() { - run_test(|| { - assert_noop!(init_with_origin(Origin::signed(1)), DispatchError::BadOrigin); - assert_ok!(init_with_origin(Origin::root())); - - // Reset storage so we can initialize the pallet again - BestFinalized::::kill(); - ModuleOwner::::put(2); - assert_ok!(init_with_origin(Origin::signed(2))); - }) - } - - #[test] - fn init_storage_entries_are_correctly_initialized() { - run_test(|| { - assert!(Module::::best_headers().is_empty()); - assert_eq!(Module::::best_finalized(), test_header(0)); - - let init_data = init_with_origin(Origin::root()).unwrap(); - - let storage = PalletStorage::::new(); - assert!(storage.header_exists(init_data.header.hash())); - assert_eq!( - storage.best_headers()[0], - crate::HeaderId { - number: *init_data.header.number(), - hash: init_data.header.hash() - } - ); - assert_eq!(storage.best_finalized_header().hash(), init_data.header.hash()); - assert_eq!(storage.current_authority_set().authorities, init_data.authority_list); - assert_eq!(IsHalted::get(), false); - }) - } - - #[test] - fn init_can_only_initialize_pallet_once() { - run_test(|| { - assert_ok!(init_with_origin(Origin::root())); - assert_noop!( - init_with_origin(Origin::root()), - >::AlreadyInitialized - ); - }) - } - - #[test] - fn pallet_owner_may_change_owner() { - run_test(|| { - ModuleOwner::::put(2); - - assert_ok!(Module::::set_owner(Origin::root(), Some(1))); - assert_noop!( - Module::::halt_operations(Origin::signed(2)), - DispatchError::BadOrigin, - ); - assert_ok!(Module::::halt_operations(Origin::root())); - - assert_ok!(Module::::set_owner(Origin::signed(1), None)); - assert_noop!( - Module::::resume_operations(Origin::signed(1)), - DispatchError::BadOrigin, - ); - assert_noop!( - Module::::resume_operations(Origin::signed(2)), - DispatchError::BadOrigin, - ); - assert_ok!(Module::::resume_operations(Origin::root())); - }); - } - - #[test] - fn pallet_may_be_halted_by_root() { - run_test(|| { - 
assert_ok!(Module::::halt_operations(Origin::root())); - assert_ok!(Module::::resume_operations(Origin::root())); - }); - } - - #[test] - fn pallet_may_be_halted_by_owner() { - run_test(|| { - ModuleOwner::::put(2); - - assert_ok!(Module::::halt_operations(Origin::signed(2))); - assert_ok!(Module::::resume_operations(Origin::signed(2))); - - assert_noop!( - Module::::halt_operations(Origin::signed(1)), - DispatchError::BadOrigin, - ); - assert_noop!( - Module::::resume_operations(Origin::signed(1)), - DispatchError::BadOrigin, - ); - - assert_ok!(Module::::halt_operations(Origin::signed(2))); - assert_noop!( - Module::::resume_operations(Origin::signed(1)), - DispatchError::BadOrigin, - ); - }); - } - - #[test] - fn pallet_rejects_transactions_if_halted() { - run_test(|| { - IsHalted::put(true); - - assert_noop!( - Module::::import_signed_header(Origin::signed(1), test_header(1)), - Error::::Halted, - ); - - assert_noop!( - Module::::finalize_header(Origin::signed(1), test_header(1).hash(), vec![]), - Error::::Halted, - ); - }) - } - - #[test] - fn parse_finalized_storage_proof_rejects_proof_on_unknown_header() { - run_test(|| { - assert_noop!( - Module::::parse_finalized_storage_proof( - Default::default(), - StorageProof::new(vec![]), - |_| (), - ), - Error::::UnknownHeader, - ); - }); - } - - #[test] - fn parse_finalized_storage_proof_rejects_proof_on_unfinalized_header() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let header = unfinalized_header(1); - storage.write_header(&header); - - assert_noop!( - Module::::parse_finalized_storage_proof( - header.header.hash(), - StorageProof::new(vec![]), - |_| (), - ), - Error::::UnfinalizedHeader, - ); - }); - } - - #[test] - fn parse_finalized_storage_accepts_valid_proof() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let (state_root, storage_proof) = storage_proof::tests::craft_valid_storage_proof(); - let mut header = unfinalized_header(1); - header.is_finalized = true; - 
header.header.set_state_root(state_root); - storage.write_header(&header); - - assert_ok!( - Module::::parse_finalized_storage_proof(header.header.hash(), storage_proof, |_| (),), - (), - ); - }); - } - - #[test] - fn importing_unchecked_headers_works() { - run_test(|| { - init_with_origin(Origin::root()).unwrap(); - let storage = PalletStorage::::new(); - - let header = test_header(2); - Module::::append_header(header.clone()); - - assert!(storage.header_by_hash(header.hash()).unwrap().is_finalized); - assert_eq!(storage.best_finalized_header().header, header); - assert_eq!(storage.best_headers()[0].hash, header.hash()); - }) - } - - #[test] - fn importing_unchecked_headers_enacts_new_authority_set() { - run_test(|| { - init_with_origin(Origin::root()).unwrap(); - let storage = PalletStorage::::new(); - - let next_set_id = 2; - let next_authorities = vec![(alice(), 1), (bob(), 1)]; - - // Need to update the header digest to indicate that our header signals an authority set - // change. The change will be enacted when we import our header. - let mut header = test_header(2); - header.digest = fork_tests::change_log(0); - - // Let's import our test header - Module::::append_header(header.clone()); - - // Make sure that our header is the best finalized - assert_eq!(storage.best_finalized_header().header, header); - assert_eq!(storage.best_headers()[0].hash, header.hash()); - - // Make sure that the authority set actually changed upon importing our header - assert_eq!( - storage.current_authority_set(), - AuthoritySet::new(next_authorities, next_set_id), - ); - }) - } - - #[test] - fn importing_unchecked_headers_enacts_new_authority_set_from_old_header() { - run_test(|| { - init_with_origin(Origin::root()).unwrap(); - let storage = PalletStorage::::new(); - - let next_set_id = 2; - let next_authorities = vec![(alice(), 1), (bob(), 1)]; - - // Need to update the header digest to indicate that our header signals an authority set - // change. 
However, the change doesn't happen until the next block. - let mut schedules_change = test_header(2); - schedules_change.digest = fork_tests::change_log(1); - let header = test_header(3); - - // Let's import our test headers - Module::::append_header(schedules_change); - Module::::append_header(header.clone()); - - // Make sure that our header is the best finalized - assert_eq!(storage.best_finalized_header().header, header); - assert_eq!(storage.best_headers()[0].hash, header.hash()); - - // Make sure that the authority set actually changed upon importing our header - assert_eq!( - storage.current_authority_set(), - AuthoritySet::new(next_authorities, next_set_id), - ); - }) - } - - #[test] - fn importing_unchecked_header_can_enact_set_change_scheduled_at_genesis() { - run_test(|| { - let storage = PalletStorage::::new(); - - let next_authorities = vec![(alice(), 1)]; - let next_set_id = 2; - let next_authority_set = AuthoritySet::new(next_authorities.clone(), next_set_id); - - let first_scheduled_change = ScheduledChange { - authority_set: next_authority_set, - height: 2, - }; - - let init_data = InitializationData { - header: test_header(1), - authority_list: authority_list(), - set_id: 1, - scheduled_change: Some(first_scheduled_change), - is_halted: false, - }; - - assert_ok!(Module::::initialize(Origin::root(), init_data)); - - // We are expecting an authority set change at height 2, so this header should enact - // that upon being imported. 
- Module::::append_header(test_header(2)); - - // Make sure that the authority set actually changed upon importing our header - assert_eq!( - storage.current_authority_set(), - AuthoritySet::new(next_authorities, next_set_id), - ); - }) - } -} diff --git a/polkadot/bridges/modules/substrate/src/storage.rs b/polkadot/bridges/modules/substrate/src/storage.rs deleted file mode 100644 index 5b521306b2..0000000000 --- a/polkadot/bridges/modules/substrate/src/storage.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Storage primitives for the Substrate light client (a.k.a bridge) pallet. - -use bp_header_chain::AuthoritySet; -use codec::{Decode, Encode}; -use core::default::Default; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; -use sp_finality_grandpa::{AuthorityList, SetId}; -use sp_runtime::traits::Header as HeaderT; -use sp_runtime::RuntimeDebug; - -/// Data required for initializing the bridge pallet. -/// -/// The bridge needs to know where to start its sync from, and this provides that initial context. -#[derive(Default, Encode, Decode, RuntimeDebug, PartialEq, Clone)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct InitializationData { - /// The header from which we should start syncing. 
- pub header: H, - /// The initial authorities of the pallet. - pub authority_list: AuthorityList, - /// The ID of the initial authority set. - pub set_id: SetId, - /// The first scheduled authority set change of the pallet. - pub scheduled_change: Option>, - /// Should the pallet block transaction immediately after initialization. - pub is_halted: bool, -} - -/// Keeps track of when the next GRANDPA authority set change will occur. -#[derive(Default, Encode, Decode, RuntimeDebug, PartialEq, Clone)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct ScheduledChange { - /// The authority set that will be used once this change is enacted. - pub authority_set: AuthoritySet, - /// The block height at which the authority set should be enacted. - /// - /// Note: It will only be enacted once a header at this height is finalized. - pub height: N, -} - -/// A more useful representation of a header for storage purposes. -#[derive(Default, Encode, Decode, Clone, RuntimeDebug, PartialEq)] -pub struct ImportedHeader { - /// A plain Substrate header. - pub header: H, - /// Does this header enact a new authority set change. If it does - /// then it will require a justification. - pub requires_justification: bool, - /// Has this header been finalized, either explicitly via a justification, - /// or implicitly via one of its children getting finalized. - pub is_finalized: bool, - /// The hash of the header which scheduled a change on this fork. If there are currently - /// not pending changes on this fork this will be empty. 
- pub signal_hash: Option, -} - -impl core::ops::Deref for ImportedHeader { - type Target = H; - - fn deref(&self) -> &H { - &self.header - } -} diff --git a/polkadot/bridges/modules/substrate/src/verifier.rs b/polkadot/bridges/modules/substrate/src/verifier.rs deleted file mode 100644 index 0c3bd1b5dd..0000000000 --- a/polkadot/bridges/modules/substrate/src/verifier.rs +++ /dev/null @@ -1,871 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The verifier's role is to check the validity of headers being imported, and also determine if -//! they can be finalized. -//! -//! When importing headers it performs checks to ensure that no invariants are broken (like -//! importing the same header twice). When it imports finality proofs it will ensure that the proof -//! has been signed off by the correct GRANDPA authorities, and also enact any authority set changes -//! if required. 
- -use crate::storage::{ImportedHeader, ScheduledChange}; -use crate::BridgeStorage; - -use bp_header_chain::{justification::verify_justification, AuthoritySet}; -use finality_grandpa::voter_set::VoterSet; -use sp_finality_grandpa::{ConsensusLog, GRANDPA_ENGINE_ID}; -use sp_runtime::generic::OpaqueDigestItemId; -use sp_runtime::traits::{CheckedAdd, Header as HeaderT, One}; -use sp_runtime::RuntimeDebug; -use sp_std::{prelude::Vec, vec}; - -/// The finality proof used by the pallet. -/// -/// For a Substrate based chain using GRANDPA this will -/// be an encoded GRANDPA Justification. -#[derive(RuntimeDebug)] -pub struct FinalityProof(Vec); - -impl From<&[u8]> for FinalityProof { - fn from(proof: &[u8]) -> Self { - Self(proof.to_vec()) - } -} - -impl From> for FinalityProof { - fn from(proof: Vec) -> Self { - Self(proof) - } -} - -/// Errors which can happen while importing a header. -#[derive(RuntimeDebug, PartialEq)] -pub enum ImportError { - /// This header is at the same height or older than our latest finalized block, thus not useful. - OldHeader, - /// This header has already been imported by the pallet. - HeaderAlreadyExists, - /// We're missing a parent for this header. - MissingParent, - /// The number of the header does not follow its parent's number. - InvalidChildNumber, - /// The height of the next authority set change overflowed. - ScheduledHeightOverflow, - /// Received an authority set which was invalid in some way, such as - /// the authority weights being empty or overflowing the `AuthorityWeight` - /// type. - InvalidAuthoritySet, - /// This header is not allowed to be imported since an ancestor requires a finality proof. - /// - /// This can happen if an ancestor is supposed to enact an authority set change. - AwaitingFinalityProof, - /// This header schedules an authority set change even though we're still waiting - /// for an old authority set change to be enacted on this fork. 
- PendingAuthoritySetChange, -} - -/// Errors which can happen while verifying a headers finality. -#[derive(RuntimeDebug, PartialEq)] -pub enum FinalizationError { - /// This header has never been imported by the pallet. - UnknownHeader, - /// Trying to prematurely import a justification - PrematureJustification, - /// We failed to verify this header's ancestry. - AncestryCheckFailed, - /// This header is at the same height or older than our latest finalized block, thus not useful. - OldHeader, - /// The given justification was not able to finalize the given header. - /// - /// There are several reasons why this might happen, such as the justification being - /// signed by the wrong authority set, being given alongside an unexpected header, - /// or failing ancestry checks. - InvalidJustification, -} - -/// Used to verify imported headers and their finality status. -#[derive(RuntimeDebug)] -pub struct Verifier { - pub storage: S, -} - -impl Verifier -where - S: BridgeStorage
, - H: HeaderT, - H::Number: finality_grandpa::BlockNumberOps, -{ - /// Import a header to the pallet. - /// - /// Will perform some basic checks to make sure that this header doesn't break any assumptions - /// such as being on a different finalized fork. - pub fn import_header(&mut self, hash: H::Hash, header: H) -> Result<(), ImportError> { - let best_finalized = self.storage.best_finalized_header(); - - if header.number() <= best_finalized.number() { - return Err(ImportError::OldHeader); - } - - if self.storage.header_exists(hash) { - return Err(ImportError::HeaderAlreadyExists); - } - - let parent_header = self - .storage - .header_by_hash(*header.parent_hash()) - .ok_or(ImportError::MissingParent)?; - - let parent_number = *parent_header.number(); - if parent_number + One::one() != *header.number() { - return Err(ImportError::InvalidChildNumber); - } - - // A header requires a justification if it enacts an authority set change. We don't - // need to act on it right away (we'll update the set once the header gets finalized), but - // we need to make a note of it. - // - // Note: This assumes that we can only have one authority set change pending per fork at a - // time. While this is not strictly true of GRANDPA (it can have multiple pending changes, - // even across forks), this assumption simplifies our tracking of authority set changes. 
- let mut signal_hash = parent_header.signal_hash; - let scheduled_change = find_scheduled_change(&header); - - // Check if our fork is expecting an authority set change - let requires_justification = if let Some(hash) = signal_hash { - const PROOF: &str = "If the header has a signal hash it means there's an accompanying set - change in storage, therefore this must always be valid."; - let pending_change = self.storage.scheduled_set_change(hash).expect(PROOF); - - if scheduled_change.is_some() { - return Err(ImportError::PendingAuthoritySetChange); - } - - if *header.number() > pending_change.height { - return Err(ImportError::AwaitingFinalityProof); - } - - pending_change.height == *header.number() - } else { - // Since we don't currently have a pending authority set change let's check if the header - // contains a log indicating when the next change should be. - if let Some(change) = scheduled_change { - let mut total_weight = 0u64; - - for (_id, weight) in &change.next_authorities { - total_weight = total_weight - .checked_add(*weight) - .ok_or(ImportError::InvalidAuthoritySet)?; - } - - // If none of the authorities have a weight associated with them the - // set is essentially empty. We don't want that. - if total_weight == 0 { - return Err(ImportError::InvalidAuthoritySet); - } - - let next_set = AuthoritySet { - authorities: change.next_authorities, - set_id: self.storage.current_authority_set().set_id + 1, - }; - - let height = (*header.number()) - .checked_add(&change.delay) - .ok_or(ImportError::ScheduledHeightOverflow)?; - - let scheduled_change = ScheduledChange { - authority_set: next_set, - height, - }; - - // Note: It's important that the signal hash is updated if a header schedules a - // change or else we end up with inconsistencies in other places. 
- signal_hash = Some(hash); - self.storage.schedule_next_set_change(hash, scheduled_change); - - // If the delay is 0 this header will enact the change it signaled - height == *header.number() - } else { - false - } - }; - - self.storage.write_header(&ImportedHeader { - header, - requires_justification, - is_finalized: false, - signal_hash, - }); - - Ok(()) - } - - /// Verify that a previously imported header can be finalized with the given GRANDPA finality - /// proof. If the header enacts an authority set change the change will be applied once the - /// header has been finalized. - pub fn import_finality_proof(&mut self, hash: H::Hash, proof: FinalityProof) -> Result<(), FinalizationError> { - // Make sure that we've previously imported this header - let header = self - .storage - .header_by_hash(hash) - .ok_or(FinalizationError::UnknownHeader)?; - - // We don't want to finalize an ancestor of an already finalized - // header, this would be inconsistent - let last_finalized = self.storage.best_finalized_header(); - if header.number() <= last_finalized.number() { - return Err(FinalizationError::OldHeader); - } - - let current_authority_set = self.storage.current_authority_set(); - let voter_set = VoterSet::new(current_authority_set.authorities).expect( - "We verified the correctness of the authority list during header import, - before writing them to storage. 
This must always be valid.", - ); - verify_justification::( - (hash, *header.number()), - current_authority_set.set_id, - voter_set, - &proof.0, - ) - .map_err(|_| FinalizationError::InvalidJustification)?; - frame_support::debug::trace!("Received valid justification for {:?}", header); - - frame_support::debug::trace!( - "Checking ancestry for headers between {:?} and {:?}", - last_finalized, - header - ); - let mut finalized_headers = - if let Some(ancestors) = headers_between(&self.storage, last_finalized, header.clone()) { - // Since we only try and finalize headers with a height strictly greater - // than `best_finalized` if `headers_between` returns Some we must have - // at least one element. If we don't something's gone wrong, so best - // to die before we write to storage. - assert_eq!( - ancestors.is_empty(), - false, - "Empty ancestry list returned from `headers_between()`", - ); - - // Check if any of our ancestors `requires_justification` a.k.a schedule authority - // set changes. If they're still waiting to be finalized we must reject this - // justification. We don't include our current header in this check. - // - // We do this because it is important to to import justifications _in order_, - // otherwise we risk finalizing headers on competing chains. - let requires_justification = ancestors.iter().skip(1).find(|h| h.requires_justification); - if requires_justification.is_some() { - return Err(FinalizationError::PrematureJustification); - } - - ancestors - } else { - return Err(FinalizationError::AncestryCheckFailed); - }; - - // If the current header was marked as `requires_justification` it means that it enacts a - // new authority set change. When we finalize the header we need to update the current - // authority set. - if header.requires_justification { - const SIGNAL_HASH_PROOF: &str = "When we import a header we only mark it as - `requires_justification` if we have checked that it contains a signal hash. 
Therefore - this must always be valid."; - - const ENACT_SET_PROOF: &str = - "Headers must only be marked as `requires_justification` if there's a scheduled change in storage."; - - // If we are unable to enact an authority set it means our storage entry for scheduled - // changes is missing. Best to crash since this is likely a bug. - let _ = self - .storage - .enact_authority_set(header.signal_hash.expect(SIGNAL_HASH_PROOF)) - .expect(ENACT_SET_PROOF); - } - - for header in finalized_headers.iter_mut() { - header.is_finalized = true; - header.requires_justification = false; - header.signal_hash = None; - self.storage.write_header(header); - } - - self.storage.update_best_finalized(hash); - - Ok(()) - } -} - -/// Returns the lineage of headers between [child, ancestor) -fn headers_between( - storage: &S, - ancestor: ImportedHeader, - child: ImportedHeader, -) -> Option>> -where - S: BridgeStorage
, - H: HeaderT, -{ - let mut ancestors = vec![]; - let mut current_header = child; - - while ancestor.hash() != current_header.hash() { - // We've gotten to the same height and we're not related - if ancestor.number() >= current_header.number() { - return None; - } - - let parent = storage.header_by_hash(*current_header.parent_hash()); - ancestors.push(current_header); - current_header = match parent { - Some(h) => h, - None => return None, - } - } - - Some(ancestors) -} - -pub(crate) fn find_scheduled_change(header: &H) -> Option> { - let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); - - let filter_log = |log: ConsensusLog| match log { - ConsensusLog::ScheduledChange(change) => Some(change), - _ => None, - }; - - // find the first consensus digest with the right ID which converts to - // the right kind of consensus log. - header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::*; - use crate::{BestFinalized, BestHeight, HeaderId, ImportedHeaders, PalletStorage}; - use bp_test_utils::{alice, authority_list, bob, make_justification_for_header}; - use codec::Encode; - use frame_support::{assert_err, assert_ok}; - use frame_support::{StorageMap, StorageValue}; - use sp_finality_grandpa::{AuthorityId, SetId}; - use sp_runtime::{Digest, DigestItem}; - - fn schedule_next_change( - authorities: Vec, - set_id: SetId, - height: TestNumber, - ) -> ScheduledChange { - let authorities = authorities.into_iter().map(|id| (id, 1u64)).collect(); - let authority_set = AuthoritySet::new(authorities, set_id); - ScheduledChange { authority_set, height } - } - - // Useful for quickly writing a chain of headers to storage - // Input is expected in the form: vec![(num, requires_justification, is_finalized)] - fn write_headers>( - storage: &mut S, - headers: Vec<(u64, bool, bool)>, - ) -> Vec> { - let mut imported_headers = vec![]; - let genesis = ImportedHeader { - header: test_header(0), - 
requires_justification: false, - is_finalized: true, - signal_hash: None, - }; - - >::put(genesis.hash()); - storage.write_header(&genesis); - imported_headers.push(genesis); - - for (num, requires_justification, is_finalized) in headers { - let header = ImportedHeader { - header: test_header(num), - requires_justification, - is_finalized, - signal_hash: None, - }; - - storage.write_header(&header); - imported_headers.push(header); - } - - imported_headers - } - - // Given a block number will generate a chain of headers which don't require justification and - // are not considered to be finalized. - fn write_default_headers>( - storage: &mut S, - headers: Vec, - ) -> Vec> { - let headers = headers.iter().map(|num| (*num, false, false)).collect(); - write_headers(storage, headers) - } - - #[test] - fn fails_to_import_old_header() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let parent = unfinalized_header(5); - storage.write_header(&parent); - storage.update_best_finalized(parent.hash()); - - let header = test_header(1); - let mut verifier = Verifier { storage }; - assert_err!(verifier.import_header(header.hash(), header), ImportError::OldHeader); - }) - } - - #[test] - fn fails_to_import_header_without_parent() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let parent = unfinalized_header(1); - storage.write_header(&parent); - storage.update_best_finalized(parent.hash()); - - // By default the parent is `0x00` - let header = TestHeader::new_from_number(2); - - let mut verifier = Verifier { storage }; - assert_err!( - verifier.import_header(header.hash(), header), - ImportError::MissingParent - ); - }) - } - - #[test] - fn fails_to_import_header_twice() { - run_test(|| { - let storage = PalletStorage::::new(); - let header = test_header(1); - >::put(header.hash()); - - let imported_header = ImportedHeader { - header: header.clone(), - requires_justification: false, - is_finalized: false, - signal_hash: None, - }; - 
>::insert(header.hash(), &imported_header); - - let mut verifier = Verifier { storage }; - assert_err!(verifier.import_header(header.hash(), header), ImportError::OldHeader); - }) - } - - #[test] - fn succesfully_imports_valid_but_unfinalized_header() { - run_test(|| { - let storage = PalletStorage::::new(); - let parent = test_header(1); - let parent_hash = parent.hash(); - >::put(parent.hash()); - - let imported_header = ImportedHeader { - header: parent, - requires_justification: false, - is_finalized: true, - signal_hash: None, - }; - >::insert(parent_hash, &imported_header); - - let header = test_header(2); - let mut verifier = Verifier { - storage: storage.clone(), - }; - assert_ok!(verifier.import_header(header.hash(), header.clone())); - - let stored_header = storage - .header_by_hash(header.hash()) - .expect("Should have been imported successfully"); - assert_eq!(stored_header.is_finalized, false); - assert_eq!(stored_header.hash(), storage.best_headers()[0].hash); - }) - } - - #[test] - fn successfully_imports_two_different_headers_at_same_height() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - // We want to write the genesis header to storage - let _ = write_headers(&mut storage, vec![]); - - // Both of these headers have the genesis header as their parent - let header_on_fork1 = test_header(1); - let mut header_on_fork2 = test_header(1); - - // We need to change _something_ to make it a different header - header_on_fork2.state_root = [1; 32].into(); - - let mut verifier = Verifier { - storage: storage.clone(), - }; - - // It should be fine to import both - assert_ok!(verifier.import_header(header_on_fork1.hash(), header_on_fork1.clone())); - assert_ok!(verifier.import_header(header_on_fork2.hash(), header_on_fork2.clone())); - - // We should have two headers marked as being the best since they're - // both at the same height - let best_headers = storage.best_headers(); - assert_eq!(best_headers.len(), 2); - assert_eq!( - 
best_headers[0], - HeaderId { - number: *header_on_fork1.number(), - hash: header_on_fork1.hash() - } - ); - assert_eq!( - best_headers[1], - HeaderId { - number: *header_on_fork2.number(), - hash: header_on_fork2.hash() - } - ); - assert_eq!(>::get(), 1); - }) - } - - #[test] - fn correctly_updates_the_best_header_given_a_better_header() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - // We want to write the genesis header to storage - let _ = write_headers(&mut storage, vec![]); - - // Write two headers at the same height to storage. - let best_header = test_header(1); - let mut also_best_header = test_header(1); - - // We need to change _something_ to make it a different header - also_best_header.state_root = [1; 32].into(); - - let mut verifier = Verifier { - storage: storage.clone(), - }; - - // It should be fine to import both - assert_ok!(verifier.import_header(best_header.hash(), best_header.clone())); - assert_ok!(verifier.import_header(also_best_header.hash(), also_best_header)); - - // The headers we manually imported should have been marked as the best - // upon writing to storage. Let's confirm that. - assert_eq!(storage.best_headers().len(), 2); - assert_eq!(>::get(), 1); - - // Now let's build something at a better height. - let mut better_header = test_header(2); - better_header.parent_hash = best_header.hash(); - - assert_ok!(verifier.import_header(better_header.hash(), better_header.clone())); - - // Since `better_header` is the only one at height = 2 we should only have - // a single "best header" now. 
- let best_headers = storage.best_headers(); - assert_eq!(best_headers.len(), 1); - assert_eq!( - best_headers[0], - HeaderId { - number: *better_header.number(), - hash: better_header.hash() - } - ); - assert_eq!(>::get(), 2); - }) - } - - #[test] - fn doesnt_write_best_header_twice_upon_finalization() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let _imported_headers = write_default_headers(&mut storage, vec![1]); - - let set_id = 1; - let authorities = authority_list(); - let initial_authority_set = AuthoritySet::new(authorities.clone(), set_id); - storage.update_current_authority_set(initial_authority_set); - - // Let's import our header - let header = test_header(2); - let mut verifier = Verifier { - storage: storage.clone(), - }; - assert_ok!(verifier.import_header(header.hash(), header.clone())); - - // Our header should be the only best header we have - assert_eq!(storage.best_headers()[0].hash, header.hash()); - assert_eq!(storage.best_headers().len(), 1); - - // Now lets finalize our best header - let grandpa_round = 1; - let justification = make_justification_for_header(&header, grandpa_round, set_id, &authorities).encode(); - assert_ok!(verifier.import_finality_proof(header.hash(), justification.into())); - - // Our best header should only appear once in the list of best headers - assert_eq!(storage.best_headers()[0].hash, header.hash()); - assert_eq!(storage.best_headers().len(), 1); - }) - } - - #[test] - fn related_headers_are_ancestors() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let mut imported_headers = write_default_headers(&mut storage, vec![1, 2, 3]); - - for header in imported_headers.iter() { - assert!(storage.header_exists(header.hash())); - } - - let ancestor = imported_headers.remove(0); - let child = imported_headers.pop().unwrap(); - let ancestors = headers_between(&storage, ancestor, child); - - assert!(ancestors.is_some()); - assert_eq!(ancestors.unwrap().len(), 3); - }) - } - - #[test] - fn 
unrelated_headers_are_not_ancestors() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut imported_headers = write_default_headers(&mut storage, vec![1, 2, 3]); - for header in imported_headers.iter() { - assert!(storage.header_exists(header.hash())); - } - - // Need to give it a different parent_hash or else it'll be - // related to our test genesis header - let mut bad_ancestor = test_header(0); - bad_ancestor.parent_hash = [1u8; 32].into(); - let bad_ancestor = ImportedHeader { - header: bad_ancestor, - requires_justification: false, - is_finalized: false, - signal_hash: None, - }; - - let child = imported_headers.pop().unwrap(); - let ancestors = headers_between(&storage, bad_ancestor, child); - assert!(ancestors.is_none()); - }) - } - - #[test] - fn ancestor_newer_than_child_is_not_related() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut imported_headers = write_default_headers(&mut storage, vec![1, 2, 3]); - for header in imported_headers.iter() { - assert!(storage.header_exists(header.hash())); - } - - // What if we have an "ancestor" that's newer than child? 
- let new_ancestor = test_header(5); - let new_ancestor = ImportedHeader { - header: new_ancestor, - requires_justification: false, - is_finalized: false, - signal_hash: None, - }; - - let child = imported_headers.pop().unwrap(); - let ancestors = headers_between(&storage, new_ancestor, child); - assert!(ancestors.is_none()); - }) - } - - #[test] - fn doesnt_import_header_which_schedules_change_with_invalid_authority_set() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let _imported_headers = write_default_headers(&mut storage, vec![1]); - let mut header = test_header(2); - - // This is an *invalid* authority set because the combined weight of the - // authorities is greater than `u64::MAX` - let consensus_log = ConsensusLog::::ScheduledChange(sp_finality_grandpa::ScheduledChange { - next_authorities: vec![(alice(), u64::MAX), (bob(), u64::MAX)], - delay: 0, - }); - - header.digest = Digest:: { - logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())], - }; - - let mut verifier = Verifier { storage }; - - assert_eq!( - verifier.import_header(header.hash(), header).unwrap_err(), - ImportError::InvalidAuthoritySet - ); - }) - } - - #[test] - fn finalizes_header_which_doesnt_enact_or_schedule_a_new_authority_set() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let _imported_headers = write_default_headers(&mut storage, vec![1]); - - // Nothing special about this header, yet GRANDPA may have created a justification - // for it since it does that periodically - let header = test_header(2); - - let set_id = 1; - let authorities = authority_list(); - let authority_set = AuthoritySet::new(authorities.clone(), set_id); - storage.update_current_authority_set(authority_set); - - // We'll need this justification to finalize the header - let grandpa_round = 1; - let justification = make_justification_for_header(&header, grandpa_round, set_id, &authorities).encode(); - - let mut verifier = Verifier { - storage: 
storage.clone(), - }; - - assert_ok!(verifier.import_header(header.hash(), header.clone())); - assert_ok!(verifier.import_finality_proof(header.hash(), justification.into())); - assert_eq!(storage.best_finalized_header().header, header); - }) - } - - #[test] - fn correctly_verifies_and_finalizes_chain_of_headers() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let imported_headers = write_default_headers(&mut storage, vec![1, 2]); - let header = test_header(3); - - let set_id = 1; - let authorities = authority_list(); - let authority_set = AuthoritySet { - authorities: authorities.clone(), - set_id, - }; - storage.update_current_authority_set(authority_set); - - let grandpa_round = 1; - let justification = make_justification_for_header(&header, grandpa_round, set_id, &authorities).encode(); - - let mut verifier = Verifier { - storage: storage.clone(), - }; - assert!(verifier.import_header(header.hash(), header.clone()).is_ok()); - assert!(verifier - .import_finality_proof(header.hash(), justification.into()) - .is_ok()); - - // Make sure we marked the our headers as finalized - assert!(storage.header_by_hash(imported_headers[1].hash()).unwrap().is_finalized); - assert!(storage.header_by_hash(imported_headers[2].hash()).unwrap().is_finalized); - assert!(storage.header_by_hash(header.hash()).unwrap().is_finalized); - - // Make sure the header at the highest height is the best finalized - assert_eq!(storage.best_finalized_header().header, header); - }); - } - - #[test] - fn updates_authority_set_upon_finalizing_header_which_enacts_change() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let genesis_hash = write_headers(&mut storage, vec![])[0].hash(); - - // We want this header to indicate that there's an upcoming set change on this fork - let parent = ImportedHeader { - header: test_header(1), - requires_justification: false, - is_finalized: false, - signal_hash: Some(genesis_hash), - }; - storage.write_header(&parent); - - let set_id 
= 1; - let authorities = authority_list(); - let initial_authority_set = AuthoritySet::new(authorities.clone(), set_id); - storage.update_current_authority_set(initial_authority_set); - - // This header enacts an authority set change upon finalization - let header = test_header(2); - - let grandpa_round = 1; - let justification = make_justification_for_header(&header, grandpa_round, set_id, &authorities).encode(); - - // Schedule a change at the height of our header - let set_id = 2; - let height = *header.number(); - let authorities = vec![alice()]; - let change = schedule_next_change(authorities, set_id, height); - storage.schedule_next_set_change(genesis_hash, change.clone()); - - let mut verifier = Verifier { - storage: storage.clone(), - }; - - assert_ok!(verifier.import_header(header.hash(), header.clone())); - assert_eq!(storage.missing_justifications().len(), 1); - assert_eq!(storage.missing_justifications()[0].hash, header.hash()); - - assert_ok!(verifier.import_finality_proof(header.hash(), justification.into())); - assert_eq!(storage.best_finalized_header().header, header); - - // Make sure that we have updated the set now that we've finalized our header - assert_eq!(storage.current_authority_set(), change.authority_set); - assert!(storage.missing_justifications().is_empty()); - }) - } - - #[test] - fn importing_finality_proof_for_already_finalized_header_doesnt_work() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let genesis = test_header(0); - - let genesis = ImportedHeader { - header: genesis, - requires_justification: false, - is_finalized: true, - signal_hash: None, - }; - - // Make sure that genesis is the best finalized header - >::put(genesis.hash()); - storage.write_header(&genesis); - - let mut verifier = Verifier { storage }; - - // Now we want to try and import it again to see what happens - assert_eq!( - verifier - .import_finality_proof(genesis.hash(), vec![4, 2].into()) - .unwrap_err(), - FinalizationError::OldHeader - ); 
- }); - } -} diff --git a/polkadot/bridges/primitives/chain-kusama/Cargo.toml b/polkadot/bridges/primitives/chain-kusama/Cargo.toml new file mode 100644 index 0000000000..70ff3b844d --- /dev/null +++ b/polkadot/bridges/primitives/chain-kusama/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "bp-kusama" +description = "Primitives of Kusama runtime." +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] + +# Bridge Dependencies +bp-messages = { path = "../messages", default-features = false } +bp-polkadot-core = { path = "../polkadot-core", default-features = false } +bp-runtime = { path = "../runtime", default-features = false } + +# Substrate Based Dependencies +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +[features] +default = ["std"] +std = [ + "bp-messages/std", + "bp-polkadot-core/std", + "bp-runtime/std", + "sp-api/std", + "sp-std/std", +] diff --git a/polkadot/bridges/primitives/chain-kusama/src/lib.rs b/polkadot/bridges/primitives/chain-kusama/src/lib.rs new file mode 100644 index 0000000000..7163d15ef1 --- /dev/null +++ b/polkadot/bridges/primitives/chain-kusama/src/lib.rs @@ -0,0 +1,117 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +// RuntimeApi generated functions +#![allow(clippy::too_many_arguments)] +// Runtime-generated DecodeLimit::decode_all_with_depth_limit +#![allow(clippy::unnecessary_mut_passed)] + +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; +use sp_std::prelude::*; + +pub use bp_polkadot_core::*; + +/// Kusama Chain +pub type Kusama = PolkadotLike; + +// We use this to get the account on Kusama (target) which is derived from Polkadot's (source) +// account. +pub fn derive_account_from_polkadot_id(id: bp_runtime::SourceAccount) -> AccountId { + let encoded_id = bp_runtime::derive_account_id(bp_runtime::POLKADOT_BRIDGE_INSTANCE, id); + AccountIdConverter::convert(encoded_id) +} + +/// Name of the `KusamaFinalityApi::best_finalized` runtime method. +pub const BEST_FINALIZED_KUSAMA_HEADER_METHOD: &str = "KusamaFinalityApi_best_finalized"; +/// Name of the `KusamaFinalityApi::is_known_header` runtime method. +pub const IS_KNOWN_KUSAMA_HEADER_METHOD: &str = "KusamaFinalityApi_is_known_header"; + +/// Name of the `ToKusamaOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. +pub const TO_KUSAMA_ESTIMATE_MESSAGE_FEE_METHOD: &str = + "ToKusamaOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; +/// Name of the `ToKusamaOutboundLaneApi::messages_dispatch_weight` runtime method. +pub const TO_KUSAMA_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToKusamaOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToKusamaOutboundLaneApi::latest_generated_nonce` runtime method. +pub const TO_KUSAMA_LATEST_GENERATED_NONCE_METHOD: &str = "ToKusamaOutboundLaneApi_latest_generated_nonce"; +/// Name of the `ToKusamaOutboundLaneApi::latest_received_nonce` runtime method. 
+pub const TO_KUSAMA_LATEST_RECEIVED_NONCE_METHOD: &str = "ToKusamaOutboundLaneApi_latest_received_nonce"; + +/// Name of the `FromKusamaInboundLaneApi::latest_received_nonce` runtime method. +pub const FROM_KUSAMA_LATEST_RECEIVED_NONCE_METHOD: &str = "FromKusamaInboundLaneApi_latest_received_nonce"; +/// Name of the `FromKusamaInboundLaneApi::latest_confirmed_nonce` runtime method. +pub const FROM_KUSAMA_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromKusamaInboundLaneApi_latest_confirmed_nonce"; +/// Name of the `FromKusamaInboundLaneApi::unrewarded_relayers_state` runtime method. +pub const FROM_KUSAMA_UNREWARDED_RELAYERS_STATE: &str = "FromKusamaInboundLaneApi_unrewarded_relayers_state"; + +sp_api::decl_runtime_apis! { + /// API for querying information about the finalized Kusama headers. + /// + /// This API is implemented by runtimes that are bridging with the Kusama chain, not the + /// Kusama runtime itself. + pub trait KusamaFinalityApi { + /// Returns number and hash of the best finalized header known to the bridge module. + fn best_finalized() -> (BlockNumber, Hash); + /// Returns true if the header is known to the runtime. + fn is_known_header(hash: Hash) -> bool; + } + + /// Outbound message lane API for messages that are sent to Kusama chain. + /// + /// This API is implemented by runtimes that are sending messages to Kusama chain, not the + /// Kusama runtime itself. + pub trait ToKusamaOutboundLaneApi { + /// Estimate message delivery and dispatch fee that needs to be paid by the sender on + /// this chain. + /// + /// Returns `None` if message is too expensive to be sent to Kusama from this chain. + /// + /// Please keep in mind that this method returns lowest message fee required for message + /// to be accepted to the lane. It may be a good idea to pay a bit over this price to account + /// for future exchange rate changes and guarantee that relayer would deliver your message + /// to the target chain. 
+ fn estimate_message_delivery_and_dispatch_fee( + lane_id: LaneId, + payload: OutboundPayload, + ) -> Option; + /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. + /// + /// If some (or all) messages are missing from the storage, they'll also will + /// be missing from the resulting vector. The vector is ordered by the nonce. + fn messages_dispatch_weight( + lane: LaneId, + begin: MessageNonce, + end: MessageNonce, + ) -> Vec<(MessageNonce, Weight, u32)>; + /// Returns nonce of the latest message, received by bridged chain. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Returns nonce of the latest message, generated by given lane. + fn latest_generated_nonce(lane: LaneId) -> MessageNonce; + } + + /// Inbound message lane API for messages sent by Kusama chain. + /// + /// This API is implemented by runtimes that are receiving messages from Kusama chain, not the + /// Kusama runtime itself. + pub trait FromKusamaInboundLaneApi { + /// Returns nonce of the latest message, received by given lane. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Nonce of latest message that has been confirmed to the bridged chain. + fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; + /// State of the unrewarded relayers set at given lane. 
+ fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; + } +} diff --git a/polkadot/bridges/primitives/millau/Cargo.toml b/polkadot/bridges/primitives/chain-millau/Cargo.toml similarity index 95% rename from polkadot/bridges/primitives/millau/Cargo.toml rename to polkadot/bridges/primitives/chain-millau/Cargo.toml index 124d8199e2..67db08c208 100644 --- a/polkadot/bridges/primitives/millau/Cargo.toml +++ b/polkadot/bridges/primitives/chain-millau/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" # Bridge Dependencies -bp-message-lane = { path = "../message-lane", default-features = false } +bp-messages = { path = "../messages", default-features = false } bp-runtime = { path = "../runtime", default-features = false } fixed-hash = { version = "0.7.0", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } @@ -33,7 +33,7 @@ sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , [features] default = ["std"] std = [ - "bp-message-lane/std", + "bp-messages/std", "bp-runtime/std", "fixed-hash/std", "frame-support/std", diff --git a/polkadot/bridges/primitives/millau/src/lib.rs b/polkadot/bridges/primitives/chain-millau/src/lib.rs similarity index 88% rename from polkadot/bridges/primitives/millau/src/lib.rs rename to polkadot/bridges/primitives/chain-millau/src/lib.rs index 84096d116e..22f09cb5b0 100644 --- a/polkadot/bridges/primitives/millau/src/lib.rs +++ b/polkadot/bridges/primitives/chain-millau/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify @@ -22,7 +22,7 @@ mod millau_hash; -use bp_message_lane::{LaneId, MessageNonce, UnrewardedRelayersState}; +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState}; use bp_runtime::Chain; use frame_support::{ weights::{constants::WEIGHT_PER_SECOND, DispatchClass, Weight}, @@ -48,6 +48,11 @@ pub use millau_hash::MillauHash; /// Some reserve is reserved to account future chain growth. pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; +/// Number of bytes, included in the signed Millau transaction apart from the encoded call itself. +/// +/// Can be computed by subtracting encoded call size from raw transaction size. +pub const TX_EXTRA_BYTES: u32 = 103; + /// Maximal size (in bytes) of encoded (using `Encode::encode()`) account id. pub const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 32; @@ -71,26 +76,30 @@ pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 1024; /// Weight of single regular message delivery transaction on Millau chain. /// -/// This value is a result of `pallet_message_lane::Module::receive_messages_proof_weight()` call -/// for the case when single message of `pallet_message_lane::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered. +/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call +/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered. /// The message must have dispatch weight set to zero. The result then must be rounded up to account /// possible future runtime upgrades. pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_000_000_000; /// Increase of delivery transaction weight on Millau chain with every additional message byte. /// -/// This value is a result of `pallet_message_lane::WeightInfoExt::storage_proof_size_overhead(1)` call. 
The +/// This value is a result of `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The /// result then must be rounded up to account possible future runtime upgrades. pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000; /// Maximal weight of single message delivery confirmation transaction on Millau chain. /// -/// This value is a result of `pallet_message_lane::Module::receive_messages_delivery_proof` weight formula computation +/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` weight formula computation /// for the case when single message is confirmed. The result then must be rounded up to account possible future /// runtime upgrades. pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000; -/// The length of a session (how often authorities change) on Millau measured in of number of blocks. +/// The target length of a session (how often authorities change) on Millau measured in number of +/// blocks. +/// +/// Note that since this is a target, sessions may change before/after this time depending on network +/// conditions. pub const SESSION_LENGTH: BlockNumber = 5 * time_units::MINUTES; /// Re-export `time_units` to make usage easier. @@ -229,14 +238,8 @@ pub fn max_extrinsic_size() -> u32 { *BlockLength::get().max.get(DispatchClass::Normal) } -/// Name of the `MillauHeaderApi::best_block` runtime method. -pub const BEST_MILLAU_BLOCKS_METHOD: &str = "MillauHeaderApi_best_blocks"; -/// Name of the `MillauHeaderApi::finalized_block` runtime method. -pub const FINALIZED_MILLAU_BLOCK_METHOD: &str = "MillauHeaderApi_finalized_block"; -/// Name of the `MillauHeaderApi::is_known_block` runtime method. -pub const IS_KNOWN_MILLAU_BLOCK_METHOD: &str = "MillauHeaderApi_is_known_block"; -/// Name of the `MillauHeaderApi::incomplete_headers` runtime method. 
-pub const INCOMPLETE_MILLAU_HEADERS_METHOD: &str = "MillauHeaderApi_incomplete_headers"; +/// Name of the `MillauFinalityApi::best_finalized` runtime method. +pub const BEST_FINALIZED_MILLAU_HEADER_METHOD: &str = "MillauFinalityApi_best_finalized"; /// Name of the `ToMillauOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. pub const TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD: &str = @@ -258,7 +261,7 @@ pub const FROM_MILLAU_UNREWARDED_RELAYERS_STATE: &str = "FromMillauInboundLaneAp sp_api::decl_runtime_apis! { /// API for querying information about Millau headers from the Bridge Pallet instance. /// - /// This API is implemented by runtimes that are bridging with Millau chain, not the + /// This API is implemented by runtimes that are bridging with the Millau chain, not the /// Millau runtime itself. pub trait MillauHeaderApi { /// Returns number and hash of the best blocks known to the bridge module. @@ -281,6 +284,17 @@ sp_api::decl_runtime_apis! { fn is_finalized_block(hash: Hash) -> bool; } + /// API for querying information about the finalized Millau headers. + /// + /// This API is implemented by runtimes that are bridging with the Millau chain, not the + /// Millau runtime itself. + pub trait MillauFinalityApi { + /// Returns number and hash of the best finalized header known to the bridge module. + fn best_finalized() -> (BlockNumber, Hash); + /// Returns true if the header is known to the runtime. + fn is_known_header(hash: Hash) -> bool; + } + /// Outbound message lane API for messages that are sent to Millau chain. 
/// /// This API is implemented by runtimes that are sending messages to Millau chain, not the diff --git a/polkadot/bridges/primitives/millau/src/millau_hash.rs b/polkadot/bridges/primitives/chain-millau/src/millau_hash.rs similarity index 96% rename from polkadot/bridges/primitives/millau/src/millau_hash.rs rename to polkadot/bridges/primitives/chain-millau/src/millau_hash.rs index e917329d2c..936791217a 100644 --- a/polkadot/bridges/primitives/millau/src/millau_hash.rs +++ b/polkadot/bridges/primitives/chain-millau/src/millau_hash.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/primitives/chain-polkadot/Cargo.toml b/polkadot/bridges/primitives/chain-polkadot/Cargo.toml new file mode 100644 index 0000000000..22ded41b91 --- /dev/null +++ b/polkadot/bridges/primitives/chain-polkadot/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "bp-polkadot" +description = "Primitives of Polkadot runtime." 
+version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] + +# Bridge Dependencies +bp-messages = { path = "../messages", default-features = false } +bp-polkadot-core = { path = "../polkadot-core", default-features = false } +bp-runtime = { path = "../runtime", default-features = false } + +# Substrate Based Dependencies + +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +[features] +default = ["std"] +std = [ + "bp-messages/std", + "bp-polkadot-core/std", + "bp-runtime/std", + "sp-api/std", + "sp-std/std", +] diff --git a/polkadot/bridges/primitives/chain-polkadot/src/lib.rs b/polkadot/bridges/primitives/chain-polkadot/src/lib.rs new file mode 100644 index 0000000000..8e0d30cdb6 --- /dev/null +++ b/polkadot/bridges/primitives/chain-polkadot/src/lib.rs @@ -0,0 +1,117 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +#![cfg_attr(not(feature = "std"), no_std)] +// RuntimeApi generated functions +#![allow(clippy::too_many_arguments)] +// Runtime-generated DecodeLimit::decode_all_with_depth_limit +#![allow(clippy::unnecessary_mut_passed)] + +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; +use sp_std::prelude::*; + +pub use bp_polkadot_core::*; + +/// Polkadot Chain +pub type Polkadot = PolkadotLike; + +// We use this to get the account on Polkadot (target) which is derived from Kusama's (source) +// account. +pub fn derive_account_from_kusama_id(id: bp_runtime::SourceAccount) -> AccountId { + let encoded_id = bp_runtime::derive_account_id(bp_runtime::KUSAMA_BRIDGE_INSTANCE, id); + AccountIdConverter::convert(encoded_id) +} + +/// Name of the `PolkadotFinalityApi::best_finalized` runtime method. +pub const BEST_FINALIZED_POLKADOT_HEADER_METHOD: &str = "PolkadotFinalityApi_best_finalized"; +/// Name of the `PolkadotFinalityApi::is_known_header` runtime method. +pub const IS_KNOWN_POLKADOT_HEADER_METHOD: &str = "PolkadotFinalityApi_is_known_header"; + +/// Name of the `ToPolkadotOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. +pub const TO_POLKADOT_ESTIMATE_MESSAGE_FEE_METHOD: &str = + "ToPolkadotOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; +/// Name of the `ToPolkadotOutboundLaneApi::messages_dispatch_weight` runtime method. +pub const TO_POLKADOT_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToPolkadotOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToPolkadotOutboundLaneApi::latest_generated_nonce` runtime method. +pub const TO_POLKADOT_LATEST_GENERATED_NONCE_METHOD: &str = "ToPolkadotOutboundLaneApi_latest_generated_nonce"; +/// Name of the `ToPolkadotOutboundLaneApi::latest_received_nonce` runtime method. 
+pub const TO_POLKADOT_LATEST_RECEIVED_NONCE_METHOD: &str = "ToPolkadotOutboundLaneApi_latest_received_nonce"; + +/// Name of the `FromPolkadotInboundLaneApi::latest_received_nonce` runtime method. +pub const FROM_POLKADOT_LATEST_RECEIVED_NONCE_METHOD: &str = "FromPolkadotInboundLaneApi_latest_received_nonce"; +/// Name of the `FromPolkadotInboundLaneApi::latest_confirmed_nonce` runtime method. +pub const FROM_POLKADOT_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromPolkadotInboundLaneApi_latest_confirmed_nonce"; +/// Name of the `FromPolkadotInboundLaneApi::unrewarded_relayers_state` runtime method. +pub const FROM_POLKADOT_UNREWARDED_RELAYERS_STATE: &str = "FromPolkadotInboundLaneApi_unrewarded_relayers_state"; + +sp_api::decl_runtime_apis! { + /// API for querying information about the finalized Polkadot headers. + /// + /// This API is implemented by runtimes that are bridging with the Polkadot chain, not the + /// Polkadot runtime itself. + pub trait PolkadotFinalityApi { + /// Returns number and hash of the best finalized header known to the bridge module. + fn best_finalized() -> (BlockNumber, Hash); + /// Returns true if the header is known to the runtime. + fn is_known_header(hash: Hash) -> bool; + } + + /// Outbound message lane API for messages that are sent to Polkadot chain. + /// + /// This API is implemented by runtimes that are sending messages to Polkadot chain, not the + /// Polkadot runtime itself. + pub trait ToPolkadotOutboundLaneApi { + /// Estimate message delivery and dispatch fee that needs to be paid by the sender on + /// this chain. + /// + /// Returns `None` if message is too expensive to be sent to Polkadot from this chain. + /// + /// Please keep in mind that this method returns lowest message fee required for message + /// to be accepted to the lane. It may be good idea to pay a bit over this price to account + /// future exchange rate changes and guarantee that relayer would deliver your message + /// to the target chain. 
+ fn estimate_message_delivery_and_dispatch_fee( + lane_id: LaneId, + payload: OutboundPayload, + ) -> Option; + /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. + /// + /// If some (or all) messages are missing from the storage, they'll also will + /// be missing from the resulting vector. The vector is ordered by the nonce. + fn messages_dispatch_weight( + lane: LaneId, + begin: MessageNonce, + end: MessageNonce, + ) -> Vec<(MessageNonce, Weight, u32)>; + /// Returns nonce of the latest message, received by bridged chain. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Returns nonce of the latest message, generated by given lane. + fn latest_generated_nonce(lane: LaneId) -> MessageNonce; + } + + /// Inbound message lane API for messages sent by Polkadot chain. + /// + /// This API is implemented by runtimes that are receiving messages from Polkadot chain, not the + /// Polkadot runtime itself. + pub trait FromPolkadotInboundLaneApi { + /// Returns nonce of the latest message, received by given lane. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Nonce of latest message that has been confirmed to the bridged chain. + fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; + /// State of the unrewarded relayers set at given lane. 
+ fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; + } +} diff --git a/polkadot/bridges/primitives/rialto/Cargo.toml b/polkadot/bridges/primitives/chain-rialto/Cargo.toml similarity index 92% rename from polkadot/bridges/primitives/rialto/Cargo.toml rename to polkadot/bridges/primitives/chain-rialto/Cargo.toml index d6c12fc848..7e039a40ac 100644 --- a/polkadot/bridges/primitives/rialto/Cargo.toml +++ b/polkadot/bridges/primitives/chain-rialto/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" # Bridge Dependencies -bp-message-lane = { path = "../message-lane", default-features = false } +bp-messages = { path = "../messages", default-features = false } bp-runtime = { path = "../runtime", default-features = false } # Substrate Based Dependencies @@ -25,7 +25,7 @@ sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , [features] default = ["std"] std = [ - "bp-message-lane/std", + "bp-messages/std", "bp-runtime/std", "frame-support/std", "frame-system/std", diff --git a/polkadot/bridges/primitives/rialto/src/lib.rs b/polkadot/bridges/primitives/chain-rialto/src/lib.rs similarity index 88% rename from polkadot/bridges/primitives/rialto/src/lib.rs rename to polkadot/bridges/primitives/chain-rialto/src/lib.rs index 706e2f2785..c10f31bae3 100644 --- a/polkadot/bridges/primitives/rialto/src/lib.rs +++ b/polkadot/bridges/primitives/chain-rialto/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify @@ -20,7 +20,7 @@ // Runtime-generated DecodeLimit::decode_all_With_depth_limit #![allow(clippy::unnecessary_mut_passed)] -use bp_message_lane::{LaneId, MessageNonce, UnrewardedRelayersState}; +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState}; use bp_runtime::Chain; use frame_support::{ weights::{constants::WEIGHT_PER_SECOND, DispatchClass, Weight}, @@ -39,6 +39,11 @@ use sp_std::prelude::*; /// Some reserve is reserved to account future chain growth. pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; +/// Number of bytes, included in the signed Rialto transaction apart from the encoded call itself. +/// +/// Can be computed by subtracting encoded call size from raw transaction size. +pub const TX_EXTRA_BYTES: u32 = 103; + /// Maximal size (in bytes) of encoded (using `Encode::encode()`) account id. pub const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 32; @@ -62,26 +67,30 @@ pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 128; /// Weight of single regular message delivery transaction on Rialto chain. /// -/// This value is a result of `pallet_message_lane::Module::receive_messages_proof_weight()` call -/// for the case when single message of `pallet_message_lane::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered. +/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call +/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered. /// The message must have dispatch weight set to zero. The result then must be rounded up to account /// possible future runtime upgrades. pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_000_000_000; /// Increase of delivery transaction weight on Rialto chain with every additional message byte. /// -/// This value is a result of `pallet_message_lane::WeightInfoExt::storage_proof_size_overhead(1)` call. 
The +/// This value is a result of `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The /// result then must be rounded up to account possible future runtime upgrades. pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000; /// Maximal weight of single message delivery confirmation transaction on Rialto chain. /// -/// This value is a result of `pallet_message_lane::Module::receive_messages_delivery_proof` weight formula computation +/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` weight formula computation /// for the case when single message is confirmed. The result then must be rounded up to account possible future /// runtime upgrades. pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000; -/// The length of a session (how often authorities change) on Rialto measured in of number of blocks. +/// The target length of a session (how often authorities change) on Rialto measured in of number of +/// blocks. +/// +/// Note that since this is a target sessions may change before/after this time depending on network +/// conditions. pub const SESSION_LENGTH: BlockNumber = 4; /// Re-export `time_units` to make usage easier. @@ -190,14 +199,8 @@ pub fn max_extrinsic_size() -> u32 { *BlockLength::get().max.get(DispatchClass::Normal) } -/// Name of the `RialtoHeaderApi::best_blocks` runtime method. -pub const BEST_RIALTO_BLOCKS_METHOD: &str = "RialtoHeaderApi_best_blocks"; -/// Name of the `RialtoHeaderApi::finalized_block` runtime method. -pub const FINALIZED_RIALTO_BLOCK_METHOD: &str = "RialtoHeaderApi_finalized_block"; -/// Name of the `RialtoHeaderApi::is_known_block` runtime method. -pub const IS_KNOWN_RIALTO_BLOCK_METHOD: &str = "RialtoHeaderApi_is_known_block"; -/// Name of the `RialtoHeaderApi::incomplete_headers` runtime method. 
-pub const INCOMPLETE_RIALTO_HEADERS_METHOD: &str = "RialtoHeaderApi_incomplete_headers"; +/// Name of the `RialtoFinalityApi::best_finalized` runtime method. +pub const BEST_FINALIZED_RIALTO_HEADER_METHOD: &str = "RialtoFinalityApi_best_finalized"; /// Name of the `ToRialtoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. pub const TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD: &str = @@ -219,7 +222,7 @@ pub const FROM_RIALTO_UNREWARDED_RELAYERS_STATE: &str = "FromRialtoInboundLaneAp sp_api::decl_runtime_apis! { /// API for querying information about Rialto headers from the Bridge Pallet instance. /// - /// This API is implemented by runtimes that are bridging with Rialto chain, not the + /// This API is implemented by runtimes that are bridging with the Rialto chain, not the /// Rialto runtime itself. pub trait RialtoHeaderApi { /// Returns number and hash of the best blocks known to the bridge module. @@ -242,6 +245,17 @@ sp_api::decl_runtime_apis! { fn is_finalized_block(hash: Hash) -> bool; } + /// API for querying information about the finalized Rialto headers. + /// + /// This API is implemented by runtimes that are bridging with the Rialto chain, not the + /// Rialto runtime itself. + pub trait RialtoFinalityApi { + /// Returns number and hash of the best finalized header known to the bridge module. + fn best_finalized() -> (BlockNumber, Hash); + /// Returns true if the header is known to the runtime. + fn is_known_header(hash: Hash) -> bool; + } + /// Outbound message lane API for messages that are sent to Rialto chain. 
/// /// This API is implemented by runtimes that are sending messages to Rialto chain, not the diff --git a/polkadot/bridges/primitives/kusama/Cargo.toml b/polkadot/bridges/primitives/chain-rococo/Cargo.toml similarity index 55% rename from polkadot/bridges/primitives/kusama/Cargo.toml rename to polkadot/bridges/primitives/chain-rococo/Cargo.toml index 784f0b0132..b97e8d9d1a 100644 --- a/polkadot/bridges/primitives/kusama/Cargo.toml +++ b/polkadot/bridges/primitives/chain-rococo/Cargo.toml @@ -1,36 +1,36 @@ [package] -name = "bp-kusama" -description = "Primitives of Kusama runtime." +name = "bp-rococo" +description = "Primitives of Rococo runtime." version = "0.1.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] +parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] } # Bridge Dependencies - -bp-message-lane = { path = "../message-lane", default-features = false } +bp-header-chain = { path = "../header-chain", default-features = false } +bp-messages = { path = "../messages", default-features = false } +bp-polkadot-core = { path = "../polkadot-core", default-features = false } bp-runtime = { path = "../runtime", default-features = false } # Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-version = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } [features] default = ["std"] std = [ - "bp-message-lane/std", + "bp-header-chain/std", + "bp-messages/std", + "bp-polkadot-core/std", "bp-runtime/std", - "frame-support/std", - "frame-system/std", + "parity-scale-codec/std", "sp-api/std", - "sp-core/std", "sp-runtime/std", "sp-std/std", + "sp-version/std", ] diff --git a/polkadot/bridges/primitives/chain-rococo/src/lib.rs b/polkadot/bridges/primitives/chain-rococo/src/lib.rs new file mode 100644 index 0000000000..b79fdf6cfc --- /dev/null +++ b/polkadot/bridges/primitives/chain-rococo/src/lib.rs @@ -0,0 +1,172 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +#![cfg_attr(not(feature = "std"), no_std)] +// RuntimeApi generated functions +#![allow(clippy::too_many_arguments)] +// Runtime-generated DecodeLimit::decode_all_with_depth_limit +#![allow(clippy::unnecessary_mut_passed)] + +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; +use bp_runtime::Chain; +use sp_std::prelude::*; +use sp_version::RuntimeVersion; + +pub use bp_polkadot_core::*; + +/// Rococo Chain +pub type Rococo = PolkadotLike; + +pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; + +// NOTE: This needs to be kept up to date with the Rococo runtime found in the Polkadot repo. +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: sp_version::create_runtime_str!("rococo"), + impl_name: sp_version::create_runtime_str!("parity-rococo-v1.5"), + authoring_version: 0, + spec_version: 231, + impl_version: 0, + apis: sp_version::create_apis_vec![[]], + transaction_version: 0, +}; + +/// Rococo Runtime `Call` enum. +/// +/// The enum represents a subset of possible `Call`s we can send to Rococo chain. +/// Ideally this code would be auto-generated from Metadata, because we want to +/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. +/// +/// All entries here (like pretty much in the entire file) must be kept in sync with Rococo +/// `construct_runtime`, so that we maintain SCALE-compatibility. +/// +/// See: https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs +#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)] +pub enum Call { + /// Westend bridge pallet. 
+ #[codec(index = 40)] + BridgeGrandpaWestend(BridgeGrandpaWestendCall), +} + +#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)] +#[allow(non_camel_case_types)] +pub enum BridgeGrandpaWestendCall { + #[codec(index = 0)] + submit_finality_proof( + ::Header, + bp_header_chain::justification::GrandpaJustification<::Header>, + ), + #[codec(index = 1)] + initialize(bp_header_chain::InitializationData<::Header>), +} + +impl sp_runtime::traits::Dispatchable for Call { + type Origin = (); + type Config = (); + type Info = (); + type PostInfo = (); + + fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { + unimplemented!("The Call is not expected to be dispatched.") + } +} + +// We use this to get the account on Rococo (target) which is derived from Westend's (source) +// account. +pub fn derive_account_from_westend_id(id: bp_runtime::SourceAccount) -> AccountId { + let encoded_id = bp_runtime::derive_account_id(bp_runtime::WESTEND_BRIDGE_INSTANCE, id); + AccountIdConverter::convert(encoded_id) +} + +/// Name of the `RococoFinalityApi::best_finalized` runtime method. +pub const BEST_FINALIZED_ROCOCO_HEADER_METHOD: &str = "RococoFinalityApi_best_finalized"; +/// Name of the `RococoFinalityApi::is_known_header` runtime method. +pub const IS_KNOWN_ROCOCO_HEADER_METHOD: &str = "RococoFinalityApi_is_known_header"; + +/// Name of the `ToRococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. +pub const TO_ROCOCO_ESTIMATE_MESSAGE_FEE_METHOD: &str = + "ToRococoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; +/// Name of the `ToRococoOutboundLaneApi::messages_dispatch_weight` runtime method. +pub const TO_ROCOCO_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToRococoOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToRococoOutboundLaneApi::latest_generated_nonce` runtime method. 
+pub const TO_ROCOCO_LATEST_GENERATED_NONCE_METHOD: &str = "ToRococoOutboundLaneApi_latest_generated_nonce"; +/// Name of the `ToRococoOutboundLaneApi::latest_received_nonce` runtime method. +pub const TO_ROCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = "ToRococoOutboundLaneApi_latest_received_nonce"; + +/// Name of the `FromRococoInboundLaneApi::latest_received_nonce` runtime method. +pub const FROM_ROCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = "FromRococoInboundLaneApi_latest_received_nonce"; +/// Name of the `FromRococoInboundLaneApi::latest_confirmed_nonce` runtime method. +pub const FROM_ROCOCO_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromRococoInboundLaneApi_latest_confirmed_nonce"; +/// Name of the `FromRococoInboundLaneApi::unrewarded_relayers_state` runtime method. +pub const FROM_ROCOCO_UNREWARDED_RELAYERS_STATE: &str = "FromRococoInboundLaneApi_unrewarded_relayers_state"; + +sp_api::decl_runtime_apis! { + /// API for querying information about the finalized Rococo headers. + /// + /// This API is implemented by runtimes that are bridging with the Rococo chain, not the + /// Rococo runtime itself. + pub trait RococoFinalityApi { + /// Returns number and hash of the best finalized header known to the bridge module. + fn best_finalized() -> (BlockNumber, Hash); + /// Returns true if the header is known to the runtime. + fn is_known_header(hash: Hash) -> bool; + } + + /// Outbound message lane API for messages that are sent to Rococo chain. + /// + /// This API is implemented by runtimes that are sending messages to Rococo chain, not the + /// Rococo runtime itself. + pub trait ToRococoOutboundLaneApi { + /// Estimate message delivery and dispatch fee that needs to be paid by the sender on + /// this chain. + /// + /// Returns `None` if message is too expensive to be sent to Rococo from this chain. + /// + /// Please keep in mind that this method returns lowest message fee required for message + /// to be accepted to the lane. 
It may be good idea to pay a bit over this price to account + /// future exchange rate changes and guarantee that relayer would deliver your message + /// to the target chain. + fn estimate_message_delivery_and_dispatch_fee( + lane_id: LaneId, + payload: OutboundPayload, + ) -> Option; + /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. + /// + /// If some (or all) messages are missing from the storage, they'll also will + /// be missing from the resulting vector. The vector is ordered by the nonce. + fn messages_dispatch_weight( + lane: LaneId, + begin: MessageNonce, + end: MessageNonce, + ) -> Vec<(MessageNonce, Weight, u32)>; + /// Returns nonce of the latest message, received by bridged chain. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Returns nonce of the latest message, generated by given lane. + fn latest_generated_nonce(lane: LaneId) -> MessageNonce; + } + + /// Inbound message lane API for messages sent by Rococo chain. + /// + /// This API is implemented by runtimes that are receiving messages from Rococo chain, not the + /// Rococo runtime itself. + pub trait FromRococoInboundLaneApi { + /// Returns nonce of the latest message, received by given lane. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Nonce of latest message that has been confirmed to the bridged chain. + fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; + /// State of the unrewarded relayers set at given lane. + fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; + } +} diff --git a/polkadot/bridges/primitives/chain-westend/Cargo.toml b/polkadot/bridges/primitives/chain-westend/Cargo.toml new file mode 100644 index 0000000000..d5fda1ccef --- /dev/null +++ b/polkadot/bridges/primitives/chain-westend/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "bp-westend" +description = "Primitives of Westend runtime." 
+version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] } + +# Bridge Dependencies +bp-header-chain = { path = "../header-chain", default-features = false } +bp-messages = { path = "../messages", default-features = false } +bp-polkadot-core = { path = "../polkadot-core", default-features = false } +bp-runtime = { path = "../runtime", default-features = false } + +# Substrate Based Dependencies +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +[features] +default = ["std"] +std = [ + "bp-header-chain/std", + "bp-messages/std", + "bp-polkadot-core/std", + "bp-runtime/std", + "parity-scale-codec/std", + "sp-api/std", + "sp-runtime/std", + "sp-std/std", + "sp-version/std", +] diff --git a/polkadot/bridges/primitives/chain-westend/src/lib.rs b/polkadot/bridges/primitives/chain-westend/src/lib.rs new file mode 100644 index 0000000000..db97364ef4 --- /dev/null +++ b/polkadot/bridges/primitives/chain-westend/src/lib.rs @@ -0,0 +1,179 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +// RuntimeApi generated functions +#![allow(clippy::too_many_arguments)] +// Runtime-generated DecodeLimit::decode_all_with_depth_limit +#![allow(clippy::unnecessary_mut_passed)] + +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; +use bp_runtime::Chain; +use sp_std::prelude::*; +use sp_version::RuntimeVersion; + +pub use bp_polkadot_core::*; + +/// Westend Chain +pub type Westend = PolkadotLike; + +pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; + +// NOTE: This needs to be kept up to date with the Westend runtime found in the Polkadot repo. +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: sp_version::create_runtime_str!("westend"), + impl_name: sp_version::create_runtime_str!("parity-westend"), + authoring_version: 2, + spec_version: 51, + impl_version: 0, + apis: sp_version::create_apis_vec![[]], + transaction_version: 5, +}; + +/// Westend Runtime `Call` enum. +/// +/// The enum represents a subset of possible `Call`s we can send to Westend chain. +/// Ideally this code would be auto-generated from Metadata, because we want to +/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. +/// +/// All entries here (like pretty much in the entire file) must be kept in sync with Westend +/// `construct_runtime`, so that we maintain SCALE-compatibility. 
+/// +/// See: https://github.com/paritytech/polkadot/blob/master/runtime/westend/src/lib.rs +#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)] +pub enum Call { + /// Rococo bridge pallet. + #[codec(index = 40)] + BridgeGrandpaRococo(BridgeGrandpaRococoCall), +} + +#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)] +#[allow(non_camel_case_types)] +pub enum BridgeGrandpaRococoCall { + #[codec(index = 0)] + submit_finality_proof( + ::Header, + bp_header_chain::justification::GrandpaJustification<::Header>, + ), + #[codec(index = 1)] + initialize(bp_header_chain::InitializationData<::Header>), +} + +impl sp_runtime::traits::Dispatchable for Call { + type Origin = (); + type Config = (); + type Info = (); + type PostInfo = (); + + fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { + unimplemented!("The Call is not expected to be dispatched.") + } +} + +// We use this to get the account on Westend (target) which is derived from Rococo's (source) +// account. +pub fn derive_account_from_rococo_id(id: bp_runtime::SourceAccount) -> AccountId { + let encoded_id = bp_runtime::derive_account_id(bp_runtime::ROCOCO_BRIDGE_INSTANCE, id); + AccountIdConverter::convert(encoded_id) +} + +/// Name of the `WestendFinalityApi::best_finalized` runtime method. +pub const BEST_FINALIZED_WESTEND_HEADER_METHOD: &str = "WestendFinalityApi_best_finalized"; +/// Name of the `WestendFinalityApi::is_known_header` runtime method. +pub const IS_KNOWN_WESTEND_HEADER_METHOD: &str = "WestendFinalityApi_is_known_header"; + +/// Name of the `ToWestendOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. +pub const TO_WESTEND_ESTIMATE_MESSAGE_FEE_METHOD: &str = + "ToWestendOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; +/// Name of the `ToWestendOutboundLaneApi::messages_dispatch_weight` runtime method. 
+pub const TO_WESTEND_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToWestendOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToWestendOutboundLaneApi::latest_generated_nonce` runtime method. +pub const TO_WESTEND_LATEST_GENERATED_NONCE_METHOD: &str = "ToWestendOutboundLaneApi_latest_generated_nonce"; +/// Name of the `ToWestendOutboundLaneApi::latest_received_nonce` runtime method. +pub const TO_WESTEND_LATEST_RECEIVED_NONCE_METHOD: &str = "ToWestendOutboundLaneApi_latest_received_nonce"; + +/// Name of the `FromWestendInboundLaneApi::latest_received_nonce` runtime method. +pub const FROM_WESTEND_LATEST_RECEIVED_NONCE_METHOD: &str = "FromWestendInboundLaneApi_latest_received_nonce"; +/// Name of the `FromWestendInboundLaneApi::latest_confirmed_nonce` runtime method. +pub const FROM_WESTEND_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromWestendInboundLaneApi_latest_confirmed_nonce"; +/// Name of the `FromWestendInboundLaneApi::unrewarded_relayers_state` runtime method. +pub const FROM_WESTEND_UNREWARDED_RELAYERS_STATE: &str = "FromWestendInboundLaneApi_unrewarded_relayers_state"; + +/// The target length of a session (how often authorities change) on Westend measured in number of +/// blocks. +/// +/// Note that since this is a target sessions may change before/after this time depending on network +/// conditions. +pub const SESSION_LENGTH: BlockNumber = 10 * time_units::MINUTES; + +sp_api::decl_runtime_apis! { + /// API for querying information about the finalized Westend headers. + /// + /// This API is implemented by runtimes that are bridging with the Westend chain, not the + /// Westend runtime itself. + pub trait WestendFinalityApi { + /// Returns number and hash of the best finalized header known to the bridge module. + fn best_finalized() -> (BlockNumber, Hash); + /// Returns true if the header is known to the runtime. + fn is_known_header(hash: Hash) -> bool; + } + + /// Outbound message lane API for messages that are sent to Westend chain. 
+ /// + /// This API is implemented by runtimes that are sending messages to Westend chain, not the + /// Westend runtime itself. + pub trait ToWestendOutboundLaneApi { + /// Estimate message delivery and dispatch fee that needs to be paid by the sender on + /// this chain. + /// + /// Returns `None` if message is too expensive to be sent to Westend from this chain. + /// + /// Please keep in mind that this method returns lowest message fee required for message + /// to be accepted to the lane. It may be a good idea to pay a bit over this price to account + /// for future exchange rate changes and guarantee that relayer would deliver your message + /// to the target chain. + fn estimate_message_delivery_and_dispatch_fee( + lane_id: LaneId, + payload: OutboundPayload, + ) -> Option; + /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. + /// + /// If some (or all) messages are missing from the storage, they'll also + /// be missing from the resulting vector. The vector is ordered by the nonce. + fn messages_dispatch_weight( + lane: LaneId, + begin: MessageNonce, + end: MessageNonce, + ) -> Vec<(MessageNonce, Weight, u32)>; + /// Returns nonce of the latest message, received by bridged chain. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Returns nonce of the latest message, generated by given lane. + fn latest_generated_nonce(lane: LaneId) -> MessageNonce; + } + + /// Inbound message lane API for messages sent by Westend chain. + /// + /// This API is implemented by runtimes that are receiving messages from Westend chain, not the + /// Westend runtime itself. + pub trait FromWestendInboundLaneApi { + /// Returns nonce of the latest message, received by given lane. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Nonce of latest message that has been confirmed to the bridged chain. 
+ fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; + /// State of the unrewarded relayers set at given lane. + fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; + } +} diff --git a/polkadot/bridges/primitives/currency-exchange/src/lib.rs b/polkadot/bridges/primitives/currency-exchange/src/lib.rs index 131daf66ed..88695dbb5e 100644 --- a/polkadot/bridges/primitives/currency-exchange/src/lib.rs +++ b/polkadot/bridges/primitives/currency-exchange/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/primitives/ethereum-poa/src/lib.rs b/polkadot/bridges/primitives/ethereum-poa/src/lib.rs index dc65ac432b..57c539f2e2 100644 --- a/polkadot/bridges/primitives/ethereum-poa/src/lib.rs +++ b/polkadot/bridges/primitives/ethereum-poa/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/primitives/ethereum-poa/src/signatures.rs b/polkadot/bridges/primitives/ethereum-poa/src/signatures.rs index 21c6f3f860..a4e076f220 100644 --- a/polkadot/bridges/primitives/ethereum-poa/src/signatures.rs +++ b/polkadot/bridges/primitives/ethereum-poa/src/signatures.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. +// Copyright 2020-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/primitives/header-chain/src/justification.rs b/polkadot/bridges/primitives/header-chain/src/justification.rs index fef9aedac9..139b430324 100644 --- a/polkadot/bridges/primitives/header-chain/src/justification.rs +++ b/polkadot/bridges/primitives/header-chain/src/justification.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Module for checking GRANDPA Finality Proofs. +//! Pallet for checking GRANDPA Finality Proofs. //! //! Adapted copy of substrate/client/finality-grandpa/src/justification.rs. If origin //! will ever be moved to the sp_finality_grandpa, we should reuse that implementation. @@ -25,7 +25,7 @@ use frame_support::RuntimeDebug; use sp_finality_grandpa::{AuthorityId, AuthoritySignature, SetId}; use sp_runtime::traits::Header as HeaderT; use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; -use sp_std::prelude::Vec; +use sp_std::prelude::*; /// Justification verification error. #[derive(RuntimeDebug, PartialEq)] @@ -57,16 +57,12 @@ pub fn decode_justification_target( pub fn verify_justification( finalized_target: (Header::Hash, Header::Number), authorities_set_id: SetId, - authorities_set: VoterSet, - raw_justification: &[u8], + authorities_set: &VoterSet, + justification: &GrandpaJustification
, ) -> Result<(), Error> where Header::Number: finality_grandpa::BlockNumberOps, { - // Decode justification first - let justification = - GrandpaJustification::
::decode(&mut &*raw_justification).map_err(|_| Error::JustificationDecode)?; - // Ensure that it is justification for the expected header if (justification.commit.target_hash, justification.commit.target_number) != finalized_target { return Err(Error::InvalidJustificationTarget); @@ -76,7 +72,7 @@ where // signatures are valid. We'll check the validity of the signatures later since they're more // resource intensive to verify. let ancestry_chain = AncestryChain::new(&justification.votes_ancestries); - match finality_grandpa::validate_commit(&justification.commit, &authorities_set, &ancestry_chain) { + match finality_grandpa::validate_commit(&justification.commit, authorities_set, &ancestry_chain) { Ok(ref result) if result.ghost().is_some() => {} _ => return Err(Error::InvalidJustificationCommit), } @@ -130,7 +126,7 @@ where /// /// This particular proof is used to prove that headers on a bridged chain /// (so not our chain) have been finalized correctly. -#[derive(Encode, Decode, RuntimeDebug)] +#[derive(Encode, Decode, RuntimeDebug, Clone, PartialEq, Eq)] pub struct GrandpaJustification { /// The round (voting period) this justification is valid for. pub round: u64, @@ -140,6 +136,12 @@ pub struct GrandpaJustification { pub votes_ancestries: Vec
, } +impl crate::FinalityProof for GrandpaJustification { + fn target_header_number(&self) -> H::Number { + self.commit.target_number + } +} + /// A utility trait implementing `finality_grandpa::Chain` using a given set of headers. #[derive(RuntimeDebug)] struct AncestryChain { diff --git a/polkadot/bridges/primitives/header-chain/src/lib.rs b/polkadot/bridges/primitives/header-chain/src/lib.rs index 1663717646..adac6eb268 100644 --- a/polkadot/bridges/primitives/header-chain/src/lib.rs +++ b/polkadot/bridges/primitives/header-chain/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -26,10 +26,9 @@ use core::default::Default; use core::fmt::Debug; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; -use sp_finality_grandpa::{AuthorityList, SetId}; -use sp_runtime::traits::Header as HeaderT; +use sp_finality_grandpa::{AuthorityList, ConsensusLog, SetId, GRANDPA_ENGINE_ID}; use sp_runtime::RuntimeDebug; -use sp_std::vec::Vec; +use sp_runtime::{generic::OpaqueDigestItemId, traits::Header as HeaderT}; pub mod justification; @@ -56,6 +55,22 @@ impl AuthoritySet { } } +/// Data required for initializing the bridge pallet. +/// +/// The bridge needs to know where to start its sync from, and this provides that initial context. +#[derive(Default, Encode, Decode, RuntimeDebug, PartialEq, Eq, Clone)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub struct InitializationData { + /// The header from which we should start syncing. + pub header: H, + /// The initial authorities of the pallet. + pub authority_list: AuthorityList, + /// The ID of the initial authority set. + pub set_id: SetId, + /// Should the pallet block transaction immediately after initialization. 
+ pub is_halted: bool, +} + /// base trait for verifying transaction inclusion proofs. pub trait InclusionProofVerifier { /// Transaction type. @@ -78,7 +93,7 @@ pub trait HeaderChain { fn authority_set() -> AuthoritySet; /// Write a header finalized by GRANDPA to the underlying pallet storage. - fn append_header(header: H); + fn append_header(header: H) -> Result<(), E>; } impl HeaderChain for () { @@ -90,115 +105,29 @@ impl HeaderChain for () { AuthoritySet::default() } - fn append_header(_header: H) {} -} - -/// A trait for checking if a given child header is a direct descendant of an ancestor. -pub trait AncestryChecker { - /// Is the child header a descendant of the ancestor header? - fn are_ancestors(ancestor: &H, child: &H, proof: &P) -> bool; -} - -impl AncestryChecker for () { - fn are_ancestors(_ancestor: &H, _child: &H, _proof: &P) -> bool { - true + fn append_header(_header: H) -> Result<(), E> { + Ok(()) } } -/// A simple ancestry checker which verifies ancestry by walking every header between `child` and -/// `ancestor`. -pub struct LinearAncestryChecker; - -impl AncestryChecker> for LinearAncestryChecker { - fn are_ancestors(ancestor: &H, child: &H, proof: &Vec) -> bool { - // You can't be your own parent - if proof.len() < 2 { - return false; - } - - // Let's make sure that the given headers are actually in the proof - match proof.first() { - Some(first) if first == ancestor => {} - _ => return false, - } - - match proof.last() { - Some(last) if last == child => {} - _ => return false, - } - - // Now we actually check the proof - for i in 1..proof.len() { - if &proof[i - 1].hash() != proof[i].parent_hash() { - return false; - } - } - - true - } +/// Abstract finality proof that is justifying block finality. +pub trait FinalityProof: Clone + Send + Sync + Debug { + /// Return number of header that this proof is generated for. 
+ fn target_header_number(&self) -> Number; } -#[cfg(test)] -mod tests { - use super::*; - use bp_test_utils::test_header; - use sp_runtime::testing::Header; +/// Find header digest that schedules next GRANDPA authorities set. +pub fn find_grandpa_authorities_scheduled_change( + header: &H, +) -> Option> { + let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); - #[test] - fn can_verify_ancestry_correctly() { - let ancestor: Header = test_header(1); - let header2: Header = test_header(2); - let header3: Header = test_header(3); - let child: Header = test_header(4); + let filter_log = |log: ConsensusLog| match log { + ConsensusLog::ScheduledChange(change) => Some(change), + _ => None, + }; - let ancestry_proof = vec![ancestor.clone(), header2, header3, child.clone()]; - - assert!(LinearAncestryChecker::are_ancestors(&ancestor, &child, &ancestry_proof)); - } - - #[test] - fn does_not_verify_invalid_proof() { - let ancestor: Header = test_header(1); - let header2: Header = test_header(2); - let header3: Header = test_header(3); - let child: Header = test_header(4); - - let ancestry_proof = vec![ancestor.clone(), header3, header2, child.clone()]; - - let invalid = !LinearAncestryChecker::are_ancestors(&ancestor, &child, &ancestry_proof); - assert!(invalid); - } - - #[test] - fn header_is_not_allowed_to_be_its_own_ancestor() { - let ancestor: Header = test_header(1); - let child: Header = ancestor.clone(); - let ancestry_proof = vec![ancestor.clone()]; - - let invalid = !LinearAncestryChecker::are_ancestors(&ancestor, &child, &ancestry_proof); - assert!(invalid); - } - - #[test] - fn proof_is_considered_invalid_if_child_and_ancestor_do_not_match() { - let ancestor: Header = test_header(1); - let header2: Header = test_header(2); - let header3: Header = test_header(3); - let child: Header = test_header(4); - - let ancestry_proof = vec![ancestor, header3.clone(), header2.clone(), child]; - - let invalid = !LinearAncestryChecker::are_ancestors(&header2, &header3, 
&ancestry_proof); - assert!(invalid); - } - - #[test] - fn empty_proof_is_invalid() { - let ancestor: Header = test_header(1); - let child: Header = ancestor.clone(); - let ancestry_proof = vec![]; - - let invalid = !LinearAncestryChecker::are_ancestors(&ancestor, &child, &ancestry_proof); - assert!(invalid); - } + // find the first consensus digest with the right ID which converts to + // the right kind of consensus log. + header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) } diff --git a/polkadot/bridges/primitives/header-chain/tests/justification.rs b/polkadot/bridges/primitives/header-chain/tests/justification.rs index 81bd83b1ad..1ce739e453 100644 --- a/polkadot/bridges/primitives/header-chain/tests/justification.rs +++ b/polkadot/bridges/primitives/header-chain/tests/justification.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. +// Copyright 2020-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -16,26 +16,91 @@ //! Tests for Grandpa Justification code. 
-use bp_header_chain::justification::{verify_justification, Error, GrandpaJustification}; +use bp_header_chain::justification::{verify_justification, Error}; use bp_test_utils::*; -use codec::Encode; type TestHeader = sp_runtime::testing::Header; -fn make_justification_for_header_1() -> GrandpaJustification { - make_justification_for_header( - &test_header(1), - TEST_GRANDPA_ROUND, - TEST_GRANDPA_SET_ID, - &authority_list(), - ) +#[test] +fn valid_justification_accepted() { + let authorities = vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1), (EVE, 1)]; + let params = JustificationGeneratorParams { + header: test_header(1), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: authorities.clone(), + votes: 7, + forks: 3, + }; + + let justification = make_justification_for_header::(params.clone()); + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &voter_set(), + &justification, + ), + Ok(()), + ); + + assert_eq!(justification.commit.precommits.len(), authorities.len()); + assert_eq!(justification.votes_ancestries.len(), params.votes as usize); } #[test] -fn justification_with_invalid_encoding_rejected() { +fn valid_justification_accepted_with_single_fork() { + let params = JustificationGeneratorParams { + header: test_header(1), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1), (EVE, 1)], + votes: 5, + forks: 1, + }; + assert_eq!( - verify_justification::(header_id::(1), TEST_GRANDPA_SET_ID, voter_set(), &[],), - Err(Error::JustificationDecode), + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &voter_set(), + &make_justification_for_header::(params) + ), + Ok(()), + ); +} + +#[test] +fn valid_justification_accepted_with_arbitrary_number_of_authorities() { + use finality_grandpa::voter_set::VoterSet; + use sp_finality_grandpa::AuthorityId; + + let n = 15; + let authorities = accounts(n).iter().map(|k| (*k, 
1)).collect::>(); + + let params = JustificationGeneratorParams { + header: test_header(1), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: authorities.clone(), + votes: n.into(), + forks: n.into(), + }; + + let authorities = authorities + .iter() + .map(|(id, w)| (AuthorityId::from(*id), *w)) + .collect::>(); + let voter_set = VoterSet::new(authorities).unwrap(); + + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &voter_set, + &make_justification_for_header::(params) + ), + Ok(()), ); } @@ -45,8 +110,8 @@ fn justification_with_invalid_target_rejected() { verify_justification::( header_id::(2), TEST_GRANDPA_SET_ID, - voter_set(), - &make_justification_for_header_1().encode(), + &voter_set(), + &make_default_justification::(&test_header(1)), ), Err(Error::InvalidJustificationTarget), ); @@ -54,15 +119,15 @@ fn justification_with_invalid_target_rejected() { #[test] fn justification_with_invalid_commit_rejected() { - let mut justification = make_justification_for_header_1(); + let mut justification = make_default_justification::(&test_header(1)); justification.commit.precommits.clear(); assert_eq!( verify_justification::( header_id::(1), TEST_GRANDPA_SET_ID, - voter_set(), - &justification.encode(), + &voter_set(), + &justification, ), Err(Error::InvalidJustificationCommit), ); @@ -70,15 +135,15 @@ fn justification_with_invalid_commit_rejected() { #[test] fn justification_with_invalid_authority_signature_rejected() { - let mut justification = make_justification_for_header_1(); + let mut justification = make_default_justification::(&test_header(1)); justification.commit.precommits[0].signature = Default::default(); assert_eq!( verify_justification::( header_id::(1), TEST_GRANDPA_SET_ID, - voter_set(), - &justification.encode(), + &voter_set(), + &justification, ), Err(Error::InvalidAuthoritySignature), ); @@ -86,29 +151,41 @@ fn justification_with_invalid_authority_signature_rejected() { #[test] fn 
justification_with_invalid_precommit_ancestry() { - let mut justification = make_justification_for_header_1(); + let mut justification = make_default_justification::(&test_header(1)); justification.votes_ancestries.push(test_header(10)); assert_eq!( verify_justification::( header_id::(1), TEST_GRANDPA_SET_ID, - voter_set(), - &justification.encode(), + &voter_set(), + &justification, ), Err(Error::InvalidPrecommitAncestries), ); } #[test] -fn valid_justification_accepted() { +fn justification_is_invalid_if_we_dont_meet_threshold() { + // Need at least three authorities to sign off or else the voter set threshold can't be reached + let authorities = vec![(ALICE, 1), (BOB, 1)]; + + let params = JustificationGeneratorParams { + header: test_header(1), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: authorities.clone(), + votes: 2 * authorities.len() as u32, + forks: 2, + }; + assert_eq!( verify_justification::( header_id::(1), TEST_GRANDPA_SET_ID, - voter_set(), - &make_justification_for_header_1().encode(), + &voter_set(), + &make_justification_for_header::(params) ), - Ok(()), + Err(Error::InvalidJustificationCommit), ); } diff --git a/polkadot/bridges/primitives/kusama/src/lib.rs b/polkadot/bridges/primitives/kusama/src/lib.rs deleted file mode 100644 index 9ec032dbd5..0000000000 --- a/polkadot/bridges/primitives/kusama/src/lib.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] -// Runtime-generated DecodeLimit::decode_all_with_depth_limit -#![allow(clippy::unnecessary_mut_passed)] - -use bp_message_lane::{LaneId, MessageNonce}; -use bp_runtime::Chain; -use frame_support::{weights::Weight, RuntimeDebug}; -use sp_core::Hasher as HasherT; -use sp_runtime::{ - generic, - traits::{BlakeTwo256, IdentifyAccount, Verify}, - MultiSignature, OpaqueExtrinsic as UncheckedExtrinsic, -}; -use sp_std::prelude::*; - -// TODO: may need to be updated after https://github.com/paritytech/parity-bridges-common/issues/78 -/// Maximal number of messages in single delivery transaction. -pub const MAX_MESSAGES_IN_DELIVERY_TRANSACTION: MessageNonce = 128; - -/// Maximal number of unrewarded relayer entries at inbound lane. -pub const MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE: MessageNonce = 128; - -// TODO: should be selected keeping in mind: -// finality delay on both chains + reward payout cost + messages throughput. -/// Maximal number of unconfirmed messages at inbound lane. -pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 8192; - -/// Block number type used in Kusama. -pub type BlockNumber = u32; - -/// Hash type used in Kusama. -pub type Hash = ::Out; - -/// The type of an object that can produce hashes on Kusama. -pub type Hasher = BlakeTwo256; - -/// The header type used by Kusama. -pub type Header = generic::Header; - -/// Signature type used by Kusama. -pub type Signature = MultiSignature; - -/// Public key of account on Kusama chain. -pub type AccountPublic = ::Signer; - -/// Id of account on Kusama chain. -pub type AccountId = ::AccountId; - -/// Index of a transaction on the Kusama chain. 
-pub type Nonce = u32; - -/// Block type of Kusama. -pub type Block = generic::Block; - -/// Kusama block signed with a Justification. -pub type SignedBlock = generic::SignedBlock; - -/// The balance of an account on Polkadot. -pub type Balance = u128; - -/// Kusama chain. -#[derive(RuntimeDebug)] -pub struct Kusama; - -impl Chain for Kusama { - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; -} - -/// Convert a 256-bit hash into an AccountId. -pub struct AccountIdConverter; - -impl sp_runtime::traits::Convert for AccountIdConverter { - fn convert(hash: sp_core::H256) -> AccountId { - hash.to_fixed_bytes().into() - } -} - -/// Name of the `KusamaHeaderApi::best_blocks` runtime method. -pub const BEST_KUSAMA_BLOCKS_METHOD: &str = "KusamaHeaderApi_best_blocks"; -/// Name of the `KusamaHeaderApi::finalized_block` runtime method. -pub const FINALIZED_KUSAMA_BLOCK_METHOD: &str = "KusamaHeaderApi_finalized_block"; -/// Name of the `KusamaHeaderApi::is_known_block` runtime method. -pub const IS_KNOWN_KUSAMA_BLOCK_METHOD: &str = "KusamaHeaderApi_is_known_block"; -/// Name of the `KusamaHeaderApi::incomplete_headers` runtime method. -pub const INCOMPLETE_KUSAMA_HEADERS_METHOD: &str = "KusamaHeaderApi_incomplete_headers"; - -sp_api::decl_runtime_apis! { - /// API for querying information about Kusama headers from the Bridge Pallet instance. - /// - /// This API is implemented by runtimes that are bridging with Kusama chain, not the - /// Kusama runtime itself. - pub trait KusamaHeaderApi { - /// Returns number and hash of the best blocks known to the bridge module. - /// - /// Will return multiple headers if there are many headers at the same "best" height. - /// - /// The caller should only submit an `import_header` transaction that makes - /// (or leads to making) other header the best one. - fn best_blocks() -> Vec<(BlockNumber, Hash)>; - /// Returns number and hash of the best finalized block known to the bridge module. 
- fn finalized_block() -> (BlockNumber, Hash); - /// Returns numbers and hashes of headers that require finality proofs. - /// - /// An empty response means that there are no headers which currently require a - /// finality proof. - fn incomplete_headers() -> Vec<(BlockNumber, Hash)>; - /// Returns true if the header is known to the runtime. - fn is_known_block(hash: Hash) -> bool; - /// Returns true if the header is considered finalized by the runtime. - fn is_finalized_block(hash: Hash) -> bool; - } - - /// Outbound message lane API for messages that are sent to Kusama chain. - /// - /// This API is implemented by runtimes that are sending messages to Kusama chain, not the - /// Kusama runtime itself. - pub trait ToKusamaOutboundLaneApi { - /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. - /// - /// If some (or all) messages are missing from the storage, they'll also will - /// be missing from the resulting vector. The vector is ordered by the nonce. - fn messages_dispatch_weight( - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - ) -> Vec<(MessageNonce, Weight, u32)>; - /// Returns nonce of the latest message, received by bridged chain. - fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Returns nonce of the latest message, generated by given lane. - fn latest_generated_nonce(lane: LaneId) -> MessageNonce; - } - - /// Inbound message lane API for messages sent by Kusama chain. - /// - /// This API is implemented by runtimes that are receiving messages from Kusama chain, not the - /// Kusama runtime itself. - pub trait FromKusamaInboundLaneApi { - /// Returns nonce of the latest message, received by given lane. - fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Nonce of latest message that has been confirmed to the bridged chain. 
- fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; - } -} diff --git a/polkadot/bridges/primitives/message-dispatch/src/lib.rs b/polkadot/bridges/primitives/message-dispatch/src/lib.rs index 1932d8cb0b..3b83e38517 100644 --- a/polkadot/bridges/primitives/message-dispatch/src/lib.rs +++ b/polkadot/bridges/primitives/message-dispatch/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/primitives/message-lane/Cargo.toml b/polkadot/bridges/primitives/messages/Cargo.toml similarity index 91% rename from polkadot/bridges/primitives/message-lane/Cargo.toml rename to polkadot/bridges/primitives/messages/Cargo.toml index cbddcb1614..9cb037a34c 100644 --- a/polkadot/bridges/primitives/message-lane/Cargo.toml +++ b/polkadot/bridges/primitives/messages/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "bp-message-lane" -description = "Primitives of message lane module." +name = "bp-messages" +description = "Primitives of messages module." version = "0.1.0" authors = ["Parity Technologies "] edition = "2018" diff --git a/polkadot/bridges/primitives/message-lane/src/lib.rs b/polkadot/bridges/primitives/messages/src/lib.rs similarity index 97% rename from polkadot/bridges/primitives/message-lane/src/lib.rs rename to polkadot/bridges/primitives/messages/src/lib.rs index de2dbd9ae6..c3ffce8baa 100644 --- a/polkadot/bridges/primitives/message-lane/src/lib.rs +++ b/polkadot/bridges/primitives/messages/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Primitives of message lane module. +//! Primitives of messages module. #![cfg_attr(not(feature = "std"), no_std)] // RuntimeApi generated functions @@ -29,10 +29,10 @@ use sp_std::{collections::vec_deque::VecDeque, prelude::*}; pub mod source_chain; pub mod target_chain; -// Weight is reexported to avoid additional frame-support dependencies in message-lane related crates. +// Weight is reexported to avoid additional frame-support dependencies in related crates. pub use frame_support::weights::Weight; -/// Message lane pallet parameter. +/// Messages pallet parameter. pub trait Parameter: frame_support::Parameter { /// Save parameter value in the runtime storage. fn save(&self); diff --git a/polkadot/bridges/primitives/message-lane/src/source_chain.rs b/polkadot/bridges/primitives/messages/src/source_chain.rs similarity index 98% rename from polkadot/bridges/primitives/message-lane/src/source_chain.rs rename to polkadot/bridges/primitives/messages/src/source_chain.rs index d0dc36bb69..1d313634bc 100644 --- a/polkadot/bridges/primitives/message-lane/src/source_chain.rs +++ b/polkadot/bridges/primitives/messages/src/source_chain.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Primitives of message lane module, that are used on the source chain. +//! Primitives of messages module, that are used on the source chain. 
use crate::{InboundLaneData, LaneId, MessageNonce, OutboundLaneData}; diff --git a/polkadot/bridges/primitives/message-lane/src/target_chain.rs b/polkadot/bridges/primitives/messages/src/target_chain.rs similarity index 97% rename from polkadot/bridges/primitives/message-lane/src/target_chain.rs rename to polkadot/bridges/primitives/messages/src/target_chain.rs index 765ce64f63..676e919bc6 100644 --- a/polkadot/bridges/primitives/message-lane/src/target_chain.rs +++ b/polkadot/bridges/primitives/messages/src/target_chain.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Primitives of message lane module, that are used on the target chain. +//! Primitives of messages module, that are used on the target chain. use crate::{LaneId, Message, MessageData, MessageKey, OutboundLaneData}; diff --git a/polkadot/bridges/primitives/polkadot/Cargo.toml b/polkadot/bridges/primitives/polkadot-core/Cargo.toml similarity index 71% rename from polkadot/bridges/primitives/polkadot/Cargo.toml rename to polkadot/bridges/primitives/polkadot-core/Cargo.toml index f7c9b9717d..995f948e5d 100644 --- a/polkadot/bridges/primitives/polkadot/Cargo.toml +++ b/polkadot/bridges/primitives/polkadot-core/Cargo.toml @@ -1,16 +1,17 @@ [package] -name = "bp-polkadot" -description = "Primitives of Polkadot runtime." +name = "bp-polkadot-core" +description = "Primitives of Polkadot-like runtime." 
version = "0.1.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] +parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] } # Bridge Dependencies -bp-message-lane = { path = "../message-lane", default-features = false } +bp-messages = { path = "../messages", default-features = false } bp-runtime = { path = "../runtime", default-features = false } # Substrate Based Dependencies @@ -21,16 +22,22 @@ sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +[dev-dependencies] +hex = "0.4" [features] default = ["std"] std = [ - "bp-message-lane/std", + "bp-messages/std", "bp-runtime/std", "frame-support/std", "frame-system/std", + "parity-scale-codec/std", "sp-api/std", "sp-core/std", "sp-runtime/std", "sp-std/std", + "sp-version/std", ] diff --git a/polkadot/bridges/primitives/polkadot-core/src/lib.rs b/polkadot/bridges/primitives/polkadot-core/src/lib.rs new file mode 100644 index 0000000000..c9858c0820 --- /dev/null +++ b/polkadot/bridges/primitives/polkadot-core/src/lib.rs @@ -0,0 +1,350 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] + +use bp_messages::MessageNonce; +use bp_runtime::Chain; +use frame_support::{ + dispatch::Dispatchable, + parameter_types, + weights::{ + constants::{BlockExecutionWeight, ExtrinsicBaseWeight, WEIGHT_PER_SECOND}, + DispatchClass, Weight, + }, + Blake2_128Concat, RuntimeDebug, StorageHasher, Twox128, +}; +use frame_system::limits; +use parity_scale_codec::Compact; +use sp_core::Hasher as HasherT; +use sp_runtime::{ + generic, + traits::{BlakeTwo256, IdentifyAccount, Verify}, + MultiAddress, MultiSignature, OpaqueExtrinsic, Perbill, +}; +use sp_std::prelude::Vec; + +// Re-export's to avoid extra substrate dependencies in chain-specific crates. +pub use frame_support::Parameter; +pub use sp_runtime::traits::Convert; + +/// Number of extra bytes (excluding size of storage value itself) of storage proof, built at +/// Polkadot-like chain. This mostly depends on number of entries in the storage trie. +/// Some reserve is reserved to account future chain growth. +/// +/// To compute this value, we've synced Kusama chain blocks [0; 6545733] to see if there were +/// any significant changes of the storage proof size (NO): +/// +/// - at block 3072 the storage proof size overhead was 579 bytes; +/// - at block 2479616 it was 578 bytes; +/// - at block 4118528 it was 711 bytes; +/// - at block 6540800 it was 779 bytes. +/// +/// The number of storage entries at the block 6546170 was 351207 and number of trie nodes in +/// the storage proof was 5 (log(16, 351207) ~ 4.6). 
+/// +/// So the assumption is that the storage proof size overhead won't be larger than 1024 in the +/// nearest future. If it'll ever break this barrier, then we'll need to update this constant +/// at next runtime upgrade. +pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; + +/// Maximal size (in bytes) of encoded (using `Encode::encode()`) account id. +/// +/// All polkadot-like chains are using same crypto. +pub const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 32; + +/// All Polkadot-like chains allow normal extrinsics to fill block up to 75%. +/// +/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); + +/// All Polkadot-like chains allow 2 seconds of compute with a 6 second average block time. +/// +/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. +pub const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; + +/// All Polkadot-like chains assume that an on-initialize consumes 1% of the weight on average, +/// hence a single extrinsic will not be allowed to consume more than `AvailableBlockRatio - 1%`. +/// +/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. +pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(1); + +parameter_types! { + /// All Polkadot-like chains have maximal block size set to 5MB. + /// + /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. + pub BlockLength: limits::BlockLength = limits::BlockLength::max_with_normal_ratio( + 5 * 1024 * 1024, + NORMAL_DISPATCH_RATIO, + ); + /// All Polkadot-like chains have the same block weights. + /// + /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. 
+ pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have an extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT, + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); +} + +/// Get the maximum weight (compute time) that a Normal extrinsic on the Polkadot-like chain can use. +pub fn max_extrinsic_weight() -> Weight { + BlockWeights::get() + .get(DispatchClass::Normal) + .max_extrinsic + .unwrap_or(Weight::MAX) +} + +/// Get the maximum length in bytes that a Normal extrinsic on the Polkadot-like chain requires. +pub fn max_extrinsic_size() -> u32 { + *BlockLength::get().max.get(DispatchClass::Normal) +} + +// TODO [#78] may need to be updated after https://github.com/paritytech/parity-bridges-common/issues/78 +/// Maximal number of messages in single delivery transaction. +pub const MAX_MESSAGES_IN_DELIVERY_TRANSACTION: MessageNonce = 128; + +/// Maximal number of unrewarded relayer entries at inbound lane. +pub const MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE: MessageNonce = 128; + +// TODO [#438] should be selected keeping in mind: +// finality delay on both chains + reward payout cost + messages throughput. +/// Maximal number of unconfirmed messages at inbound lane. +pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 8192; + +/// Re-export `time_units` to make usage easier. 
+pub use time_units::*; + +/// Human readable time units defined in terms of number of blocks. +pub mod time_units { + use super::BlockNumber; + + pub const MILLISECS_PER_BLOCK: u64 = 6000; + pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; + + pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); + pub const HOURS: BlockNumber = MINUTES * 60; + pub const DAYS: BlockNumber = HOURS * 24; +} + +/// Block number type used in Polkadot-like chains. +pub type BlockNumber = u32; + +/// Hash type used in Polkadot-like chains. +pub type Hash = ::Out; + +/// Account Index (a.k.a. nonce). +pub type Index = u32; + +/// Hashing type. +pub type Hashing = BlakeTwo256; + +/// The type of an object that can produce hashes on Polkadot-like chains. +pub type Hasher = BlakeTwo256; + +/// The header type used by Polkadot-like chains. +pub type Header = generic::Header; + +/// Signature type used by Polkadot-like chains. +pub type Signature = MultiSignature; + +/// Public key of account on Polkadot-like chains. +pub type AccountPublic = ::Signer; + +/// Id of account on Polkadot-like chains. +pub type AccountId = ::AccountId; + +/// Index of a transaction on the Polkadot-like chains. +pub type Nonce = u32; + +/// Block type of Polkadot-like chains. +pub type Block = generic::Block; + +/// Polkadot-like block signed with a Justification. +pub type SignedBlock = generic::SignedBlock; + +/// The balance of an account on Polkadot-like chain. +pub type Balance = u128; + +/// Unchecked Extrinsic type. +pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic, Call, Signature, SignedExtensions>; + +/// A type of the data encoded as part of the transaction. +pub type SignedExtra = ( + (), + (), + (), + sp_runtime::generic::Era, + Compact, + (), + Compact, +); + +/// Parameters which are part of the payload used to produce transaction signature, +/// but don't end up in the transaction itself (i.e. inherent part of the runtime). 
+pub type AdditionalSigned = (u32, u32, Hash, Hash, (), (), ()); + +/// A simplified version of signed extensions meant for producing signed transactions +/// and signed payload in the client code. +#[derive(PartialEq, Eq, Clone, RuntimeDebug)] +pub struct SignedExtensions { + encode_payload: SignedExtra, + additional_signed: AdditionalSigned, + _data: sp_std::marker::PhantomData, +} + +impl parity_scale_codec::Encode for SignedExtensions { + fn using_encoded R>(&self, f: F) -> R { + self.encode_payload.using_encoded(f) + } +} + +impl parity_scale_codec::Decode for SignedExtensions { + fn decode(_input: &mut I) -> Result { + unimplemented!("SignedExtensions are never meant to be decoded, they are only used to create transaction"); + } +} + +impl SignedExtensions { + pub fn new( + version: sp_version::RuntimeVersion, + era: sp_runtime::generic::Era, + genesis_hash: Hash, + nonce: Nonce, + tip: Balance, + ) -> Self { + Self { + encode_payload: ( + (), // spec version + (), // tx version + (), // genesis + era, // era + nonce.into(), // nonce (compact encoding) + (), // Check weight + tip.into(), // transaction payment / tip (compact encoding) + ), + additional_signed: ( + version.spec_version, + version.transaction_version, + genesis_hash, + genesis_hash, + (), + (), + (), + ), + _data: Default::default(), + } + } +} + +impl sp_runtime::traits::SignedExtension for SignedExtensions +where + Call: parity_scale_codec::Codec + sp_std::fmt::Debug + Sync + Send + Clone + Eq + PartialEq, + Call: Dispatchable, +{ + const IDENTIFIER: &'static str = "Not needed."; + + type AccountId = AccountId; + type Call = Call; + type AdditionalSigned = AdditionalSigned; + type Pre = (); + + fn additional_signed(&self) -> Result { + Ok(self.additional_signed) + } +} + +/// Polkadot-like chain. 
+#[derive(RuntimeDebug)] +pub struct PolkadotLike; + +impl Chain for PolkadotLike { + type BlockNumber = BlockNumber; + type Hash = Hash; + type Hasher = Hasher; + type Header = Header; +} + +/// Convert a 256-bit hash into an AccountId. +pub struct AccountIdConverter; + +impl Convert for AccountIdConverter { + fn convert(hash: sp_core::H256) -> AccountId { + hash.to_fixed_bytes().into() + } +} + +/// Return a storage key for account data. +/// +/// This is based on FRAME storage-generation code from Substrate: +/// https://github.com/paritytech/substrate/blob/c939ceba381b6313462d47334f775e128ea4e95d/frame/support/src/storage/generator/map.rs#L74 +/// The equivalent command to invoke in case full `Runtime` is known is this: +/// `let key = frame_system::Account::::storage_map_final_key(&account_id);` +pub fn account_info_storage_key(id: &AccountId) -> Vec { + let module_prefix_hashed = Twox128::hash(b"System"); + let storage_prefix_hashed = Twox128::hash(b"Account"); + let key_hashed = parity_scale_codec::Encode::using_encoded(id, Blake2_128Concat::hash); + + let mut final_key = Vec::with_capacity(module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len()); + + final_key.extend_from_slice(&module_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(&key_hashed); + + final_key +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_runtime::codec::Encode; + + #[test] + fn maximal_encoded_account_id_size_is_correct() { + let actual_size = AccountId::default().encode().len(); + assert!( + actual_size <= MAXIMAL_ENCODED_ACCOUNT_ID_SIZE as usize, + "Actual size of encoded account id for Polkadot-like chains ({}) is larger than expected {}", + actual_size, + MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, + ); + } + + #[test] + fn should_generate_storage_key() { + let acc = [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, + ] + .into(); + 
let key = account_info_storage_key(&acc); + assert_eq!(hex::encode(key), "26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da92dccd599abfe1920a1cff8a7358231430102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"); + } +} diff --git a/polkadot/bridges/primitives/polkadot/src/lib.rs b/polkadot/bridges/primitives/polkadot/src/lib.rs deleted file mode 100644 index a82dd5075b..0000000000 --- a/polkadot/bridges/primitives/polkadot/src/lib.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] -// Runtime-generated DecodeLimit::decode_all_with_depth_limit -#![allow(clippy::unnecessary_mut_passed)] - -use bp_message_lane::{LaneId, MessageNonce}; -use bp_runtime::Chain; -use frame_support::{weights::Weight, RuntimeDebug}; -use sp_core::Hasher as HasherT; -use sp_runtime::{ - generic, - traits::{BlakeTwo256, IdentifyAccount, Verify}, - MultiSignature, OpaqueExtrinsic as UncheckedExtrinsic, -}; -use sp_std::prelude::*; - -// TODO: may need to be updated after https://github.com/paritytech/parity-bridges-common/issues/78 -/// Maximal number of messages in single delivery transaction. 
-pub const MAX_MESSAGES_IN_DELIVERY_TRANSACTION: MessageNonce = 128; - -/// Maximal number of unrewarded relayer entries at inbound lane. -pub const MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE: MessageNonce = 128; - -// TODO: should be selected keeping in mind: -// finality delay on both chains + reward payout cost + messages throughput. -/// Maximal number of unconfirmed messages at inbound lane. -pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 8192; - -/// Block number type used in Polkadot. -pub type BlockNumber = u32; - -/// Hash type used in Polkadot. -pub type Hash = ::Out; - -/// The type of an object that can produce hashes on Polkadot. -pub type Hasher = BlakeTwo256; - -/// The header type used by Polkadot. -pub type Header = generic::Header; - -/// Signature type used by Polkadot. -pub type Signature = MultiSignature; - -/// Public key of account on Polkadot chain. -pub type AccountPublic = ::Signer; - -/// Id of account on Polkadot chain. -pub type AccountId = ::AccountId; - -/// Index of a transaction on the Polkadot chain. -pub type Nonce = u32; - -/// Block type of Polkadot. -pub type Block = generic::Block; - -/// Polkadot block signed with a Justification. -pub type SignedBlock = generic::SignedBlock; - -/// The balance of an account on Polkadot. -pub type Balance = u128; - -/// Polkadot chain. -#[derive(RuntimeDebug)] -pub struct Polkadot; - -impl Chain for Polkadot { - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; -} - -/// Convert a 256-bit hash into an AccountId. -pub struct AccountIdConverter; - -impl sp_runtime::traits::Convert for AccountIdConverter { - fn convert(hash: sp_core::H256) -> AccountId { - hash.to_fixed_bytes().into() - } -} - -/// Name of the `PolkadotHeaderApi::best_blocks` runtime method. -pub const BEST_POLKADOT_BLOCKS_METHOD: &str = "PolkadotHeaderApi_best_blocks"; -/// Name of the `PolkadotHeaderApi::finalized_block` runtime method. 
-pub const FINALIZED_POLKADOT_BLOCK_METHOD: &str = "PolkadotHeaderApi_finalized_block"; -/// Name of the `PolkadotHeaderApi::is_known_block` runtime method. -pub const IS_KNOWN_POLKADOT_BLOCK_METHOD: &str = "PolkadotHeaderApi_is_known_block"; -/// Name of the `PolkadotHeaderApi::incomplete_headers` runtime method. -pub const INCOMPLETE_POLKADOT_HEADERS_METHOD: &str = "PolkadotHeaderApi_incomplete_headers"; - -sp_api::decl_runtime_apis! { - /// API for querying information about Polkadot headers from the Bridge Pallet instance. - /// - /// This API is implemented by runtimes that are bridging with Polkadot chain, not the - /// Polkadot runtime itself. - pub trait PolkadotHeaderApi { - /// Returns number and hash of the best blocks known to the bridge module. - /// - /// Will return multiple headers if there are many headers at the same "best" height. - /// - /// The caller should only submit an `import_header` transaction that makes - /// (or leads to making) other header the best one. - fn best_blocks() -> Vec<(BlockNumber, Hash)>; - /// Returns number and hash of the best finalized block known to the bridge module. - fn finalized_block() -> (BlockNumber, Hash); - /// Returns numbers and hashes of headers that require finality proofs. - /// - /// An empty response means that there are no headers which currently require a - /// finality proof. - fn incomplete_headers() -> Vec<(BlockNumber, Hash)>; - /// Returns true if the header is known to the runtime. - fn is_known_block(hash: Hash) -> bool; - /// Returns true if the header is considered finalized by the runtime. - fn is_finalized_block(hash: Hash) -> bool; - } - - /// Outbound message lane API for messages that are sent to Polkadot chain. - /// - /// This API is implemented by runtimes that are sending messages to Polkadot chain, not the - /// Polkadot runtime itself. - pub trait ToPolkadotOutboundLaneApi { - /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. 
- /// - /// If some (or all) messages are missing from the storage, they'll also will - /// be missing from the resulting vector. The vector is ordered by the nonce. - fn messages_dispatch_weight( - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - ) -> Vec<(MessageNonce, Weight, u32)>; - /// Returns nonce of the latest message, received by bridged chain. - fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Returns nonce of the latest message, generated by given lane. - fn latest_generated_nonce(lane: LaneId) -> MessageNonce; - } - - /// Inbound message lane API for messages sent by Polkadot chain. - /// - /// This API is implemented by runtimes that are receiving messages from Polkadot chain, not the - /// Polkadot runtime itself. - pub trait FromPolkadotInboundLaneApi { - /// Returns nonce of the latest message, received by given lane. - fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Nonce of latest message that has been confirmed to the bridged chain. 
- fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; - } -} diff --git a/polkadot/bridges/primitives/runtime/Cargo.toml b/polkadot/bridges/primitives/runtime/Cargo.toml index b8e511fa56..17fa96b2c9 100644 --- a/polkadot/bridges/primitives/runtime/Cargo.toml +++ b/polkadot/bridges/primitives/runtime/Cargo.toml @@ -8,6 +8,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +hash-db = { version = "0.15.2", default-features = false } num-traits = { version = "0.2", default-features = false } # Substrate Dependencies @@ -16,16 +17,25 @@ frame-support = { git = "https://github.com/paritytech/substrate", branch = "mas sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + +[dev-dependencies] +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } + [features] default = ["std"] std = [ "codec/std", "frame-support/std", + "hash-db/std", "num-traits/std", "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std", + "sp-state-machine/std", + "sp-trie/std", ] diff --git a/polkadot/bridges/primitives/runtime/src/chain.rs b/polkadot/bridges/primitives/runtime/src/chain.rs index 348b5bf1d2..cb19c6e726 100644 --- a/polkadot/bridges/primitives/runtime/src/chain.rs +++ b/polkadot/bridges/primitives/runtime/src/chain.rs @@ -1,4 +1,4 @@ -// 
Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/primitives/runtime/src/lib.rs b/polkadot/bridges/primitives/runtime/src/lib.rs index 1afb1b1fd8..e7f990d283 100644 --- a/polkadot/bridges/primitives/runtime/src/lib.rs +++ b/polkadot/bridges/primitives/runtime/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -24,8 +24,13 @@ use sp_io::hashing::blake2_256; use sp_std::convert::TryFrom; pub use chain::{BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf}; +pub use storage_proof::{Error as StorageProofError, StorageProofChecker}; + +#[cfg(feature = "std")] +pub use storage_proof::craft_valid_storage_proof; mod chain; +mod storage_proof; /// Use this when something must be shared among all instances. pub const NO_INSTANCE_ID: InstanceId = [0, 0, 0, 0]; @@ -42,11 +47,14 @@ pub const POLKADOT_BRIDGE_INSTANCE: InstanceId = *b"pdot"; /// Bridge-with-Kusama instance id. pub const KUSAMA_BRIDGE_INSTANCE: InstanceId = *b"ksma"; -/// Call-dispatch module prefix. -pub const CALL_DISPATCH_MODULE_PREFIX: &[u8] = b"pallet-bridge/call-dispatch"; +/// Bridge-with-Rococo instance id. +pub const ROCOCO_BRIDGE_INSTANCE: InstanceId = *b"roco"; -/// Message-lane module prefix. -pub const MESSAGE_LANE_MODULE_PREFIX: &[u8] = b"pallet-bridge/message-lane"; +/// Bridge-with-Westend instance id. +pub const WESTEND_BRIDGE_INSTANCE: InstanceId = *b"wend"; + +/// Call-dispatch module prefix. +pub const CALL_DISPATCH_MODULE_PREFIX: &[u8] = b"pallet-bridge/dispatch"; /// A unique prefix for entropy when generating cross-chain account IDs. 
pub const ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/account"; @@ -55,16 +63,16 @@ pub const ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/ pub const ROOT_ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/root"; /// Id of deployed module instance. We have a bunch of pallets that may be used in -/// different bridges. E.g. message-lane pallet may be deployed twice in the same +/// different bridges. E.g. messages pallet may be deployed twice in the same /// runtime to bridge ThisChain with Chain1 and Chain2. Sometimes we need to be able /// to identify deployed instance dynamically. This type is used for that. pub type InstanceId = [u8; 4]; /// Type of accounts on the source chain. pub enum SourceAccount { - /// An account that belongs to Root (privileged origin). + /// An account that belongs to Root (priviledged origin). Root, - /// A non-privileged account. + /// A non-priviledged account. /// /// The embedded account ID may or may not have a private key depending on the "owner" of the /// account (private key, pallet, proxy, etc.). @@ -97,7 +105,7 @@ where /// /// This account is used to collect fees for relayers that are passing messages across the bridge. /// -/// The account ID can be the same across different instances of `message-lane` if the same +/// The account ID can be the same across different instances of `pallet-bridge-messages` if the same /// `bridge_id` is used. 
pub fn derive_relayer_fund_account_id(bridge_id: InstanceId) -> H256 { ("relayer-fund-account", bridge_id).using_encoded(blake2_256).into() diff --git a/polkadot/bridges/modules/substrate/src/storage_proof.rs b/polkadot/bridges/primitives/runtime/src/storage_proof.rs similarity index 68% rename from polkadot/bridges/modules/substrate/src/storage_proof.rs rename to polkadot/bridges/primitives/runtime/src/storage_proof.rs index 4b908dde15..d70be93b1d 100644 --- a/polkadot/bridges/modules/substrate/src/storage_proof.rs +++ b/polkadot/bridges/primitives/runtime/src/storage_proof.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,9 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -// TODO: remove on actual use -#![allow(dead_code)] - //! Logic for checking Substrate storage proofs. use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; @@ -65,49 +62,42 @@ pub enum Error { StorageValueUnavailable, } -impl From for crate::Error { - fn from(error: Error) -> Self { - match error { - Error::StorageRootMismatch => crate::Error::StorageRootMismatch, - Error::StorageValueUnavailable => crate::Error::StorageValueUnavailable, - } - } +/// Return valid storage proof and state root. +/// +/// NOTE: This should only be used for **testing**. 
+#[cfg(feature = "std")] +pub fn craft_valid_storage_proof() -> (sp_core::H256, StorageProof) { + use sp_state_machine::{backend::Backend, prove_read, InMemoryBackend}; + + // construct storage proof + let backend = >::from(vec![ + (None, vec![(b"key1".to_vec(), Some(b"value1".to_vec()))]), + (None, vec![(b"key2".to_vec(), Some(b"value2".to_vec()))]), + (None, vec![(b"key3".to_vec(), Some(b"value3".to_vec()))]), + // Value is too big to fit in a branch node + (None, vec![(b"key11".to_vec(), Some(vec![0u8; 32]))]), + ]); + let root = backend.storage_root(std::iter::empty()).0; + let proof = StorageProof::new( + prove_read(backend, &[&b"key1"[..], &b"key2"[..], &b"key22"[..]]) + .unwrap() + .iter_nodes() + .collect(), + ); + + (root, proof) } #[cfg(test)] pub mod tests { use super::*; - use sp_core::{Blake2Hasher, H256}; - use sp_state_machine::{backend::Backend, prove_read, InMemoryBackend}; - - /// Return valid storage proof and state root. - pub fn craft_valid_storage_proof() -> (H256, StorageProof) { - // construct storage proof - let backend = >::from(vec![ - (None, vec![(b"key1".to_vec(), Some(b"value1".to_vec()))]), - (None, vec![(b"key2".to_vec(), Some(b"value2".to_vec()))]), - (None, vec![(b"key3".to_vec(), Some(b"value3".to_vec()))]), - // Value is too big to fit in a branch node - (None, vec![(b"key11".to_vec(), Some(vec![0u8; 32]))]), - ]); - let root = backend.storage_root(std::iter::empty()).0; - let proof = StorageProof::new( - prove_read(backend, &[&b"key1"[..], &b"key2"[..], &b"key22"[..]]) - .unwrap() - .iter_nodes() - .collect(), - ); - - (root, proof) - } - #[test] fn storage_proof_check() { let (root, proof) = craft_valid_storage_proof(); // check proof in runtime - let checker = >::new(root, proof.clone()).unwrap(); + let checker = >::new(root, proof.clone()).unwrap(); assert_eq!(checker.read_value(b"key1"), Ok(Some(b"value1".to_vec()))); assert_eq!(checker.read_value(b"key2"), Ok(Some(b"value2".to_vec()))); 
assert_eq!(checker.read_value(b"key11111"), Err(Error::StorageValueUnavailable)); @@ -115,7 +105,7 @@ pub mod tests { // checking proof against invalid commitment fails assert_eq!( - >::new(H256::random(), proof).err(), + >::new(sp_core::H256::random(), proof).err(), Some(Error::StorageRootMismatch) ); } diff --git a/polkadot/bridges/primitives/test-utils/Cargo.toml b/polkadot/bridges/primitives/test-utils/Cargo.toml index 396e0e7646..5adb2c2b55 100644 --- a/polkadot/bridges/primitives/test-utils/Cargo.toml +++ b/polkadot/bridges/primitives/test-utils/Cargo.toml @@ -6,8 +6,24 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -finality-grandpa = { version = "0.14.0" } -bp-header-chain = { path = "../header-chain" } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +bp-header-chain = { path = "../header-chain", default-features = false } +ed25519-dalek = { version = "1.0", default-features = false, features = ["u64_backend"] } +finality-grandpa = { version = "0.14.0", default-features = false } +parity-scale-codec = { version = "2.0.0", default-features = false } +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +[features] +default = ["std"] +std = [ + "bp-header-chain/std", + "ed25519-dalek/std", + "finality-grandpa/std", + "parity-scale-codec/std", + "sp-application-crypto/std", + "sp-finality-grandpa/std", 
+ "sp-runtime/std", + "sp-std/std", +] diff --git a/polkadot/bridges/primitives/test-utils/src/keyring.rs b/polkadot/bridges/primitives/test-utils/src/keyring.rs new file mode 100644 index 0000000000..6c5b1cae91 --- /dev/null +++ b/polkadot/bridges/primitives/test-utils/src/keyring.rs @@ -0,0 +1,96 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Utilities for working with test accounts. + +use ed25519_dalek::{Keypair, PublicKey, SecretKey, Signature}; +use finality_grandpa::voter_set::VoterSet; +use parity_scale_codec::Encode; +use sp_application_crypto::Public; +use sp_finality_grandpa::{AuthorityId, AuthorityList, AuthorityWeight}; +use sp_runtime::RuntimeDebug; +use sp_std::prelude::*; + +/// Set of test accounts with friendly names. +pub const ALICE: Account = Account(0); +pub const BOB: Account = Account(1); +pub const CHARLIE: Account = Account(2); +pub const DAVE: Account = Account(3); +pub const EVE: Account = Account(4); +pub const FERDIE: Account = Account(5); + +/// A test account which can be used to sign messages. 
+#[derive(RuntimeDebug, Clone, Copy)] +pub struct Account(pub u16); + +impl Account { + pub fn public(&self) -> PublicKey { + (&self.secret()).into() + } + + pub fn secret(&self) -> SecretKey { + let data = self.0.encode(); + let mut bytes = [0_u8; 32]; + bytes[0..data.len()].copy_from_slice(&*data); + SecretKey::from_bytes(&bytes).expect("A static array of the correct length is a known good.") + } + + pub fn pair(&self) -> Keypair { + let mut pair: [u8; 64] = [0; 64]; + + let secret = self.secret(); + pair[..32].copy_from_slice(&secret.to_bytes()); + + let public = self.public(); + pair[32..].copy_from_slice(&public.to_bytes()); + + Keypair::from_bytes(&pair).expect("We expect the SecretKey to be good, so this must also be good.") + } + + pub fn sign(&self, msg: &[u8]) -> Signature { + use ed25519_dalek::Signer; + self.pair().sign(msg) + } +} + +impl From for AuthorityId { + fn from(p: Account) -> Self { + AuthorityId::from_slice(&p.public().to_bytes()) + } +} + +/// Get a valid set of voters for a Grandpa round. +pub fn voter_set() -> VoterSet { + VoterSet::new(authority_list()).unwrap() +} + +/// Convenience function to get a list of Grandpa authorities. +pub fn authority_list() -> AuthorityList { + test_keyring() + .iter() + .map(|(id, w)| (AuthorityId::from(*id), *w)) + .collect() +} + +/// Get the corresponding identities from the keyring for the "standard" authority set. +pub fn test_keyring() -> Vec<(Account, AuthorityWeight)> { + vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1)] +} + +/// Get a list of "unique" accounts. +pub fn accounts(len: u16) -> Vec { + (0..len).into_iter().map(Account).collect() +} diff --git a/polkadot/bridges/primitives/test-utils/src/lib.rs b/polkadot/bridges/primitives/test-utils/src/lib.rs index 182eb2cb79..0fcc263763 100644 --- a/polkadot/bridges/primitives/test-utils/src/lib.rs +++ b/polkadot/bridges/primitives/test-utils/src/lib.rs @@ -15,51 +15,132 @@ // along with Parity Bridges Common. If not, see . //! 
Utilities for testing runtime code. -//! -//! Unlike other crates in the `primitives` folder, this crate does *not* need to compile in a -//! `no_std` environment. This is fine because this code should only be used, as the name implies, -//! in tests. + +#![cfg_attr(not(feature = "std"), no_std)] use bp_header_chain::justification::GrandpaJustification; -use finality_grandpa::voter_set::VoterSet; -use sp_finality_grandpa::{AuthorityId, AuthorityList, AuthorityWeight}; +use sp_application_crypto::TryFrom; +use sp_finality_grandpa::{AuthorityId, AuthorityWeight}; use sp_finality_grandpa::{AuthoritySignature, SetId}; -use sp_keyring::Ed25519Keyring; -use sp_runtime::traits::Header as HeaderT; -use sp_runtime::traits::{One, Zero}; +use sp_runtime::traits::{Header as HeaderT, One, Zero}; +use sp_std::prelude::*; + +// Re-export all our test account utilities +pub use keyring::*; + +mod keyring; pub const TEST_GRANDPA_ROUND: u64 = 1; pub const TEST_GRANDPA_SET_ID: SetId = 1; -/// Get a valid Grandpa justification for a header given a Grandpa round, authority set ID, and -/// authority list. -pub fn make_justification_for_header( - header: &H, - round: u64, - set_id: SetId, - authorities: &[(AuthorityId, AuthorityWeight)], -) -> GrandpaJustification { +/// Configuration parameters when generating test GRANDPA justifications. +#[derive(Clone)] +pub struct JustificationGeneratorParams { + /// The header which we want to finalize. + pub header: H, + /// The GRANDPA round number for the current authority set. + pub round: u64, + /// The current authority set ID. + pub set_id: SetId, + /// The current GRANDPA authority set. + /// + /// The size of the set will determine the number of pre-commits in our justification. + pub authorities: Vec<(Account, AuthorityWeight)>, + /// The total number of vote ancestries in our justification. + /// + /// These may be distributed among many different forks. + pub votes: u32, + /// The number of forks. 
+ /// + /// Useful for creating a "worst-case" scenario in which each authority is on its own fork. + pub forks: u32, +} + +impl Default for JustificationGeneratorParams { + fn default() -> Self { + Self { + header: test_header(One::one()), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: test_keyring(), + votes: 2, + forks: 1, + } + } +} + +/// Make a valid GRANDPA justification with sensible defaults +pub fn make_default_justification(header: &H) -> GrandpaJustification { + let params = JustificationGeneratorParams:: { + header: header.clone(), + ..Default::default() + }; + + make_justification_for_header(params) +} + +/// Generate justifications in a way where we are able to tune the number of pre-commits +/// and vote ancestries which are included in the justification. +/// +/// This is useful for benchmarkings where we want to generate valid justifications with +/// a specific number of pre-commits (tuned with the number of "authorities") and/or a specific +/// number of vote ancestries (tuned with the "votes" parameter). +/// +/// Note: This needs at least three authorities or else the verifier will complain about +/// being given an invalid commit. 
+pub fn make_justification_for_header(params: JustificationGeneratorParams) -> GrandpaJustification { + let JustificationGeneratorParams { + header, + round, + set_id, + authorities, + mut votes, + forks, + } = params; + let (target_hash, target_number) = (header.hash(), *header.number()); let mut precommits = vec![]; let mut votes_ancestries = vec![]; - // We want to make sure that the header included in the vote ancestries - // is actually related to our target header - let mut precommit_header = test_header::(target_number + One::one()); - precommit_header.set_parent_hash(target_hash); + assert!(forks != 0, "Need at least one fork to have a chain.."); + assert!(votes >= forks, "Need at least one header per fork."); + assert!( + forks as usize <= authorities.len(), + "If we have more forks than authorities we can't create valid pre-commits for all the forks." + ); + + // Roughly, how many vote ancestries do we want per fork + let target_depth = (votes + forks - 1) / forks; + + let mut unsigned_precommits = vec![]; + for i in 0..forks { + let depth = if votes >= target_depth { + votes -= target_depth; + target_depth + } else { + votes + }; + + // Note: Adding 1 to account for the target header + let chain = generate_chain(i as u8, depth + 1, &header); + + // We don't include our finality target header in the vote ancestries + for child in &chain[1..] { + votes_ancestries.push(child.clone()); + } + + // The header we need to use when pre-commiting is the one at the highest height + // on our chain. 
+ let precommit_candidate = chain.last().map(|h| (h.hash(), *h.number())).unwrap(); + unsigned_precommits.push(precommit_candidate); + } + + for (i, (id, _weight)) in authorities.iter().enumerate() { + // Assign authorities to sign pre-commits in a round-robin fashion + let target = unsigned_precommits[i % forks as usize]; + let precommit = signed_precommit::(&id, target, round, set_id); - // I'm using the same header for all the voters since it doesn't matter as long - // as they all vote on blocks _ahead_ of the one we're interested in finalizing - for (id, _weight) in authorities.iter() { - let signer = extract_keyring(&id); - let precommit = signed_precommit::( - signer, - (precommit_header.hash(), *precommit_header.number()), - round, - set_id, - ); precommits.push(precommit); - votes_ancestries.push(precommit_header.clone()); } GrandpaJustification { @@ -73,8 +154,31 @@ pub fn make_justification_for_header( } } +fn generate_chain(fork_id: u8, depth: u32, ancestor: &H) -> Vec { + let mut headers = vec![ancestor.clone()]; + + for i in 1..depth { + let parent = &headers[(i - 1) as usize]; + let (hash, num) = (parent.hash(), *parent.number()); + + let mut header = test_header::(num + One::one()); + header.set_parent_hash(hash); + + // Modifying the digest so headers at the same height but in different forks have different + // hashes + header + .digest_mut() + .logs + .push(sp_runtime::DigestItem::Other(vec![fork_id])); + + headers.push(header); + } + + headers +} + fn signed_precommit( - signer: Ed25519Keyring, + signer: &Account, target: (H::Hash, H::Number), round: u64, set_id: SetId, @@ -83,13 +187,24 @@ fn signed_precommit( target_hash: target.0, target_number: target.1, }; + let encoded = sp_finality_grandpa::localized_payload(round, set_id, &finality_grandpa::Message::Precommit(precommit.clone())); - let signature = signer.sign(&encoded[..]).into(); + + let signature = signer.sign(&encoded); + let raw_signature: Vec = signature.to_bytes().into(); + + // 
Need to wrap our signature and id types that they match what our `SignedPrecommit` is expecting + let signature = AuthoritySignature::try_from(raw_signature).expect( + "We know our Keypair is good, + so our signature must also be good.", + ); + let id = (*signer).into(); + finality_grandpa::SignedPrecommit { precommit, signature, - id: signer.public().into(), + id, } } @@ -97,16 +212,19 @@ fn signed_precommit( /// /// The correct parent hash will be used if given a non-zero header. pub fn test_header(number: H::Number) -> H { - let mut header = H::new( - number, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ); + let default = |num| { + H::new( + num, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ) + }; + let mut header = default(number); if number != Zero::zero() { - let parent_hash = test_header::(number - One::one()).hash(); + let parent_hash = default(number - One::one()).hash(); header.set_parent_hash(parent_hash); } @@ -117,35 +235,3 @@ pub fn test_header(number: H::Number) -> H { pub fn header_id(index: u8) -> (H::Hash, H::Number) { (test_header::(index.into()).hash(), index.into()) } - -/// Get the identity of a test account given an ED25519 Public key. -pub fn extract_keyring(id: &AuthorityId) -> Ed25519Keyring { - let mut raw_public = [0; 32]; - raw_public.copy_from_slice(id.as_ref()); - Ed25519Keyring::from_raw_public(raw_public).unwrap() -} - -/// Get a valid set of voters for a Grandpa round. -pub fn voter_set() -> VoterSet { - VoterSet::new(authority_list()).unwrap() -} - -/// Convenience function to get a list of Grandpa authorities. -pub fn authority_list() -> AuthorityList { - vec![(alice(), 1), (bob(), 1), (charlie(), 1)] -} - -/// Get the Public key of the Alice test account. -pub fn alice() -> AuthorityId { - Ed25519Keyring::Alice.public().into() -} - -/// Get the Public key of the Bob test account. 
-pub fn bob() -> AuthorityId { - Ed25519Keyring::Bob.public().into() -} - -/// Get the Public key of the Charlie test account. -pub fn charlie() -> AuthorityId { - Ed25519Keyring::Charlie.public().into() -} diff --git a/polkadot/bridges/relays/ethereum/Cargo.toml b/polkadot/bridges/relays/bin-ethereum/Cargo.toml similarity index 69% rename from polkadot/bridges/relays/ethereum/Cargo.toml rename to polkadot/bridges/relays/bin-ethereum/Cargo.toml index 860c0815e2..efd9c0194b 100644 --- a/polkadot/bridges/relays/ethereum/Cargo.toml +++ b/polkadot/bridges/relays/bin-ethereum/Cargo.toml @@ -12,9 +12,9 @@ async-trait = "0.1.42" clap = { version = "2.33.3", features = ["yaml"] } codec = { package = "parity-scale-codec", version = "2.0.0" } env_logger = "0.8.3" -ethabi = { git = "https://github.com/paritytech/ethabi.git", branch = "td-eth-types-11" } -ethabi-contract = { git = "https://github.com/paritytech/ethabi.git", branch = "td-eth-types-11" } -ethabi-derive = { git = "https://github.com/paritytech/ethabi.git", branch = "td-eth-types-11" } +ethabi = { git = "https://github.com/paritytech/ethabi", branch = "td-eth-types-11" } +ethabi-contract = { git = "https://github.com/paritytech/ethabi", branch = "td-eth-types-11" } +ethabi-derive = { git = "https://github.com/paritytech/ethabi", branch = "td-eth-types-11" } futures = "0.3.12" hex = "0.4" hex-literal = "0.3" @@ -22,19 +22,19 @@ libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac" log = "0.4.14" num-traits = "0.2" serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0.62" +serde_json = "1.0.64" time = "0.2" # Bridge dependencies bp-currency-exchange = { path = "../../primitives/currency-exchange" } bp-eth-poa = { path = "../../primitives/ethereum-poa" } -exchange-relay = { path = "../exchange-relay" } -headers-relay = { path = "../headers-relay" } -messages-relay = { path = "../messages-relay" } -relay-ethereum-client = { path = "../ethereum-client" } -relay-rialto-client = 
{ path = "../rialto-client" } -relay-substrate-client = { path = "../substrate-client" } +exchange-relay = { path = "../exchange" } +headers-relay = { path = "../headers" } +messages-relay = { path = "../messages" } +relay-ethereum-client = { path = "../client-ethereum" } +relay-rialto-client = { path = "../client-rialto" } +relay-substrate-client = { path = "../client-substrate" } relay-utils = { path = "../utils" } rialto-runtime = { path = "../../bin/rialto/runtime" } diff --git a/polkadot/bridges/relays/ethereum/README.md b/polkadot/bridges/relays/bin-ethereum/README.md similarity index 100% rename from polkadot/bridges/relays/ethereum/README.md rename to polkadot/bridges/relays/bin-ethereum/README.md diff --git a/polkadot/bridges/relays/ethereum/res/substrate-bridge-abi.json b/polkadot/bridges/relays/bin-ethereum/res/substrate-bridge-abi.json similarity index 100% rename from polkadot/bridges/relays/ethereum/res/substrate-bridge-abi.json rename to polkadot/bridges/relays/bin-ethereum/res/substrate-bridge-abi.json diff --git a/polkadot/bridges/relays/ethereum/res/substrate-bridge-bytecode.hex b/polkadot/bridges/relays/bin-ethereum/res/substrate-bridge-bytecode.hex similarity index 100% rename from polkadot/bridges/relays/ethereum/res/substrate-bridge-bytecode.hex rename to polkadot/bridges/relays/bin-ethereum/res/substrate-bridge-bytecode.hex diff --git a/polkadot/bridges/relays/ethereum/res/substrate-bridge-metadata.txt b/polkadot/bridges/relays/bin-ethereum/res/substrate-bridge-metadata.txt similarity index 100% rename from polkadot/bridges/relays/ethereum/res/substrate-bridge-metadata.txt rename to polkadot/bridges/relays/bin-ethereum/res/substrate-bridge-metadata.txt diff --git a/polkadot/bridges/relays/ethereum/src/cli.yml b/polkadot/bridges/relays/bin-ethereum/src/cli.yml similarity index 96% rename from polkadot/bridges/relays/ethereum/src/cli.yml rename to polkadot/bridges/relays/bin-ethereum/src/cli.yml index c6a5b08e1b..78971787c0 100644 --- 
a/polkadot/bridges/relays/ethereum/src/cli.yml +++ b/polkadot/bridges/relays/bin-ethereum/src/cli.yml @@ -9,17 +9,17 @@ subcommands: - eth-host: ð-host long: eth-host value_name: ETH_HOST - help: Connect to Ethereum node at given host. + help: Connect to Ethereum node websocket server at given host. takes_value: true - eth-port: ð-port long: eth-port value_name: ETH_PORT - help: Connect to Ethereum node at given port. + help: Connect to Ethereum node websocket server at given port. takes_value: true - sub-host: &sub-host long: sub-host value_name: SUB_HOST - help: Connect to Substrate node at given host. + help: Connect to Substrate node websocket server at given host. takes_value: true - sub-port: &sub-port long: sub-port diff --git a/polkadot/bridges/relays/ethereum/src/ethereum_client.rs b/polkadot/bridges/relays/bin-ethereum/src/ethereum_client.rs similarity index 99% rename from polkadot/bridges/relays/ethereum/src/ethereum_client.rs rename to polkadot/bridges/relays/bin-ethereum/src/ethereum_client.rs index 46c2c76fee..71a3f38859 100644 --- a/polkadot/bridges/relays/ethereum/src/ethereum_client.rs +++ b/polkadot/bridges/relays/bin-ethereum/src/ethereum_client.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -29,7 +29,7 @@ use relay_ethereum_client::{ }; use relay_rialto_client::HeaderId as RialtoHeaderId; use relay_utils::{HeaderId, MaybeConnectionError}; -use sp_runtime::Justification; +use sp_runtime::EncodedJustification; use std::collections::HashSet; // to encode/decode contract calls @@ -68,7 +68,7 @@ pub trait EthereumHighLevelRpc { params: EthereumSigningParams, contract_address: Address, id: RialtoHeaderId, - justification: Justification, + justification: EncodedJustification, ) -> RpcResult; /// Submit ethereum transaction. 
@@ -194,7 +194,7 @@ impl EthereumHighLevelRpc for EthereumClient { params: EthereumSigningParams, contract_address: Address, id: RialtoHeaderId, - justification: Justification, + justification: EncodedJustification, ) -> RpcResult { let _ = self .submit_ethereum_transaction( diff --git a/polkadot/bridges/relays/ethereum/src/ethereum_deploy_contract.rs b/polkadot/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs similarity index 94% rename from polkadot/bridges/relays/ethereum/src/ethereum_deploy_contract.rs rename to polkadot/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs index 25f8c873e5..84c12be7a7 100644 --- a/polkadot/bridges/relays/ethereum/src/ethereum_deploy_contract.rs +++ b/polkadot/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -48,9 +48,7 @@ pub struct EthereumDeployContractParams { } /// Deploy Bridge contract on Ethereum chain. 
-pub fn run(params: EthereumDeployContractParams) { - let mut local_pool = futures::executor::LocalPool::new(); - +pub async fn run(params: EthereumDeployContractParams) { let EthereumDeployContractParams { eth_params, eth_sign, @@ -61,8 +59,8 @@ pub fn run(params: EthereumDeployContractParams) { eth_contract_code, } = params; - let result = local_pool.run_until(async move { - let eth_client = EthereumClient::new(eth_params); + let result = async move { + let eth_client = EthereumClient::new(eth_params).await.map_err(RpcError::Ethereum)?; let sub_client = SubstrateClient::::new(sub_params).await.map_err(RpcError::Substrate)?; let (initial_header_id, initial_header) = prepare_initial_header(&sub_client, sub_initial_header).await?; @@ -91,7 +89,7 @@ pub fn run(params: EthereumDeployContractParams) { initial_set_id, initial_set, ).await - }); + }.await; if let Err(error) = result { log::error!(target: "bridge", "{}", error); diff --git a/polkadot/bridges/relays/ethereum/src/ethereum_exchange.rs b/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange.rs similarity index 77% rename from polkadot/bridges/relays/ethereum/src/ethereum_exchange.rs rename to polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange.rs index 92ba211535..18470512b5 100644 --- a/polkadot/bridges/relays/ethereum/src/ethereum_exchange.rs +++ b/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -56,7 +56,6 @@ pub enum ExchangeRelayMode { } /// PoA exchange transaction relay params. -#[derive(Debug)] pub struct EthereumExchangeParams { /// Ethereum connection params. pub eth_params: EthereumConnectionParams, @@ -67,11 +66,24 @@ pub struct EthereumExchangeParams { /// Relay working mode. 
pub mode: ExchangeRelayMode, /// Metrics parameters. - pub metrics_params: Option, + pub metrics_params: MetricsParams, /// Instance of the bridge pallet being synchronized. pub instance: Arc, } +impl std::fmt::Debug for EthereumExchangeParams { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + f.debug_struct("EthereumExchangeParams") + .field("eth_params", &self.eth_params) + .field("sub_params", &self.sub_params) + .field("sub_sign", &sp_core::Pair::public(&self.sub_sign)) + .field("mode", &self.mode) + .field("metrics_params", &self.metrics_params) + .field("instance", &self.instance) + .finish() + } +} + /// Ethereum to Substrate exchange pipeline. struct EthereumToSubstrateExchange; @@ -130,8 +142,7 @@ impl RelayClient for EthereumTransactionsSource { type Error = RpcError; async fn reconnect(&mut self) -> Result<(), RpcError> { - self.client.reconnect(); - Ok(()) + self.client.reconnect().await.map_err(Into::into) } } @@ -283,19 +294,39 @@ impl TargetClient for SubstrateTransactionsTarget { } /// Relay exchange transaction proof(s) to Substrate node. 
-pub fn run(params: EthereumExchangeParams) { +pub async fn run(params: EthereumExchangeParams) { match params.mode { - ExchangeRelayMode::Single(eth_tx_hash) => run_single_transaction_relay(params, eth_tx_hash), - ExchangeRelayMode::Auto(eth_start_with_block_number) => { - run_auto_transactions_relay_loop(params, eth_start_with_block_number) + ExchangeRelayMode::Single(eth_tx_hash) => { + let result = run_single_transaction_relay(params, eth_tx_hash).await; + match result { + Ok(_) => log::info!( + target: "bridge", + "Ethereum transaction {} proof has been successfully submitted to Substrate node", + eth_tx_hash, + ), + Err(err) => log::error!( + target: "bridge", + "Error submitting Ethereum transaction {} proof to Substrate node: {}", + eth_tx_hash, + err, + ), + } } - }; + ExchangeRelayMode::Auto(eth_start_with_block_number) => { + let result = run_auto_transactions_relay_loop(params, eth_start_with_block_number).await; + if let Err(err) = result { + log::error!( + target: "bridge", + "Error auto-relaying Ethereum transactions proofs to Substrate node: {}", + err, + ); + } + } + } } /// Run single transaction proof relay and stop. -fn run_single_transaction_relay(params: EthereumExchangeParams, eth_tx_hash: H256) { - let mut local_pool = futures::executor::LocalPool::new(); - +async fn run_single_transaction_relay(params: EthereumExchangeParams, eth_tx_hash: H256) -> Result<(), String> { let EthereumExchangeParams { eth_params, sub_params, @@ -304,43 +335,25 @@ fn run_single_transaction_relay(params: EthereumExchangeParams, eth_tx_hash: H25 .. 
} = params; - let result = local_pool.run_until(async move { - let eth_client = EthereumClient::new(eth_params); - let sub_client = SubstrateClient::::new(sub_params) - .await - .map_err(RpcError::Substrate)?; + let eth_client = EthereumClient::new(eth_params).await.map_err(RpcError::Ethereum)?; + let sub_client = SubstrateClient::::new(sub_params) + .await + .map_err(RpcError::Substrate)?; - let source = EthereumTransactionsSource { client: eth_client }; - let target = SubstrateTransactionsTarget { - client: sub_client, - sign_params: sub_sign, - bridge_instance: instance, - }; + let source = EthereumTransactionsSource { client: eth_client }; + let target = SubstrateTransactionsTarget { + client: sub_client, + sign_params: sub_sign, + bridge_instance: instance, + }; - relay_single_transaction_proof(&source, &target, eth_tx_hash).await - }); - - match result { - Ok(_) => { - log::info!( - target: "bridge", - "Ethereum transaction {} proof has been successfully submitted to Substrate node", - eth_tx_hash, - ); - } - Err(err) => { - log::error!( - target: "bridge", - "Error submitting Ethereum transaction {} proof to Substrate node: {}", - eth_tx_hash, - err, - ); - } - } + relay_single_transaction_proof(&source, &target, eth_tx_hash).await } -/// Run auto-relay loop. -fn run_auto_transactions_relay_loop(params: EthereumExchangeParams, eth_start_with_block_number: Option) { +async fn run_auto_transactions_relay_loop( + params: EthereumExchangeParams, + eth_start_with_block_number: Option, +) -> Result<(), String> { let EthereumExchangeParams { eth_params, sub_params, @@ -350,45 +363,41 @@ fn run_auto_transactions_relay_loop(params: EthereumExchangeParams, eth_start_wi .. 
} = params; - let do_run_loop = move || -> Result<(), String> { - let eth_client = EthereumClient::new(eth_params); - let sub_client = async_std::task::block_on(SubstrateClient::::new(sub_params)) - .map_err(|err| format!("Error starting Substrate client: {:?}", err))?; + let eth_client = EthereumClient::new(eth_params) + .await + .map_err(|err| format!("Error starting Ethereum client: {:?}", err))?; + let sub_client = SubstrateClient::::new(sub_params) + .await + .map_err(|err| format!("Error starting Substrate client: {:?}", err))?; - let eth_start_with_block_number = match eth_start_with_block_number { - Some(eth_start_with_block_number) => eth_start_with_block_number, - None => { - async_std::task::block_on(sub_client.best_ethereum_finalized_block()) - .map_err(|err| { - format!( - "Error retrieving best finalized Ethereum block from Substrate node: {:?}", - err - ) - })? - .0 - } - }; - - run_loop( - InMemoryStorage::new(eth_start_with_block_number), - EthereumTransactionsSource { client: eth_client }, - SubstrateTransactionsTarget { - client: sub_client, - sign_params: sub_sign, - bridge_instance: instance, - }, - metrics_params, - futures::future::pending(), - ); - - Ok(()) + let eth_start_with_block_number = match eth_start_with_block_number { + Some(eth_start_with_block_number) => eth_start_with_block_number, + None => { + sub_client + .best_ethereum_finalized_block() + .await + .map_err(|err| { + format!( + "Error retrieving best finalized Ethereum block from Substrate node: {:?}", + err + ) + })? 
+ .0 + } }; - if let Err(err) = do_run_loop() { - log::error!( - target: "bridge", - "Error auto-relaying Ethereum transactions proofs to Substrate node: {}", - err, - ); - } + run_loop( + InMemoryStorage::new(eth_start_with_block_number), + EthereumTransactionsSource { client: eth_client }, + SubstrateTransactionsTarget { + client: sub_client, + sign_params: sub_sign, + bridge_instance: instance, + }, + metrics_params, + futures::future::pending(), + ) + .await?; + + Ok(()) } diff --git a/polkadot/bridges/relays/ethereum/src/ethereum_exchange_submit.rs b/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs similarity index 91% rename from polkadot/bridges/relays/ethereum/src/ethereum_exchange_submit.rs rename to polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs index d2842b78a4..09871a0fc7 100644 --- a/polkadot/bridges/relays/ethereum/src/ethereum_exchange_submit.rs +++ b/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -42,9 +42,7 @@ pub struct EthereumExchangeSubmitParams { } /// Submit single Ethereum -> Substrate exchange transaction. 
-pub fn run(params: EthereumExchangeSubmitParams) { - let mut local_pool = futures::executor::LocalPool::new(); - +pub async fn run(params: EthereumExchangeSubmitParams) { let EthereumExchangeSubmitParams { eth_params, eth_sign, @@ -53,8 +51,10 @@ pub fn run(params: EthereumExchangeSubmitParams) { sub_recipient, } = params; - let result: Result<_, String> = local_pool.run_until(async move { - let eth_client = EthereumClient::new(eth_params); + let result: Result<_, String> = async move { + let eth_client = EthereumClient::new(eth_params) + .await + .map_err(|err| format!("error connecting to Ethereum node: {:?}", err))?; let eth_signer_address = secret_to_address(ð_sign.signer); let sub_recipient_encoded = sub_recipient; @@ -92,7 +92,8 @@ pub fn run(params: EthereumExchangeSubmitParams) { .map_err(|err| format!("error submitting transaction: {:?}", err))?; Ok(eth_tx_unsigned) - }); + } + .await; match result { Ok(eth_tx_unsigned) => { diff --git a/polkadot/bridges/relays/ethereum/src/ethereum_sync_loop.rs b/polkadot/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs similarity index 91% rename from polkadot/bridges/relays/ethereum/src/ethereum_sync_loop.rs rename to polkadot/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs index c8741c2fe1..3dcd27e18f 100644 --- a/polkadot/bridges/relays/ethereum/src/ethereum_sync_loop.rs +++ b/polkadot/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -62,7 +62,6 @@ pub mod consts { } /// Ethereum synchronization parameters. -#[derive(Debug)] pub struct EthereumSyncParams { /// Ethereum connection params. pub eth_params: EthereumConnectionParams, @@ -73,11 +72,24 @@ pub struct EthereumSyncParams { /// Synchronization parameters. 
pub sync_params: HeadersSyncParams, /// Metrics parameters. - pub metrics_params: Option, + pub metrics_params: MetricsParams, /// Instance of the bridge pallet being synchronized. pub instance: Arc, } +impl Debug for EthereumSyncParams { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + f.debug_struct("EthereumSyncParams") + .field("eth_params", &self.eth_params) + .field("sub_params", &self.sub_params) + .field("sub_sign", &sp_core::Pair::public(&self.sub_sign)) + .field("sync_params", &self.sync_params) + .field("metrics_params", &self.metrics_params) + .field("instance", &self.instance) + .finish() + } +} + /// Ethereum synchronization pipeline. #[derive(Clone, Copy, Debug)] #[cfg_attr(test, derive(PartialEq))] @@ -122,8 +134,7 @@ impl RelayClient for EthereumHeadersSource { type Error = RpcError; async fn reconnect(&mut self) -> Result<(), RpcError> { - self.client.reconnect(); - Ok(()) + self.client.reconnect().await.map_err(Into::into) } } @@ -249,7 +260,7 @@ impl TargetClient for SubstrateHeadersTarget { } /// Run Ethereum headers synchronization. 
-pub fn run(params: EthereumSyncParams) -> Result<(), RpcError> { +pub async fn run(params: EthereumSyncParams) -> Result<(), RpcError> { let EthereumSyncParams { eth_params, sub_params, @@ -259,8 +270,8 @@ pub fn run(params: EthereumSyncParams) -> Result<(), RpcError> { instance, } = params; - let eth_client = EthereumClient::new(eth_params); - let sub_client = async_std::task::block_on(async { SubstrateClient::::new(sub_params).await })?; + let eth_client = EthereumClient::new(eth_params).await?; + let sub_client = SubstrateClient::::new(sub_params).await?; let sign_sub_transactions = match sync_params.target_tx_mode { TargetTransactionMode::Signed | TargetTransactionMode::Backup => true, @@ -279,7 +290,9 @@ pub fn run(params: EthereumSyncParams) -> Result<(), RpcError> { sync_params, metrics_params, futures::future::pending(), - ); + ) + .await + .map_err(RpcError::SyncLoop)?; Ok(()) } diff --git a/polkadot/bridges/relays/ethereum/src/instances.rs b/polkadot/bridges/relays/bin-ethereum/src/instances.rs similarity index 98% rename from polkadot/bridges/relays/ethereum/src/instances.rs rename to polkadot/bridges/relays/bin-ethereum/src/instances.rs index 7f29c26d8c..0d2a508f45 100644 --- a/polkadot/bridges/relays/ethereum/src/instances.rs +++ b/polkadot/bridges/relays/bin-ethereum/src/instances.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. +// Copyright 2020-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/relays/ethereum/src/main.rs b/polkadot/bridges/relays/bin-ethereum/src/main.rs similarity index 88% rename from polkadot/bridges/relays/ethereum/src/main.rs rename to polkadot/bridges/relays/bin-ethereum/src/main.rs index b75c0f44bb..234e1237fc 100644 --- a/polkadot/bridges/relays/ethereum/src/main.rs +++ b/polkadot/bridges/relays/bin-ethereum/src/main.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -34,7 +34,10 @@ use ethereum_sync_loop::EthereumSyncParams; use headers_relay::sync::TargetTransactionMode; use hex_literal::hex; use instances::{BridgeInstance, Kovan, RialtoPoA}; -use relay_utils::{initialize::initialize_relay, metrics::MetricsParams}; +use relay_utils::{ + initialize::initialize_relay, + metrics::{MetricsAddress, MetricsParams}, +}; use secp256k1::SecretKey; use sp_core::crypto::Pair; use substrate_sync_loop::SubstrateSyncParams; @@ -50,6 +53,10 @@ fn main() { let yaml = clap::load_yaml!("cli.yml"); let matches = clap::App::from_yaml(yaml).get_matches(); + async_std::task::block_on(run_command(&matches)); +} + +async fn run_command(matches: &clap::ArgMatches<'_>) { match matches.subcommand() { ("eth-to-sub", Some(eth_to_sub_matches)) => { log::info!(target: "bridge", "Starting ETH âž¡ SUB relay."); @@ -60,6 +67,7 @@ fn main() { return; } }) + .await .is_err() { log::error!(target: "bridge", "Unable to get Substrate genesis block for Ethereum sync."); @@ -74,6 +82,7 @@ fn main() { return; } }) + .await .is_err() { log::error!(target: "bridge", "Unable to get Substrate genesis block for Substrate sync."); @@ -87,7 +96,8 @@ fn main() { log::error!(target: "bridge", "Error during contract deployment: {}", err); return; } - }); + }) + 
.await; } ("eth-submit-exchange-tx", Some(eth_exchange_submit_matches)) => { log::info!(target: "bridge", "Submitting ETH âž¡ SUB exchange transaction."); @@ -97,7 +107,8 @@ fn main() { log::error!(target: "bridge", "Error submitting Eethereum exchange transaction: {}", err); return; } - }); + }) + .await; } ("eth-exchange-sub", Some(eth_exchange_matches)) => { log::info!(target: "bridge", "Starting ETH âž¡ SUB exchange transactions relay."); @@ -107,7 +118,8 @@ fn main() { log::error!(target: "bridge", "Error relaying Ethereum transactions proofs: {}", err); return; } - }); + }) + .await; } ("", _) => { log::error!(target: "bridge", "No subcommand specified"); @@ -158,10 +170,11 @@ fn substrate_connection_params(matches: &clap::ArgMatches) -> Result Result { - let mut params = RialtoSigningParams::default(); + let mut params = sp_keyring::AccountKeyring::Alice.pair(); + if let Some(sub_signer) = matches.value_of("sub-signer") { let sub_signer_password = matches.value_of("sub-signer-password"); - params.signer = sp_core::sr25519::Pair::from_string(sub_signer, sub_signer_password) + params = sp_core::sr25519::Pair::from_string(sub_signer, sub_signer_password) .map_err(|e| format!("Failed to parse sub-signer: {:?}", e))?; } Ok(params) @@ -243,14 +256,13 @@ fn ethereum_deploy_contract_params(matches: &clap::ArgMatches) -> Result Some( - sub_initial_authorities_set_id - .parse() - .map_err(|e| format!("Failed to parse sub-authorities-set-id: {}", e))?, - ), - None => None, - }; + let sub_initial_authorities_set_id = matches + .value_of("sub-authorities-set-id") + .map(|set| { + set.parse() + .map_err(|e| format!("Failed to parse sub-authorities-set-id: {}", e)) + }) + .transpose()?; let sub_initial_authorities_set = parse_hex_argument(matches, "sub-authorities-set")?; let sub_initial_header = parse_hex_argument(matches, "sub-initial-header")?; @@ -270,23 +282,26 @@ fn ethereum_deploy_contract_params(matches: &clap::ArgMatches) -> Result Result { - let eth_nonce = if 
let Some(eth_nonce) = matches.value_of("eth-nonce") { - Some( + let eth_nonce = matches + .value_of("eth-nonce") + .map(|eth_nonce| { relay_ethereum_client::types::U256::from_dec_str(ð_nonce) - .map_err(|e| format!("Failed to parse eth-nonce: {}", e))?, - ) - } else { - None - }; + .map_err(|e| format!("Failed to parse eth-nonce: {}", e)) + }) + .transpose()?; - let eth_amount = if let Some(eth_amount) = matches.value_of("eth-amount") { - eth_amount - .parse() - .map_err(|e| format!("Failed to parse eth-amount: {}", e))? - } else { - // This is in Wei, represents 1 ETH - 1_000_000_000_000_000_000_u64.into() - }; + let eth_amount = matches + .value_of("eth-amount") + .map(|eth_amount| { + eth_amount + .parse() + .map_err(|e| format!("Failed to parse eth-amount: {}", e)) + }) + .transpose()? + .unwrap_or_else(|| { + // This is in Wei, represents 1 ETH + 1_000_000_000_000_000_000_u64.into() + }); // This is the well-known Substrate account of Ferdie let default_recepient = hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c"); @@ -329,14 +344,16 @@ fn ethereum_exchange_params(matches: &clap::ArgMatches) -> Result ethereum_exchange::ExchangeRelayMode::Auto(match matches.value_of("eth-start-with-block") { - Some(eth_start_with_block) => Some( - eth_start_with_block - .parse() - .map_err(|e| format!("Failed to parse eth-start-with-block: {}", e))?, - ), - None => None, - }), + None => ethereum_exchange::ExchangeRelayMode::Auto( + matches + .value_of("eth-start-with-block") + .map(|eth_start_with_block| { + eth_start_with_block + .parse() + .map_err(|e| format!("Failed to parse eth-start-with-block: {}", e)) + }) + .transpose()?, + ), }; let params = EthereumExchangeParams { @@ -353,12 +370,12 @@ fn ethereum_exchange_params(matches: &clap::ArgMatches) -> Result Result, String> { +fn metrics_params(matches: &clap::ArgMatches) -> Result { if matches.is_present("no-prometheus") { - return Ok(None); + return Ok(None.into()); } - let mut metrics_params = 
MetricsParams::default(); + let mut metrics_params = MetricsAddress::default(); if let Some(prometheus_host) = matches.value_of("prometheus-host") { metrics_params.host = prometheus_host.into(); @@ -369,7 +386,7 @@ fn metrics_params(matches: &clap::ArgMatches) -> Result, S .map_err(|e| format!("Failed to parse prometheus-port: {}", e))?; } - Ok(Some(metrics_params)) + Ok(Some(metrics_params).into()) } fn instance_params(matches: &clap::ArgMatches) -> Result, String> { @@ -394,16 +411,3 @@ fn parse_hex_argument(matches: &clap::ArgMatches, arg: &str) -> Result Ok(None), } } - -#[cfg(test)] -mod tests { - - // Details: https://github.com/paritytech/parity-bridges-common/issues/118 - #[test] - fn async_std_sleep_works() { - let mut local_pool = futures::executor::LocalPool::new(); - local_pool.run_until(async move { - async_std::task::sleep(std::time::Duration::from_secs(1)).await; - }); - } -} diff --git a/polkadot/bridges/relays/ethereum/src/rialto_client.rs b/polkadot/bridges/relays/bin-ethereum/src/rialto_client.rs similarity index 92% rename from polkadot/bridges/relays/ethereum/src/rialto_client.rs rename to polkadot/bridges/relays/bin-ethereum/src/rialto_client.rs index 861ef8efeb..d9c0f265cb 100644 --- a/polkadot/bridges/relays/ethereum/src/rialto_client.rs +++ b/polkadot/bridges/relays/bin-ethereum/src/rialto_client.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify @@ -156,13 +156,18 @@ impl SubmitEthereumHeaders for SubstrateClient { ) -> SubmittedHeaders { let ids = headers.iter().map(|header| header.id()).collect(); let submission_result = async { - let account_id = params.signer.public().as_array_ref().clone().into(); - let nonce = self.next_account_index(account_id).await?; - - let call = instance.build_signed_header_call(headers); - let transaction = Rialto::sign_transaction(self, ¶ms.signer, nonce, call); - - let _ = self.submit_extrinsic(Bytes(transaction.encode())).await?; + self.submit_signed_extrinsic((*params.public().as_array_ref()).into(), |transaction_nonce| { + Bytes( + Rialto::sign_transaction( + *self.genesis_hash(), + ¶ms, + transaction_nonce, + instance.build_signed_header_call(headers), + ) + .encode(), + ) + }) + .await?; Ok(()) } .await; @@ -197,7 +202,7 @@ impl SubmitEthereumHeaders for SubstrateClient { let call = instance.build_unsigned_header_call(header); let transaction = create_unsigned_submit_transaction(call); - match self.submit_extrinsic(Bytes(transaction.encode())).await { + match self.submit_unsigned_extrinsic(Bytes(transaction.encode())).await { Ok(_) => submitted_headers.submitted.push(id), Err(error) => { submitted_headers.rejected.push(id); @@ -252,13 +257,18 @@ impl SubmitEthereumExchangeTransactionProof for SubstrateClient { instance: Arc, proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, ) -> RpcResult<()> { - let account_id = params.signer.public().as_array_ref().clone().into(); - let nonce = self.next_account_index(account_id).await?; - - let call = instance.build_currency_exchange_call(proof); - let transaction = Rialto::sign_transaction(self, ¶ms.signer, nonce, call); - - let _ = self.submit_extrinsic(Bytes(transaction.encode())).await?; + self.submit_signed_extrinsic((*params.public().as_array_ref()).into(), |transaction_nonce| { + Bytes( + Rialto::sign_transaction( + 
*self.genesis_hash(), + ¶ms, + transaction_nonce, + instance.build_currency_exchange_call(proof), + ) + .encode(), + ) + }) + .await?; Ok(()) } } diff --git a/polkadot/bridges/relays/ethereum/src/rpc_errors.rs b/polkadot/bridges/relays/bin-ethereum/src/rpc_errors.rs similarity index 94% rename from polkadot/bridges/relays/ethereum/src/rpc_errors.rs rename to polkadot/bridges/relays/bin-ethereum/src/rpc_errors.rs index 9f7f14cf9a..27b233135f 100644 --- a/polkadot/bridges/relays/ethereum/src/rpc_errors.rs +++ b/polkadot/bridges/relays/bin-ethereum/src/rpc_errors.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -29,6 +29,8 @@ pub enum RpcError { Ethereum(EthereumNodeError), /// An error occured when interacting with a Substrate node. Substrate(SubstrateNodeError), + /// Error running relay loop. + SyncLoop(String), } impl From for String { @@ -37,6 +39,7 @@ impl From for String { RpcError::Serialization(e) => e.to_string(), RpcError::Ethereum(e) => e.to_string(), RpcError::Substrate(e) => e.to_string(), + RpcError::SyncLoop(e) => e, } } } diff --git a/polkadot/bridges/relays/ethereum/src/substrate_sync_loop.rs b/polkadot/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs similarity index 90% rename from polkadot/bridges/relays/ethereum/src/substrate_sync_loop.rs rename to polkadot/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs index a1cea3f424..4e7e433d82 100644 --- a/polkadot/bridges/relays/ethereum/src/substrate_sync_loop.rs +++ b/polkadot/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify @@ -36,13 +36,13 @@ use relay_substrate_client::{ ConnectionParams as SubstrateConnectionParams, }; use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient}; -use sp_runtime::Justification; +use sp_runtime::EncodedJustification; use std::fmt::Debug; use std::{collections::HashSet, time::Duration}; pub mod consts { - use super::Duration; + use super::*; /// Interval at which we check new Ethereum blocks. pub const ETHEREUM_TICK_INTERVAL: Duration = Duration::from_secs(5); @@ -68,7 +68,7 @@ pub struct SubstrateSyncParams { /// Synchronization parameters. pub sync_params: HeadersSyncParams, /// Metrics parameters. - pub metrics_params: Option, + pub metrics_params: MetricsParams, } /// Substrate synchronization pipeline. @@ -84,7 +84,7 @@ impl HeadersSyncPipeline for SubstrateHeadersSyncPipeline { type Number = rialto_runtime::BlockNumber; type Header = RialtoSyncHeader; type Extra = (); - type Completion = Justification; + type Completion = EncodedJustification; fn estimate_size(source: &QueuedHeader) -> usize { source.header().encode().len() @@ -123,8 +123,7 @@ impl RelayClient for EthereumHeadersTarget { type Error = RpcError; async fn reconnect(&mut self) -> Result<(), RpcError> { - self.client.reconnect(); - Ok(()) + self.client.reconnect().await.map_err(Into::into) } } @@ -152,7 +151,11 @@ impl TargetClient for EthereumHeadersTarget { self.client.incomplete_substrate_headers(self.contract).await } - async fn complete_header(&self, id: RialtoHeaderId, completion: Justification) -> Result { + async fn complete_header( + &self, + id: RialtoHeaderId, + completion: EncodedJustification, + ) -> Result { self.client .complete_substrate_header(self.sign_params.clone(), self.contract, id, completion) .await @@ -164,7 +167,7 @@ impl TargetClient for EthereumHeadersTarget { } /// Run Substrate headers synchronization. 
-pub fn run(params: SubstrateSyncParams) -> Result<(), RpcError> { +pub async fn run(params: SubstrateSyncParams) -> Result<(), RpcError> { let SubstrateSyncParams { sub_params, eth_params, @@ -174,8 +177,8 @@ pub fn run(params: SubstrateSyncParams) -> Result<(), RpcError> { metrics_params, } = params; - let eth_client = EthereumClient::new(eth_params); - let sub_client = async_std::task::block_on(async { SubstrateClient::::new(sub_params).await })?; + let eth_client = EthereumClient::new(eth_params).await?; + let sub_client = SubstrateClient::::new(sub_params).await?; let target = EthereumHeadersTarget::new(eth_client, eth_contract_address, eth_sign); let source = SubstrateHeadersSource::new(sub_client); @@ -189,7 +192,9 @@ pub fn run(params: SubstrateSyncParams) -> Result<(), RpcError> { sync_params, metrics_params, futures::future::pending(), - ); + ) + .await + .map_err(RpcError::SyncLoop)?; Ok(()) } diff --git a/polkadot/bridges/relays/ethereum/src/substrate_types.rs b/polkadot/bridges/relays/bin-ethereum/src/substrate_types.rs similarity index 98% rename from polkadot/bridges/relays/ethereum/src/substrate_types.rs rename to polkadot/bridges/relays/bin-ethereum/src/substrate_types.rs index b88f383139..af68d7e028 100644 --- a/polkadot/bridges/relays/ethereum/src/substrate_types.rs +++ b/polkadot/bridges/relays/bin-ethereum/src/substrate_types.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. +// Copyright 2020-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/relays/bin-substrate/Cargo.toml b/polkadot/bridges/relays/bin-substrate/Cargo.toml new file mode 100644 index 0000000000..d203201e60 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/Cargo.toml @@ -0,0 +1,61 @@ +[package] +name = "substrate-relay" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +anyhow = "1.0" +async-std = "1.9.0" +async-trait = "0.1.42" +codec = { package = "parity-scale-codec", version = "2.0.0" } +futures = "0.3.12" +hex = "0.4" +log = "0.4.14" +num-format = "0.4" +num-traits = "0.2" +paste = "1.0" +structopt = "0.3" + +# Bridge dependencies + +bp-header-chain = { path = "../../primitives/header-chain" } +bp-kusama = { path = "../../primitives/chain-kusama" } +bp-messages = { path = "../../primitives/messages" } +bp-millau = { path = "../../primitives/chain-millau" } +bp-polkadot = { path = "../../primitives/chain-polkadot" } +bp-rialto = { path = "../../primitives/chain-rialto" } +bp-rococo = { path = "../../primitives/chain-rococo" } +bp-runtime = { path = "../../primitives/runtime" } +bp-westend = { path = "../../primitives/chain-westend" } +bridge-runtime-common = { path = "../../bin/runtime-common" } +finality-grandpa = { version = "0.14.0" } +finality-relay = { path = "../finality" } +headers-relay = { path = "../headers" } +messages-relay = { path = "../messages" } +millau-runtime = { path = "../../bin/millau/runtime" } +pallet-bridge-dispatch = { path = "../../modules/dispatch" } +pallet-bridge-messages = { path = "../../modules/messages" } +relay-kusama-client = { path = "../client-kusama" } +relay-millau-client = { path = "../client-millau" } +relay-polkadot-client = { path = "../client-polkadot" } +relay-rialto-client = { path = "../client-rialto" } +relay-rococo-client = { path = "../client-rococo" } 
+relay-substrate-client = { path = "../client-substrate" } +relay-utils = { path = "../utils" } +relay-westend-client = { path = "../client-westend" } +rialto-runtime = { path = "../../bin/rialto/runtime" } + +# Substrate Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[dev-dependencies] +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +hex-literal = "0.3" diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/millau.rs b/polkadot/bridges/relays/bin-substrate/src/chains/millau.rs new file mode 100644 index 0000000000..ac5e611fdb --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/millau.rs @@ -0,0 +1,101 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Millau chain specification for CLI. 
+ +use crate::cli::{ + bridge, + encode_call::{self, Call, CliEncodeCall}, + encode_message, send_message, CliChain, +}; +use codec::Decode; +use frame_support::weights::{GetDispatchInfo, Weight}; +use pallet_bridge_dispatch::{CallOrigin, MessagePayload}; +use relay_millau_client::Millau; +use sp_version::RuntimeVersion; + +impl CliEncodeCall for Millau { + fn max_extrinsic_size() -> u32 { + bp_millau::max_extrinsic_size() + } + + fn encode_call(call: &Call) -> anyhow::Result { + Ok(match call { + Call::Raw { data } => Decode::decode(&mut &*data.0)?, + Call::Remark { remark_payload, .. } => millau_runtime::Call::System(millau_runtime::SystemCall::remark( + remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), + )), + Call::Transfer { recipient, amount } => millau_runtime::Call::Balances( + millau_runtime::BalancesCall::transfer(recipient.raw_id(), amount.cast()), + ), + Call::BridgeSendMessage { + lane, + payload, + fee, + bridge_instance_index, + } => match *bridge_instance_index { + bridge::MILLAU_TO_RIALTO_INDEX => { + let payload = Decode::decode(&mut &*payload.0)?; + millau_runtime::Call::BridgeRialtoMessages(millau_runtime::MessagesCall::send_message( + lane.0, + payload, + fee.cast(), + )) + } + _ => anyhow::bail!( + "Unsupported target bridge pallet with instance index: {}", + bridge_instance_index + ), + }, + }) + } +} + +impl CliChain for Millau { + const RUNTIME_VERSION: RuntimeVersion = millau_runtime::VERSION; + + type KeyPair = sp_core::sr25519::Pair; + type MessagePayload = MessagePayload>; + + fn ss58_format() -> u16 { + millau_runtime::SS58Prefix::get() as u16 + } + + fn max_extrinsic_weight() -> Weight { + bp_millau::max_extrinsic_weight() + } + + // TODO [#854|#843] support multiple bridges? 
+ fn encode_message(message: encode_message::MessagePayload) -> Result { + match message { + encode_message::MessagePayload::Raw { data } => MessagePayload::decode(&mut &*data.0) + .map_err(|e| format!("Failed to decode Millau's MessagePayload: {:?}", e)), + encode_message::MessagePayload::Call { mut call, mut sender } => { + type Source = Millau; + type Target = relay_rialto_client::Rialto; + + sender.enforce_chain::(); + let spec_version = Target::RUNTIME_VERSION.spec_version; + let origin = CallOrigin::SourceAccount(sender.raw_id()); + encode_call::preprocess_call::(&mut call, bridge::MILLAU_TO_RIALTO_INDEX); + let call = Target::encode_call(&call).map_err(|e| e.to_string())?; + let weight = call.get_dispatch_info().weight; + + Ok(send_message::message_payload(spec_version, weight, origin, &call)) + } + } + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs b/polkadot/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs new file mode 100644 index 0000000000..58f0620b07 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs @@ -0,0 +1,53 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Millau-to-Rialto headers sync entrypoint. 
+ +use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; + +use bp_header_chain::justification::GrandpaJustification; +use codec::Encode; +use relay_millau_client::{Millau, SyncHeader as MillauSyncHeader}; +use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; +use relay_substrate_client::{Chain, TransactionSignScheme}; +use sp_core::{Bytes, Pair}; + +/// Millau-to-Rialto finality sync pipeline. +pub(crate) type MillauFinalityToRialto = SubstrateFinalityToSubstrate; + +impl SubstrateFinalitySyncPipeline for MillauFinalityToRialto { + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD; + + type TargetChain = Rialto; + + fn transactions_author(&self) -> bp_rialto::AccountId { + (*self.target_sign.public().as_array_ref()).into() + } + + fn make_submit_finality_proof_transaction( + &self, + transaction_nonce: ::Index, + header: MillauSyncHeader, + proof: GrandpaJustification, + ) -> Bytes { + let call = rialto_runtime::BridgeGrandpaMillauCall::submit_finality_proof(header.into_inner(), proof).into(); + + let genesis_hash = *self.target_client.genesis_hash(); + let transaction = Rialto::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + + Bytes(transaction.encode()) + } +} diff --git a/polkadot/bridges/relays/substrate/src/millau_messages_to_rialto.rs b/polkadot/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs similarity index 58% rename from polkadot/bridges/relays/substrate/src/millau_messages_to_rialto.rs rename to polkadot/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs index ebab5cfb38..d96fa7b797 100644 --- a/polkadot/bridges/relays/substrate/src/millau_messages_to_rialto.rs +++ b/polkadot/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
// This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -16,13 +16,13 @@ //! Millau-to-Rialto messages sync entrypoint. -use crate::messages_lane::{select_delivery_transaction_limits, SubstrateMessageLane, SubstrateMessageLaneToSubstrate}; +use crate::messages_lane::{ + select_delivery_transaction_limits, MessagesRelayParams, SubstrateMessageLane, SubstrateMessageLaneToSubstrate, +}; use crate::messages_source::SubstrateMessagesSource; use crate::messages_target::SubstrateMessagesTarget; -use crate::{MillauClient, RialtoClient}; -use async_trait::async_trait; -use bp_message_lane::{LaneId, MessageNonce}; +use bp_messages::MessageNonce; use bp_runtime::{MILLAU_BRIDGE_INSTANCE, RIALTO_BRIDGE_INSTANCE}; use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; use codec::Encode; @@ -30,15 +30,17 @@ use frame_support::dispatch::GetDispatchInfo; use messages_relay::message_lane::MessageLane; use relay_millau_client::{HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams}; use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{Chain, Error as SubstrateError, TransactionSignScheme}; -use relay_utils::metrics::MetricsParams; -use sp_core::Pair; +use relay_substrate_client::{ + metrics::{FloatStorageValueMetric, StorageProofOverheadMetric}, + Chain, TransactionSignScheme, +}; +use sp_core::{Bytes, Pair}; use std::{ops::RangeInclusive, time::Duration}; /// Millau-to-Rialto message lane. 
-type MillauMessagesToRialto = SubstrateMessageLaneToSubstrate; +pub type MillauMessagesToRialto = + SubstrateMessageLaneToSubstrate; -#[async_trait] impl SubstrateMessageLane for MillauMessagesToRialto { const OUTBOUND_LANE_MESSAGES_DISPATCH_WEIGHT_METHOD: &'static str = bp_rialto::TO_RIALTO_MESSAGES_DISPATCH_WEIGHT_METHOD; @@ -51,24 +53,28 @@ impl SubstrateMessageLane for MillauMessagesToRialto { bp_millau::FROM_MILLAU_LATEST_CONFIRMED_NONCE_METHOD; const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_millau::FROM_MILLAU_UNREWARDED_RELAYERS_STATE; - const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_millau::FINALIZED_MILLAU_BLOCK_METHOD; - const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_rialto::FINALIZED_RIALTO_BLOCK_METHOD; + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD; + const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD; - type SourceSignedTransaction = ::SignedTransaction; - type TargetSignedTransaction = ::SignedTransaction; + type SourceChain = Millau; + type TargetChain = Rialto; - async fn make_messages_receiving_proof_transaction( + fn source_transactions_author(&self) -> bp_rialto::AccountId { + (*self.source_sign.public().as_array_ref()).into() + } + + fn make_messages_receiving_proof_transaction( &self, + transaction_nonce: ::Index, _generated_at_block: RialtoHeaderId, proof: ::MessagesReceivingProof, - ) -> Result { + ) -> Bytes { let (relayers_state, proof) = proof; - let account_id = self.source_sign.signer.public().as_array_ref().clone().into(); - let nonce = self.source_client.next_account_index(account_id).await?; let call: millau_runtime::Call = - millau_runtime::MessageLaneCall::receive_messages_delivery_proof(proof, relayers_state).into(); + millau_runtime::MessagesCall::receive_messages_delivery_proof(proof, relayers_state).into(); let call_weight = 
call.get_dispatch_info().weight; - let transaction = Millau::sign_transaction(&self.source_client, &self.source_sign.signer, nonce, call); + let genesis_hash = *self.source_client.genesis_hash(); + let transaction = Millau::sign_transaction(genesis_hash, &self.source_sign, transaction_nonce, call); log::trace!( target: "bridge", "Prepared Rialto -> Millau confirmation transaction. Weight: {}/{}, size: {}/{}", @@ -77,15 +83,20 @@ impl SubstrateMessageLane for MillauMessagesToRialto { transaction.encode().len(), bp_millau::max_extrinsic_size(), ); - Ok(transaction) + Bytes(transaction.encode()) } - async fn make_messages_delivery_transaction( + fn target_transactions_author(&self) -> bp_rialto::AccountId { + (*self.target_sign.public().as_array_ref()).into() + } + + fn make_messages_delivery_transaction( &self, + transaction_nonce: ::Index, _generated_at_header: MillauHeaderId, _nonces: RangeInclusive, proof: ::MessagesProof, - ) -> Result { + ) -> Bytes { let (dispatch_weight, proof) = proof; let FromBridgedChainMessagesProof { ref nonces_start, @@ -93,9 +104,7 @@ impl SubstrateMessageLane for MillauMessagesToRialto { .. 
} = proof; let messages_count = nonces_end - nonces_start + 1; - let account_id = self.target_sign.signer.public().as_array_ref().clone().into(); - let nonce = self.target_client.next_account_index(account_id).await?; - let call: rialto_runtime::Call = rialto_runtime::MessageLaneCall::receive_messages_proof( + let call: rialto_runtime::Call = rialto_runtime::MessagesCall::receive_messages_proof( self.relayer_id_at_source.clone(), proof, messages_count as _, @@ -103,7 +112,8 @@ impl SubstrateMessageLane for MillauMessagesToRialto { ) .into(); let call_weight = call.get_dispatch_info().weight; - let transaction = Rialto::sign_transaction(&self.target_client, &self.target_sign.signer, nonce, call); + let genesis_hash = *self.target_client.genesis_hash(); + let transaction = Rialto::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); log::trace!( target: "bridge", "Prepared Millau -> Rialto delivery transaction. Weight: {}/{}, size: {}/{}", @@ -112,33 +122,40 @@ impl SubstrateMessageLane for MillauMessagesToRialto { transaction.encode().len(), bp_rialto::max_extrinsic_size(), ); - Ok(transaction) + Bytes(transaction.encode()) } } /// Millau node as messages source. -type MillauSourceClient = SubstrateMessagesSource; +type MillauSourceClient = SubstrateMessagesSource< + Millau, + MillauMessagesToRialto, + millau_runtime::Runtime, + millau_runtime::WithRialtoMessagesInstance, +>; /// Rialto node as messages target. -type RialtoTargetClient = SubstrateMessagesTarget; +type RialtoTargetClient = SubstrateMessagesTarget< + Rialto, + MillauMessagesToRialto, + rialto_runtime::Runtime, + rialto_runtime::WithMillauMessagesInstance, +>; /// Run Millau-to-Rialto messages sync. 
-pub fn run( - millau_client: MillauClient, - millau_sign: MillauSigningParams, - rialto_client: RialtoClient, - rialto_sign: RialtoSigningParams, - lane_id: LaneId, - metrics_params: Option, -) { +pub async fn run( + params: MessagesRelayParams, +) -> Result<(), String> { let stall_timeout = Duration::from_secs(5 * 60); - let relayer_id_at_millau = millau_sign.signer.public().as_array_ref().clone().into(); + let relayer_id_at_millau = (*params.source_sign.public().as_array_ref()).into(); + let lane_id = params.lane_id; + let source_client = params.source_client; let lane = MillauMessagesToRialto { - source_client: millau_client.clone(), - source_sign: millau_sign, - target_client: rialto_client.clone(), - target_sign: rialto_sign, + source_client: source_client.clone(), + source_sign: params.source_sign, + target_client: params.target_client.clone(), + target_sign: params.target_sign, relayer_id_at_source: relayer_id_at_millau, }; @@ -146,7 +163,7 @@ pub fn run( let max_messages_size_in_single_batch = bp_rialto::max_extrinsic_size() as usize / 3; // TODO: use Millau weights after https://github.com/paritytech/parity-bridges-common/issues/390 let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = - select_delivery_transaction_limits::>( + select_delivery_transaction_limits::>( bp_rialto::max_extrinsic_weight(), bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, ); @@ -179,9 +196,50 @@ pub fn run( max_messages_size_in_single_batch, }, }, - MillauSourceClient::new(millau_client, lane.clone(), lane_id, RIALTO_BRIDGE_INSTANCE), - RialtoTargetClient::new(rialto_client, lane, lane_id, MILLAU_BRIDGE_INSTANCE), - metrics_params, + MillauSourceClient::new( + source_client.clone(), + lane.clone(), + lane_id, + RIALTO_BRIDGE_INSTANCE, + params.target_to_source_headers_relay, + ), + RialtoTargetClient::new( + params.target_client, + lane, + lane_id, + MILLAU_BRIDGE_INSTANCE, + params.source_to_target_headers_relay, + ), + relay_utils::relay_metrics( 
+ Some(messages_relay::message_lane_loop::metrics_prefix::< + MillauMessagesToRialto, + >(&lane_id)), + params.metrics_params, + ) + .standalone_metric(|registry, prefix| { + StorageProofOverheadMetric::new( + registry, + prefix, + source_client.clone(), + "millau_storage_proof_overhead".into(), + "Millau storage proof overhead".into(), + ) + })? + .standalone_metric(|registry, prefix| { + FloatStorageValueMetric::<_, sp_runtime::FixedU128>::new( + registry, + prefix, + source_client, + sp_core::storage::StorageKey( + millau_runtime::rialto_messages::RialtoToMillauConversionRate::key().to_vec(), + ), + Some(millau_runtime::rialto_messages::INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE), + "millau_rialto_to_millau_conversion_rate".into(), + "Rialto to Millau tokens conversion rate (used by Rialto)".into(), + ) + })? + .into_params(), futures::future::pending(), - ); + ) + .await } diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/mod.rs b/polkadot/bridges/relays/bin-substrate/src/chains/mod.rs new file mode 100644 index 0000000000..9e6a7361d5 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/mod.rs @@ -0,0 +1,335 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Chain-specific relayer configuration. 
+ +pub mod millau_headers_to_rialto; +pub mod millau_messages_to_rialto; +pub mod rialto_headers_to_millau; +pub mod rialto_messages_to_millau; +pub mod rococo_headers_to_westend; +pub mod westend_headers_to_millau; +pub mod westend_headers_to_rococo; + +mod millau; +mod rialto; +mod rococo; +mod westend; + +use relay_utils::metrics::{FloatJsonValueMetric, MetricsParams}; + +pub(crate) fn add_polkadot_kusama_price_metrics( + params: MetricsParams, +) -> anyhow::Result { + Ok( + relay_utils::relay_metrics(Some(finality_relay::metrics_prefix::()), params) + // Polkadot/Kusama prices are added as metrics here, because atm we don't have Polkadot <-> Kusama + // relays, but we want to test metrics/dashboards in advance + .standalone_metric(|registry, prefix| { + FloatJsonValueMetric::new( + registry, + prefix, + "https://api.coingecko.com/api/v3/simple/price?ids=Polkadot&vs_currencies=btc".into(), + "$.polkadot.btc".into(), + "polkadot_to_base_conversion_rate".into(), + "Rate used to convert from DOT to some BASE tokens".into(), + ) + }) + .map_err(|e| anyhow::format_err!("{}", e))? + .standalone_metric(|registry, prefix| { + FloatJsonValueMetric::new( + registry, + prefix, + "https://api.coingecko.com/api/v3/simple/price?ids=Kusama&vs_currencies=btc".into(), + "$.kusama.btc".into(), + "kusama_to_base_conversion_rate".into(), + "Rate used to convert from KSM to some BASE tokens".into(), + ) + }) + .map_err(|e| anyhow::format_err!("{}", e))? 
+ .into_params(), + ) +} + +#[cfg(test)] +mod tests { + use crate::cli::{encode_call, send_message}; + use bp_messages::source_chain::TargetHeaderChain; + use codec::Encode; + use frame_support::dispatch::GetDispatchInfo; + use relay_millau_client::Millau; + use relay_rialto_client::Rialto; + use relay_substrate_client::TransactionSignScheme; + use sp_core::Pair; + use sp_runtime::traits::{IdentifyAccount, Verify}; + + #[test] + fn millau_signature_is_valid_on_rialto() { + let millau_sign = relay_millau_client::SigningParams::from_string("//Dave", None).unwrap(); + + let call = rialto_runtime::Call::System(rialto_runtime::SystemCall::remark(vec![])); + + let millau_public: bp_millau::AccountSigner = millau_sign.public().into(); + let millau_account_id: bp_millau::AccountId = millau_public.into_account(); + + let digest = millau_runtime::rialto_account_ownership_digest( + &call, + millau_account_id, + rialto_runtime::VERSION.spec_version, + ); + + let rialto_signer = relay_rialto_client::SigningParams::from_string("//Dave", None).unwrap(); + let signature = rialto_signer.sign(&digest); + + assert!(signature.verify(&digest[..], &rialto_signer.public())); + } + + #[test] + fn rialto_signature_is_valid_on_millau() { + let rialto_sign = relay_rialto_client::SigningParams::from_string("//Dave", None).unwrap(); + + let call = millau_runtime::Call::System(millau_runtime::SystemCall::remark(vec![])); + + let rialto_public: bp_rialto::AccountSigner = rialto_sign.public().into(); + let rialto_account_id: bp_rialto::AccountId = rialto_public.into_account(); + + let digest = rialto_runtime::millau_account_ownership_digest( + &call, + rialto_account_id, + millau_runtime::VERSION.spec_version, + ); + + let millau_signer = relay_millau_client::SigningParams::from_string("//Dave", None).unwrap(); + let signature = millau_signer.sign(&digest); + + assert!(signature.verify(&digest[..], &millau_signer.public())); + } + + #[test] + fn 
maximal_rialto_to_millau_message_arguments_size_is_computed_correctly() { + use rialto_runtime::millau_messages::Millau; + + let maximal_remark_size = encode_call::compute_maximal_message_arguments_size( + bp_rialto::max_extrinsic_size(), + bp_millau::max_extrinsic_size(), + ); + + let call: millau_runtime::Call = millau_runtime::SystemCall::remark(vec![42; maximal_remark_size as _]).into(); + let payload = send_message::message_payload( + Default::default(), + call.get_dispatch_info().weight, + pallet_bridge_dispatch::CallOrigin::SourceRoot, + &call, + ); + assert_eq!(Millau::verify_message(&payload), Ok(())); + + let call: millau_runtime::Call = + millau_runtime::SystemCall::remark(vec![42; (maximal_remark_size + 1) as _]).into(); + let payload = send_message::message_payload( + Default::default(), + call.get_dispatch_info().weight, + pallet_bridge_dispatch::CallOrigin::SourceRoot, + &call, + ); + assert!(Millau::verify_message(&payload).is_err()); + } + + #[test] + fn maximal_size_remark_to_rialto_is_generated_correctly() { + assert!( + bridge_runtime_common::messages::target::maximal_incoming_message_size( + bp_rialto::max_extrinsic_size() + ) > bp_millau::max_extrinsic_size(), + "We can't actually send maximal messages to Rialto from Millau, because Millau extrinsics can't be that large", + ) + } + + #[test] + fn maximal_rialto_to_millau_message_dispatch_weight_is_computed_correctly() { + use rialto_runtime::millau_messages::Millau; + + let maximal_dispatch_weight = + send_message::compute_maximal_message_dispatch_weight(bp_millau::max_extrinsic_weight()); + let call: millau_runtime::Call = rialto_runtime::SystemCall::remark(vec![]).into(); + + let payload = send_message::message_payload( + Default::default(), + maximal_dispatch_weight, + pallet_bridge_dispatch::CallOrigin::SourceRoot, + &call, + ); + assert_eq!(Millau::verify_message(&payload), Ok(())); + + let payload = send_message::message_payload( + Default::default(), + maximal_dispatch_weight + 1, + 
pallet_bridge_dispatch::CallOrigin::SourceRoot, + &call, + ); + assert!(Millau::verify_message(&payload).is_err()); + } + + #[test] + fn maximal_weight_fill_block_to_rialto_is_generated_correctly() { + use millau_runtime::rialto_messages::Rialto; + + let maximal_dispatch_weight = + send_message::compute_maximal_message_dispatch_weight(bp_rialto::max_extrinsic_weight()); + let call: rialto_runtime::Call = millau_runtime::SystemCall::remark(vec![]).into(); + + let payload = send_message::message_payload( + Default::default(), + maximal_dispatch_weight, + pallet_bridge_dispatch::CallOrigin::SourceRoot, + &call, + ); + assert_eq!(Rialto::verify_message(&payload), Ok(())); + + let payload = send_message::message_payload( + Default::default(), + maximal_dispatch_weight + 1, + pallet_bridge_dispatch::CallOrigin::SourceRoot, + &call, + ); + assert!(Rialto::verify_message(&payload).is_err()); + } + + #[test] + fn rialto_tx_extra_bytes_constant_is_correct() { + let rialto_call = rialto_runtime::Call::System(rialto_runtime::SystemCall::remark(vec![])); + let rialto_tx = Rialto::sign_transaction( + Default::default(), + &sp_keyring::AccountKeyring::Alice.pair(), + 0, + rialto_call.clone(), + ); + let extra_bytes_in_transaction = rialto_tx.encode().len() - rialto_call.encode().len(); + assert!( + bp_rialto::TX_EXTRA_BYTES as usize >= extra_bytes_in_transaction, + "Hardcoded number of extra bytes in Rialto transaction {} is lower than actual value: {}", + bp_rialto::TX_EXTRA_BYTES, + extra_bytes_in_transaction, + ); + } + + #[test] + fn millau_tx_extra_bytes_constant_is_correct() { + let millau_call = millau_runtime::Call::System(millau_runtime::SystemCall::remark(vec![])); + let millau_tx = Millau::sign_transaction( + Default::default(), + &sp_keyring::AccountKeyring::Alice.pair(), + 0, + millau_call.clone(), + ); + let extra_bytes_in_transaction = millau_tx.encode().len() - millau_call.encode().len(); + assert!( + bp_millau::TX_EXTRA_BYTES as usize >= 
extra_bytes_in_transaction, + "Hardcoded number of extra bytes in Millau transaction {} is lower than actual value: {}", + bp_millau::TX_EXTRA_BYTES, + extra_bytes_in_transaction, + ); + } +} + +#[cfg(test)] +mod rococo_tests { + use bp_header_chain::justification::GrandpaJustification; + use codec::Encode; + + #[test] + fn scale_compatibility_of_bridges_call() { + // given + let header = sp_runtime::generic::Header { + parent_hash: Default::default(), + number: Default::default(), + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: sp_runtime::generic::Digest { logs: vec![] }, + }; + + let justification = GrandpaJustification { + round: 0, + commit: finality_grandpa::Commit { + target_hash: Default::default(), + target_number: Default::default(), + precommits: vec![], + }, + votes_ancestries: vec![], + }; + + let actual = bp_rococo::BridgeGrandpaWestendCall::submit_finality_proof(header.clone(), justification.clone()); + let expected = millau_runtime::BridgeGrandpaRialtoCall::::submit_finality_proof( + header, + justification, + ); + + // when + let actual_encoded = actual.encode(); + let expected_encoded = expected.encode(); + + // then + assert_eq!( + actual_encoded, expected_encoded, + "\n\nEncoding difference.\nGot {:#?} \nExpected: {:#?}", + actual, expected + ); + } +} + +#[cfg(test)] +mod westend_tests { + use bp_header_chain::justification::GrandpaJustification; + use codec::Encode; + + #[test] + fn scale_compatibility_of_bridges_call() { + // given + let header = sp_runtime::generic::Header { + parent_hash: Default::default(), + number: Default::default(), + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: sp_runtime::generic::Digest { logs: vec![] }, + }; + + let justification = GrandpaJustification { + round: 0, + commit: finality_grandpa::Commit { + target_hash: Default::default(), + target_number: Default::default(), + precommits: vec![], + }, + votes_ancestries: vec![], + }; + + let 
actual = bp_westend::BridgeGrandpaRococoCall::submit_finality_proof(header.clone(), justification.clone()); + let expected = millau_runtime::BridgeGrandpaRialtoCall::::submit_finality_proof( + header, + justification, + ); + + // when + let actual_encoded = actual.encode(); + let expected_encoded = expected.encode(); + + // then + assert_eq!( + actual_encoded, expected_encoded, + "\n\nEncoding difference.\nGot {:#?} \nExpected: {:#?}", + actual, expected + ); + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rialto.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rialto.rs new file mode 100644 index 0000000000..25c1ab04c9 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/rialto.rs @@ -0,0 +1,98 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Rialto chain specification for CLI. 
+ +use crate::cli::{ + bridge, + encode_call::{self, Call, CliEncodeCall}, + encode_message, send_message, CliChain, +}; +use codec::Decode; +use frame_support::weights::{GetDispatchInfo, Weight}; +use pallet_bridge_dispatch::{CallOrigin, MessagePayload}; +use relay_rialto_client::Rialto; +use sp_version::RuntimeVersion; + +impl CliEncodeCall for Rialto { + fn max_extrinsic_size() -> u32 { + bp_rialto::max_extrinsic_size() + } + + fn encode_call(call: &Call) -> anyhow::Result { + Ok(match call { + Call::Raw { data } => Decode::decode(&mut &*data.0)?, + Call::Remark { remark_payload, .. } => rialto_runtime::Call::System(rialto_runtime::SystemCall::remark( + remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), + )), + Call::Transfer { recipient, amount } => { + rialto_runtime::Call::Balances(rialto_runtime::BalancesCall::transfer(recipient.raw_id(), amount.0)) + } + Call::BridgeSendMessage { + lane, + payload, + fee, + bridge_instance_index, + } => match *bridge_instance_index { + bridge::RIALTO_TO_MILLAU_INDEX => { + let payload = Decode::decode(&mut &*payload.0)?; + rialto_runtime::Call::BridgeMillauMessages(rialto_runtime::MessagesCall::send_message( + lane.0, payload, fee.0, + )) + } + _ => anyhow::bail!( + "Unsupported target bridge pallet with instance index: {}", + bridge_instance_index + ), + }, + }) + } +} + +impl CliChain for Rialto { + const RUNTIME_VERSION: RuntimeVersion = rialto_runtime::VERSION; + + type KeyPair = sp_core::sr25519::Pair; + type MessagePayload = MessagePayload>; + + fn ss58_format() -> u16 { + rialto_runtime::SS58Prefix::get() as u16 + } + + fn max_extrinsic_weight() -> Weight { + bp_rialto::max_extrinsic_weight() + } + + fn encode_message(message: encode_message::MessagePayload) -> Result { + match message { + encode_message::MessagePayload::Raw { data } => MessagePayload::decode(&mut &*data.0) + .map_err(|e| format!("Failed to decode Rialto's MessagePayload: {:?}", e)), + encode_message::MessagePayload::Call { mut call, 
mut sender } => { + type Source = Rialto; + type Target = relay_millau_client::Millau; + + sender.enforce_chain::(); + let spec_version = Target::RUNTIME_VERSION.spec_version; + let origin = CallOrigin::SourceAccount(sender.raw_id()); + encode_call::preprocess_call::(&mut call, bridge::RIALTO_TO_MILLAU_INDEX); + let call = Target::encode_call(&call).map_err(|e| e.to_string())?; + let weight = call.get_dispatch_info().weight; + + Ok(send_message::message_payload(spec_version, weight, origin, &call)) + } + } + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs new file mode 100644 index 0000000000..39295c8943 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs @@ -0,0 +1,57 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Rialto-to-Millau headers sync entrypoint. 
+ +use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; + +use bp_header_chain::justification::GrandpaJustification; +use codec::Encode; +use relay_millau_client::{Millau, SigningParams as MillauSigningParams}; +use relay_rialto_client::{Rialto, SyncHeader as RialtoSyncHeader}; +use relay_substrate_client::{Chain, TransactionSignScheme}; +use sp_core::{Bytes, Pair}; + +/// Rialto-to-Millau finality sync pipeline. +pub(crate) type RialtoFinalityToMillau = SubstrateFinalityToSubstrate; + +impl SubstrateFinalitySyncPipeline for RialtoFinalityToMillau { + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD; + + type TargetChain = Millau; + + fn transactions_author(&self) -> bp_millau::AccountId { + (*self.target_sign.public().as_array_ref()).into() + } + + fn make_submit_finality_proof_transaction( + &self, + transaction_nonce: ::Index, + header: RialtoSyncHeader, + proof: GrandpaJustification, + ) -> Bytes { + let call = millau_runtime::BridgeGrandpaRialtoCall::< + millau_runtime::Runtime, + millau_runtime::RialtoGrandpaInstance, + >::submit_finality_proof(header.into_inner(), proof) + .into(); + + let genesis_hash = *self.target_client.genesis_hash(); + let transaction = Millau::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + + Bytes(transaction.encode()) + } +} diff --git a/polkadot/bridges/relays/substrate/src/rialto_messages_to_millau.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs similarity index 58% rename from polkadot/bridges/relays/substrate/src/rialto_messages_to_millau.rs rename to polkadot/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs index 1c11a11141..ec39a4caa3 100644 --- a/polkadot/bridges/relays/substrate/src/rialto_messages_to_millau.rs +++ b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity 
Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -16,13 +16,13 @@ //! Rialto-to-Millau messages sync entrypoint. -use crate::messages_lane::{select_delivery_transaction_limits, SubstrateMessageLane, SubstrateMessageLaneToSubstrate}; +use crate::messages_lane::{ + select_delivery_transaction_limits, MessagesRelayParams, SubstrateMessageLane, SubstrateMessageLaneToSubstrate, +}; use crate::messages_source::SubstrateMessagesSource; use crate::messages_target::SubstrateMessagesTarget; -use crate::{MillauClient, RialtoClient}; -use async_trait::async_trait; -use bp_message_lane::{LaneId, MessageNonce}; +use bp_messages::MessageNonce; use bp_runtime::{MILLAU_BRIDGE_INSTANCE, RIALTO_BRIDGE_INSTANCE}; use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; use codec::Encode; @@ -30,15 +30,17 @@ use frame_support::dispatch::GetDispatchInfo; use messages_relay::message_lane::MessageLane; use relay_millau_client::{HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams}; use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{Chain, Error as SubstrateError, TransactionSignScheme}; -use relay_utils::metrics::MetricsParams; -use sp_core::Pair; +use relay_substrate_client::{ + metrics::{FloatStorageValueMetric, StorageProofOverheadMetric}, + Chain, TransactionSignScheme, +}; +use sp_core::{Bytes, Pair}; use std::{ops::RangeInclusive, time::Duration}; /// Rialto-to-Millau message lane. 
-type RialtoMessagesToMillau = SubstrateMessageLaneToSubstrate; +pub type RialtoMessagesToMillau = + SubstrateMessageLaneToSubstrate; -#[async_trait] impl SubstrateMessageLane for RialtoMessagesToMillau { const OUTBOUND_LANE_MESSAGES_DISPATCH_WEIGHT_METHOD: &'static str = bp_millau::TO_MILLAU_MESSAGES_DISPATCH_WEIGHT_METHOD; @@ -51,24 +53,28 @@ impl SubstrateMessageLane for RialtoMessagesToMillau { bp_rialto::FROM_RIALTO_LATEST_CONFIRMED_NONCE_METHOD; const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_rialto::FROM_RIALTO_UNREWARDED_RELAYERS_STATE; - const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rialto::FINALIZED_RIALTO_BLOCK_METHOD; - const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_millau::FINALIZED_MILLAU_BLOCK_METHOD; + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD; + const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD; - type SourceSignedTransaction = ::SignedTransaction; - type TargetSignedTransaction = ::SignedTransaction; + type SourceChain = Rialto; + type TargetChain = Millau; - async fn make_messages_receiving_proof_transaction( + fn source_transactions_author(&self) -> bp_rialto::AccountId { + (*self.source_sign.public().as_array_ref()).into() + } + + fn make_messages_receiving_proof_transaction( &self, + transaction_nonce: ::Index, _generated_at_block: MillauHeaderId, proof: ::MessagesReceivingProof, - ) -> Result { + ) -> Bytes { let (relayers_state, proof) = proof; - let account_id = self.source_sign.signer.public().as_array_ref().clone().into(); - let nonce = self.source_client.next_account_index(account_id).await?; let call: rialto_runtime::Call = - rialto_runtime::MessageLaneCall::receive_messages_delivery_proof(proof, relayers_state).into(); + rialto_runtime::MessagesCall::receive_messages_delivery_proof(proof, relayers_state).into(); let call_weight = 
call.get_dispatch_info().weight; - let transaction = Rialto::sign_transaction(&self.source_client, &self.source_sign.signer, nonce, call); + let genesis_hash = *self.source_client.genesis_hash(); + let transaction = Rialto::sign_transaction(genesis_hash, &self.source_sign, transaction_nonce, call); log::trace!( target: "bridge", "Prepared Millau -> Rialto confirmation transaction. Weight: {}/{}, size: {}/{}", @@ -77,15 +83,20 @@ impl SubstrateMessageLane for RialtoMessagesToMillau { transaction.encode().len(), bp_rialto::max_extrinsic_size(), ); - Ok(transaction) + Bytes(transaction.encode()) } - async fn make_messages_delivery_transaction( + fn target_transactions_author(&self) -> bp_rialto::AccountId { + (*self.target_sign.public().as_array_ref()).into() + } + + fn make_messages_delivery_transaction( &self, + transaction_nonce: ::Index, _generated_at_header: RialtoHeaderId, _nonces: RangeInclusive, proof: ::MessagesProof, - ) -> Result { + ) -> Bytes { let (dispatch_weight, proof) = proof; let FromBridgedChainMessagesProof { ref nonces_start, @@ -93,9 +104,7 @@ impl SubstrateMessageLane for RialtoMessagesToMillau { .. 
} = proof; let messages_count = nonces_end - nonces_start + 1; - let account_id = self.target_sign.signer.public().as_array_ref().clone().into(); - let nonce = self.target_client.next_account_index(account_id).await?; - let call: millau_runtime::Call = millau_runtime::MessageLaneCall::receive_messages_proof( + let call: millau_runtime::Call = millau_runtime::MessagesCall::receive_messages_proof( self.relayer_id_at_source.clone(), proof, messages_count as _, @@ -103,7 +112,8 @@ impl SubstrateMessageLane for RialtoMessagesToMillau { ) .into(); let call_weight = call.get_dispatch_info().weight; - let transaction = Millau::sign_transaction(&self.target_client, &self.target_sign.signer, nonce, call); + let genesis_hash = *self.target_client.genesis_hash(); + let transaction = Millau::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); log::trace!( target: "bridge", "Prepared Rialto -> Millau delivery transaction. Weight: {}/{}, size: {}/{}", @@ -112,40 +122,47 @@ impl SubstrateMessageLane for RialtoMessagesToMillau { transaction.encode().len(), bp_millau::max_extrinsic_size(), ); - Ok(transaction) + Bytes(transaction.encode()) } } /// Rialto node as messages source. -type RialtoSourceClient = SubstrateMessagesSource; +type RialtoSourceClient = SubstrateMessagesSource< + Rialto, + RialtoMessagesToMillau, + rialto_runtime::Runtime, + rialto_runtime::WithMillauMessagesInstance, +>; /// Millau node as messages target. -type MillauTargetClient = SubstrateMessagesTarget; +type MillauTargetClient = SubstrateMessagesTarget< + Millau, + RialtoMessagesToMillau, + millau_runtime::Runtime, + millau_runtime::WithRialtoMessagesInstance, +>; /// Run Rialto-to-Millau messages sync. 
-pub fn run( - rialto_client: RialtoClient, - rialto_sign: RialtoSigningParams, - millau_client: MillauClient, - millau_sign: MillauSigningParams, - lane_id: LaneId, - metrics_params: Option, -) { +pub async fn run( + params: MessagesRelayParams, +) -> Result<(), String> { let stall_timeout = Duration::from_secs(5 * 60); - let relayer_id_at_rialto = rialto_sign.signer.public().as_array_ref().clone().into(); + let relayer_id_at_rialto = (*params.source_sign.public().as_array_ref()).into(); + let lane_id = params.lane_id; + let source_client = params.source_client; let lane = RialtoMessagesToMillau { - source_client: rialto_client.clone(), - source_sign: rialto_sign, - target_client: millau_client.clone(), - target_sign: millau_sign, + source_client: source_client.clone(), + source_sign: params.source_sign, + target_client: params.target_client.clone(), + target_sign: params.target_sign, relayer_id_at_source: relayer_id_at_rialto, }; // 2/3 is reserved for proofs and tx overhead let max_messages_size_in_single_batch = bp_millau::max_extrinsic_size() as usize / 3; let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = - select_delivery_transaction_limits::>( + select_delivery_transaction_limits::>( bp_millau::max_extrinsic_weight(), bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, ); @@ -178,9 +195,50 @@ pub fn run( max_messages_size_in_single_batch, }, }, - RialtoSourceClient::new(rialto_client, lane.clone(), lane_id, MILLAU_BRIDGE_INSTANCE), - MillauTargetClient::new(millau_client, lane, lane_id, RIALTO_BRIDGE_INSTANCE), - metrics_params, + RialtoSourceClient::new( + source_client.clone(), + lane.clone(), + lane_id, + MILLAU_BRIDGE_INSTANCE, + params.target_to_source_headers_relay, + ), + MillauTargetClient::new( + params.target_client, + lane, + lane_id, + RIALTO_BRIDGE_INSTANCE, + params.source_to_target_headers_relay, + ), + relay_utils::relay_metrics( + Some(messages_relay::message_lane_loop::metrics_prefix::< + 
RialtoMessagesToMillau, + >(&lane_id)), + params.metrics_params, + ) + .standalone_metric(|registry, prefix| { + StorageProofOverheadMetric::new( + registry, + prefix, + source_client.clone(), + "rialto_storage_proof_overhead".into(), + "Rialto storage proof overhead".into(), + ) + })? + .standalone_metric(|registry, prefix| { + FloatStorageValueMetric::<_, sp_runtime::FixedU128>::new( + registry, + prefix, + source_client, + sp_core::storage::StorageKey( + rialto_runtime::millau_messages::MillauToRialtoConversionRate::key().to_vec(), + ), + Some(rialto_runtime::millau_messages::INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE), + "rialto_millau_to_rialto_conversion_rate".into(), + "Millau to Rialto tokens conversion rate (used by Millau)".into(), + ) + })? + .into_params(), futures::future::pending(), - ); + ) + .await } diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rococo.rs new file mode 100644 index 0000000000..0bcf388c34 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/rococo.rs @@ -0,0 +1,39 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +use crate::cli::{encode_message, CliChain}; +use frame_support::weights::Weight; +use relay_rococo_client::Rococo; +use sp_version::RuntimeVersion; + +impl CliChain for Rococo { + const RUNTIME_VERSION: RuntimeVersion = bp_rococo::VERSION; + + type KeyPair = sp_core::sr25519::Pair; + type MessagePayload = (); + + fn ss58_format() -> u16 { + 42 + } + + fn max_extrinsic_weight() -> Weight { + 0 + } + + fn encode_message(_message: encode_message::MessagePayload) -> Result { + Err("Sending messages from Rococo is not yet supported.".into()) + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rococo_headers_to_westend.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rococo_headers_to_westend.rs new file mode 100644 index 0000000000..dca91adb3d --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/rococo_headers_to_westend.rs @@ -0,0 +1,60 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Rococo-to-Westend headers sync entrypoint. 
+ +use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; + +use bp_header_chain::justification::GrandpaJustification; +use codec::Encode; +use relay_rococo_client::{Rococo, SyncHeader as RococoSyncHeader}; +use relay_substrate_client::{Chain, TransactionSignScheme}; +use relay_utils::metrics::MetricsParams; +use relay_westend_client::{SigningParams as WestendSigningParams, Westend}; +use sp_core::{Bytes, Pair}; + +/// Rococo-to-Westend finality sync pipeline. +pub(crate) type RococoFinalityToWestend = SubstrateFinalityToSubstrate; + +impl SubstrateFinalitySyncPipeline for RococoFinalityToWestend { + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD; + + type TargetChain = Westend; + + fn customize_metrics(params: MetricsParams) -> anyhow::Result { + crate::chains::add_polkadot_kusama_price_metrics::(params) + } + + fn transactions_author(&self) -> bp_westend::AccountId { + (*self.target_sign.public().as_array_ref()).into() + } + + fn make_submit_finality_proof_transaction( + &self, + transaction_nonce: ::Index, + header: RococoSyncHeader, + proof: GrandpaJustification, + ) -> Bytes { + let call = bp_westend::Call::BridgeGrandpaRococo(bp_westend::BridgeGrandpaRococoCall::submit_finality_proof( + header.into_inner(), + proof, + )); + let genesis_hash = *self.target_client.genesis_hash(); + let transaction = Westend::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + + Bytes(transaction.encode()) + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/westend.rs b/polkadot/bridges/relays/bin-substrate/src/chains/westend.rs new file mode 100644 index 0000000000..27621472d6 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/westend.rs @@ -0,0 +1,41 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Westend chain specification for CLI. + +use crate::cli::{encode_message, CliChain}; +use frame_support::weights::Weight; +use relay_westend_client::Westend; +use sp_version::RuntimeVersion; + +impl CliChain for Westend { + const RUNTIME_VERSION: RuntimeVersion = bp_westend::VERSION; + + type KeyPair = sp_core::sr25519::Pair; + type MessagePayload = (); + + fn ss58_format() -> u16 { + 42 + } + + fn max_extrinsic_weight() -> Weight { + 0 + } + + fn encode_message(_message: encode_message::MessagePayload) -> Result { + Err("Sending messages from Westend is not yet supported.".into()) + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs b/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs new file mode 100644 index 0000000000..1523dc1be5 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs @@ -0,0 +1,62 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Westend-to-Millau headers sync entrypoint. + +use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; + +use bp_header_chain::justification::GrandpaJustification; +use codec::Encode; +use relay_millau_client::{Millau, SigningParams as MillauSigningParams}; +use relay_substrate_client::{Chain, TransactionSignScheme}; +use relay_utils::metrics::MetricsParams; +use relay_westend_client::{SyncHeader as WestendSyncHeader, Westend}; +use sp_core::{Bytes, Pair}; + +/// Westend-to-Millau finality sync pipeline. +pub(crate) type WestendFinalityToMillau = SubstrateFinalityToSubstrate; + +impl SubstrateFinalitySyncPipeline for WestendFinalityToMillau { + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_westend::BEST_FINALIZED_WESTEND_HEADER_METHOD; + + type TargetChain = Millau; + + fn customize_metrics(params: MetricsParams) -> anyhow::Result { + crate::chains::add_polkadot_kusama_price_metrics::(params) + } + + fn transactions_author(&self) -> bp_millau::AccountId { + (*self.target_sign.public().as_array_ref()).into() + } + + fn make_submit_finality_proof_transaction( + &self, + transaction_nonce: ::Index, + header: WestendSyncHeader, + proof: GrandpaJustification, + ) -> Bytes { + let call = millau_runtime::BridgeGrandpaWestendCall::< + millau_runtime::Runtime, + millau_runtime::WestendGrandpaInstance, + >::submit_finality_proof(header.into_inner(), proof) + .into(); + + let genesis_hash = *self.target_client.genesis_hash(); + let transaction = Millau::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + + 
Bytes(transaction.encode()) + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_rococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_rococo.rs new file mode 100644 index 0000000000..577a858d92 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_rococo.rs @@ -0,0 +1,60 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Westend-to-Rococo headers sync entrypoint. + +use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; + +use bp_header_chain::justification::GrandpaJustification; +use codec::Encode; +use relay_rococo_client::{Rococo, SigningParams as RococoSigningParams}; +use relay_substrate_client::{Chain, TransactionSignScheme}; +use relay_utils::metrics::MetricsParams; +use relay_westend_client::{SyncHeader as WestendSyncHeader, Westend}; +use sp_core::{Bytes, Pair}; + +/// Westend-to-Rococo finality sync pipeline. 
+pub(crate) type WestendFinalityToRococo = SubstrateFinalityToSubstrate; + +impl SubstrateFinalitySyncPipeline for WestendFinalityToRococo { + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_westend::BEST_FINALIZED_WESTEND_HEADER_METHOD; + + type TargetChain = Rococo; + + fn customize_metrics(params: MetricsParams) -> anyhow::Result { + crate::chains::add_polkadot_kusama_price_metrics::(params) + } + + fn transactions_author(&self) -> bp_rococo::AccountId { + (*self.target_sign.public().as_array_ref()).into() + } + + fn make_submit_finality_proof_transaction( + &self, + transaction_nonce: ::Index, + header: WestendSyncHeader, + proof: GrandpaJustification, + ) -> Bytes { + let call = bp_rococo::Call::BridgeGrandpaWestend(bp_rococo::BridgeGrandpaWestendCall::submit_finality_proof( + header.into_inner(), + proof, + )); + let genesis_hash = *self.target_client.genesis_hash(); + let transaction = Rococo::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + + Bytes(transaction.encode()) + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/bridge.rs b/polkadot/bridges/relays/bin-substrate/src/cli/bridge.rs new file mode 100644 index 0000000000..faf4417d1e --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/cli/bridge.rs @@ -0,0 +1,96 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use structopt::clap::arg_enum; + +arg_enum! { + #[derive(Debug, PartialEq, Eq)] + /// Supported full bridges (headers + messages). + pub enum FullBridge { + MillauToRialto, + RialtoToMillau, + } +} + +impl FullBridge { + /// Return instance index of the bridge pallet in source runtime. + pub fn bridge_instance_index(&self) -> u8 { + match self { + Self::MillauToRialto => MILLAU_TO_RIALTO_INDEX, + Self::RialtoToMillau => RIALTO_TO_MILLAU_INDEX, + } + } +} + +pub const RIALTO_TO_MILLAU_INDEX: u8 = 0; +pub const MILLAU_TO_RIALTO_INDEX: u8 = 0; + +/// The macro allows executing bridge-specific code without going fully generic. +/// +/// It matches on the [`FullBridge`] enum, sets bridge-specific types or imports and injects +/// the `$generic` code at every variant. +#[macro_export] +macro_rules! select_full_bridge { + ($bridge: expr, $generic: tt) => { + match $bridge { + FullBridge::MillauToRialto => { + type Source = relay_millau_client::Millau; + #[allow(dead_code)] + type Target = relay_rialto_client::Rialto; + + // Derive-account + #[allow(unused_imports)] + use bp_millau::derive_account_from_rialto_id as derive_account; + + // Relay-messages + #[allow(unused_imports)] + use crate::chains::millau_messages_to_rialto::run as relay_messages; + + // Send-message / Estimate-fee + #[allow(unused_imports)] + use bp_rialto::TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD; + // Send-message + #[allow(unused_imports)] + use millau_runtime::rialto_account_ownership_digest as account_ownership_digest; + + $generic + } + FullBridge::RialtoToMillau => { + type Source = relay_rialto_client::Rialto; + #[allow(dead_code)] + type Target = relay_millau_client::Millau; + + // Derive-account + #[allow(unused_imports)] + use bp_rialto::derive_account_from_millau_id as derive_account; + + // Relay-messages + #[allow(unused_imports)] + use 
crate::chains::rialto_messages_to_millau::run as relay_messages; + + // Send-message / Estimate-fee + #[allow(unused_imports)] + use bp_millau::TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD; + + // Send-message + #[allow(unused_imports)] + use rialto_runtime::millau_account_ownership_digest as account_ownership_digest; + + $generic + } + } + }; +} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/derive_account.rs b/polkadot/bridges/relays/bin-substrate/src/cli/derive_account.rs new file mode 100644 index 0000000000..92b32b0d47 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/cli/derive_account.rs @@ -0,0 +1,102 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::{bridge::FullBridge, AccountId}; +use crate::select_full_bridge; +use relay_substrate_client::Chain; +use structopt::StructOpt; + +/// Given a source chain `AccountId`, derive the corresponding `AccountId` for the target chain. +/// +/// The (derived) target chain `AccountId` is going to be used as dispatch origin of the call +/// that has been sent over the bridge. +/// This account can also be used to receive target-chain funds (or other form of ownership), +/// since messages sent over the bridge will be able to spend these. 
+#[derive(StructOpt)] +pub struct DeriveAccount { + /// A bridge instance to initalize. + #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + bridge: FullBridge, + /// Source-chain address to derive Target-chain address from. + account: AccountId, +} + +impl DeriveAccount { + /// Parse CLI arguments and derive account. + /// + /// Returns both the Source account in correct SS58 format and the derived account. + fn derive_account(&self) -> (AccountId, AccountId) { + select_full_bridge!(self.bridge, { + let mut account = self.account.clone(); + account.enforce_chain::(); + let acc = bp_runtime::SourceAccount::Account(account.raw_id()); + let id = derive_account(acc); + let derived_account = AccountId::from_raw::(id); + (account, derived_account) + }) + } + + /// Run the command. + pub async fn run(self) -> anyhow::Result<()> { + select_full_bridge!(self.bridge, { + let (account, derived_account) = self.derive_account(); + println!("Source address:\n{} ({})", account, Source::NAME); + println!( + "->Corresponding (derived) address:\n{} ({})", + derived_account, + Target::NAME, + ); + + Ok(()) + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn derive_account_cli(bridge: &str, account: &str) -> (AccountId, AccountId) { + DeriveAccount::from_iter(vec!["derive-account", bridge, account]).derive_account() + } + + #[test] + fn should_derive_accounts_correctly() { + // given + let rialto = "5sauUXUfPjmwxSgmb3tZ5d6yx24eZX4wWJ2JtVUBaQqFbvEU"; + let millau = "752paRyW1EGfq9YLTSSqcSJ5hqnBDidBmaftGhBo8fy6ypW9"; + + // when + let (rialto_parsed, rialto_derived) = derive_account_cli("RialtoToMillau", rialto); + let (millau_parsed, millau_derived) = derive_account_cli("MillauToRialto", millau); + let (millau2_parsed, millau2_derived) = derive_account_cli("MillauToRialto", rialto); + + // then + assert_eq!(format!("{}", rialto_parsed), rialto); + assert_eq!(format!("{}", millau_parsed), millau); + assert_eq!(format!("{}", millau2_parsed), 
millau); + + assert_eq!( + format!("{}", rialto_derived), + "73gLnUwrAdH4vMjbXCiNEpgyz1PLk9JxCaY4cKzvfSZT73KE" + ); + assert_eq!( + format!("{}", millau_derived), + "5rpTJqGv1BPAYy2sXzkPpc3Wx1ZpQtgfuBsrDpNV4HsXAmbi" + ); + assert_eq!(millau_derived, millau2_derived); + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/encode_call.rs b/polkadot/bridges/relays/bin-substrate/src/cli/encode_call.rs new file mode 100644 index 0000000000..6e1130cffc --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/cli/encode_call.rs @@ -0,0 +1,275 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::bridge::FullBridge; +use crate::cli::{AccountId, Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId}; +use crate::select_full_bridge; +use frame_support::dispatch::GetDispatchInfo; +use relay_substrate_client::Chain; +use structopt::StructOpt; + +/// Encode source chain runtime call. +#[derive(StructOpt, Debug)] +pub struct EncodeCall { + /// A bridge instance to encode call for. + #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + bridge: FullBridge, + #[structopt(flatten)] + call: Call, +} + +/// All possible messages that may be delivered to generic Substrate chain. 
+/// +/// Note this enum may be used in the context of both Source (as part of `encode-call`) +/// and Target chain (as part of `encode-message/send-message`). +#[derive(StructOpt, Debug, PartialEq, Eq)] +pub enum Call { + /// Raw bytes for the message + Raw { + /// Raw, SCALE-encoded message + data: HexBytes, + }, + /// Make an on-chain remark (comment). + Remark { + /// Explicit remark payload. + #[structopt(long, conflicts_with("remark-size"))] + remark_payload: Option, + /// Remark size. If not passed, small UTF8-encoded string is generated by relay as remark. + #[structopt(long, conflicts_with("remark-payload"))] + remark_size: Option>, + }, + /// Transfer the specified `amount` of native tokens to a particular `recipient`. + Transfer { + /// Address of an account to receive the transfer. + #[structopt(long)] + recipient: AccountId, + /// Amount of target tokens to send in target chain base currency units. + #[structopt(long)] + amount: Balance, + }, + /// A call to the specific Bridge Messages pallet to queue message to be sent over a bridge. + BridgeSendMessage { + /// An index of the bridge instance which represents the expected target chain. + #[structopt(skip = 255)] + bridge_instance_index: u8, + /// Hex-encoded lane id that should be served by the relay. Defaults to `00000000`. + #[structopt(long, default_value = "00000000")] + lane: HexLaneId, + /// Raw SCALE-encoded Message Payload to submit to the messages pallet. + /// + /// This can be obtained by encoding call for the target chain. + #[structopt(long)] + payload: HexBytes, + /// Declared delivery and dispatch fee in base source-chain currency units. + #[structopt(long)] + fee: Balance, + }, +} + +pub trait CliEncodeCall: Chain { + /// Maximal size (in bytes) of any extrinsic (from the runtime). + fn max_extrinsic_size() -> u32; + + /// Encode a CLI call. 
+ fn encode_call(call: &Call) -> anyhow::Result; +} + +impl EncodeCall { + fn encode(&mut self) -> anyhow::Result { + select_full_bridge!(self.bridge, { + preprocess_call::(&mut self.call, self.bridge.bridge_instance_index()); + let call = Source::encode_call(&self.call)?; + + let encoded = HexBytes::encode(&call); + + log::info!(target: "bridge", "Generated {} call: {:#?}", Source::NAME, call); + log::info!(target: "bridge", "Weight of {} call: {}", Source::NAME, call.get_dispatch_info().weight); + log::info!(target: "bridge", "Encoded {} call: {:?}", Source::NAME, encoded); + + Ok(encoded) + }) + } + + /// Run the command. + pub async fn run(mut self) -> anyhow::Result<()> { + println!("{:?}", self.encode()?); + Ok(()) + } +} + +/// Prepare the call to be passed to [`CliEncodeCall::encode_call`]. +/// +/// This function will fill in all optional and missing pieces and will make sure that +/// values are converted to bridge-specific ones. +/// +/// Most importantly, the method will fill-in [`bridge_instance_index`] parameter for +/// target-chain specific calls. +pub(crate) fn preprocess_call( + call: &mut Call, + bridge_instance: u8, +) { + match *call { + Call::Raw { .. } => {} + Call::Remark { + ref remark_size, + ref mut remark_payload, + } => { + if remark_payload.is_none() { + *remark_payload = Some(HexBytes(generate_remark_payload( + &remark_size, + compute_maximal_message_arguments_size(Source::max_extrinsic_size(), Target::max_extrinsic_size()), + ))); + } + } + Call::Transfer { ref mut recipient, .. } => { + recipient.enforce_chain::(); + } + Call::BridgeSendMessage { + ref mut bridge_instance_index, + .. 
+ } => { + *bridge_instance_index = bridge_instance; + } + }; +} + +fn generate_remark_payload(remark_size: &Option>, maximal_allowed_size: u32) -> Vec { + match remark_size { + Some(ExplicitOrMaximal::Explicit(remark_size)) => vec![0; *remark_size], + Some(ExplicitOrMaximal::Maximal) => vec![0; maximal_allowed_size as _], + None => format!( + "Unix time: {}", + std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .unwrap_or_default() + .as_secs(), + ) + .as_bytes() + .to_vec(), + } +} + +pub(crate) fn compute_maximal_message_arguments_size( + maximal_source_extrinsic_size: u32, + maximal_target_extrinsic_size: u32, +) -> u32 { + // assume that both signed extensions and other arguments fit 1KB + let service_tx_bytes_on_source_chain = 1024; + let maximal_source_extrinsic_size = maximal_source_extrinsic_size - service_tx_bytes_on_source_chain; + let maximal_call_size = + bridge_runtime_common::messages::target::maximal_incoming_message_size(maximal_target_extrinsic_size); + let maximal_call_size = if maximal_call_size > maximal_source_extrinsic_size { + maximal_source_extrinsic_size + } else { + maximal_call_size + }; + + // bytes in Call encoding that are used to encode everything except arguments + let service_bytes = 1 + 1 + 4; + maximal_call_size - service_bytes +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_encode_transfer_call() { + // given + let mut encode_call = EncodeCall::from_iter(vec![ + "encode-call", + "RialtoToMillau", + "transfer", + "--amount", + "12345", + "--recipient", + "5sauUXUfPjmwxSgmb3tZ5d6yx24eZX4wWJ2JtVUBaQqFbvEU", + ]); + + // when + let hex = encode_call.encode().unwrap(); + + // then + assert_eq!( + format!("{:?}", hex), + "0x0c00d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27de5c0" + ); + } + + #[test] + fn should_encode_remark_with_default_payload() { + // given + let mut encode_call = EncodeCall::from_iter(vec!["encode-call", "RialtoToMillau", "remark"]); + + // 
when + let hex = encode_call.encode().unwrap(); + + // then + assert!(format!("{:?}", hex).starts_with("0x070154556e69782074696d653a")); + } + + #[test] + fn should_encode_remark_with_explicit_payload() { + // given + let mut encode_call = EncodeCall::from_iter(vec![ + "encode-call", + "RialtoToMillau", + "remark", + "--remark-payload", + "1234", + ]); + + // when + let hex = encode_call.encode().unwrap(); + + // then + assert_eq!(format!("{:?}", hex), "0x0701081234"); + } + + #[test] + fn should_encode_remark_with_size() { + // given + let mut encode_call = + EncodeCall::from_iter(vec!["encode-call", "RialtoToMillau", "remark", "--remark-size", "12"]); + + // when + let hex = encode_call.encode().unwrap(); + + // then + assert_eq!(format!("{:?}", hex), "0x070130000000000000000000000000"); + } + + #[test] + fn should_disallow_both_payload_and_size() { + // when + let err = EncodeCall::from_iter_safe(vec![ + "encode-call", + "RialtoToMillau", + "remark", + "--remark-payload", + "1234", + "--remark-size", + "12", + ]) + .unwrap_err(); + + // then + assert_eq!(err.kind, structopt::clap::ErrorKind::ArgumentConflict); + + let info = err.info.unwrap(); + assert!(info.contains(&"remark-payload".to_string()) | info.contains(&"remark-size".to_string())) + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/encode_message.rs b/polkadot/bridges/relays/bin-substrate/src/cli/encode_message.rs new file mode 100644 index 0000000000..a29aa8597d --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/cli/encode_message.rs @@ -0,0 +1,106 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::{bridge::FullBridge, AccountId, CliChain, HexBytes}; +use crate::select_full_bridge; +use structopt::StructOpt; + +/// Generic message payload. +#[derive(StructOpt, Debug, PartialEq, Eq)] +pub enum MessagePayload { + /// Raw, SCALE-encoded `MessagePayload`. + Raw { + /// Hex-encoded SCALE data. + data: HexBytes, + }, + /// Construct message to send over the bridge. + Call { + /// Message details. + #[structopt(flatten)] + call: crate::cli::encode_call::Call, + /// SS58 encoded Source account that will send the payload. + #[structopt(long)] + sender: AccountId, + }, +} + +/// A `MessagePayload` to encode. +#[derive(StructOpt)] +pub struct EncodeMessage { + /// A bridge instance to initalize. + #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + bridge: FullBridge, + #[structopt(flatten)] + payload: MessagePayload, +} + +impl EncodeMessage { + /// Run the command. + pub fn encode(self) -> anyhow::Result { + select_full_bridge!(self.bridge, { + let payload = Source::encode_message(self.payload).map_err(|e| anyhow::format_err!("{}", e))?; + Ok(HexBytes::encode(&payload)) + }) + } + + /// Run the command. 
+ pub async fn run(self) -> anyhow::Result<()> { + let payload = self.encode()?; + println!("{:?}", payload); + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_core::crypto::Ss58Codec; + + #[test] + fn should_encode_raw_message() { + // given + let msg = "01000000e88514000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d3c040130000000000000000000000000"; + let encode_message = EncodeMessage::from_iter(vec!["encode-message", "MillauToRialto", "raw", msg]); + + // when + let hex = encode_message.encode().unwrap(); + + // then + assert_eq!(format!("{:?}", hex), format!("0x{}", msg)); + } + + #[test] + fn should_encode_remark_with_size() { + // given + let sender = sp_keyring::AccountKeyring::Alice.to_account_id().to_ss58check(); + let encode_message = EncodeMessage::from_iter(vec![ + "encode-message", + "RialtoToMillau", + "call", + "--sender", + &sender, + "remark", + "--remark-size", + "12", + ]); + + // when + let hex = encode_message.encode().unwrap(); + + // then + assert_eq!(format!("{:?}", hex), "0x01000000e88514000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d3c040130000000000000000000000000"); + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/estimate_fee.rs b/polkadot/bridges/relays/bin-substrate/src/cli/estimate_fee.rs new file mode 100644 index 0000000000..4e39ad351e --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/cli/estimate_fee.rs @@ -0,0 +1,128 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::bridge::FullBridge; +use crate::cli::{Balance, CliChain, HexBytes, HexLaneId, SourceConnectionParams}; +use crate::select_full_bridge; +use codec::{Decode, Encode}; +use relay_substrate_client::{Chain, ChainWithBalances}; +use structopt::StructOpt; + +/// Estimate Delivery & Dispatch Fee command. +#[derive(StructOpt, Debug, PartialEq, Eq)] +pub struct EstimateFee { + /// A bridge instance to encode call for. + #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + bridge: FullBridge, + #[structopt(flatten)] + source: SourceConnectionParams, + /// Hex-encoded id of lane that will be delivering the message. + #[structopt(long, default_value = "00000000")] + lane: HexLaneId, + /// Payload to send over the bridge. + #[structopt(flatten)] + payload: crate::cli::encode_message::MessagePayload, +} + +impl EstimateFee { + /// Run the command. 
+ pub async fn run(self) -> anyhow::Result<()> { + let Self { + source, + bridge, + lane, + payload, + } = self; + + select_full_bridge!(bridge, { + let source_client = source.to_client::().await?; + let lane = lane.into(); + let payload = Source::encode_message(payload).map_err(|e| anyhow::format_err!("{:?}", e))?; + + let fee: ::NativeBalance = + estimate_message_delivery_and_dispatch_fee(&source_client, ESTIMATE_MESSAGE_FEE_METHOD, lane, payload) + .await?; + + log::info!(target: "bridge", "Fee: {:?}", Balance(fee as _)); + println!("{}", fee); + Ok(()) + }) + } +} + +pub(crate) async fn estimate_message_delivery_and_dispatch_fee( + client: &relay_substrate_client::Client, + estimate_fee_method: &str, + lane: bp_messages::LaneId, + payload: P, +) -> anyhow::Result { + let encoded_response = client + .state_call(estimate_fee_method.into(), (lane, payload).encode().into(), None) + .await?; + let decoded_response: Option = + Decode::decode(&mut &encoded_response.0[..]).map_err(relay_substrate_client::Error::ResponseParseFailed)?; + let fee = decoded_response + .ok_or_else(|| anyhow::format_err!("Unable to decode fee from: {:?}", HexBytes(encoded_response.to_vec())))?; + Ok(fee) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::cli::encode_call; + use sp_core::crypto::Ss58Codec; + + #[test] + fn should_parse_cli_options() { + // given + let alice = sp_keyring::AccountKeyring::Alice.to_account_id().to_ss58check(); + + // when + let res = EstimateFee::from_iter(vec![ + "estimate_fee", + "RialtoToMillau", + "--source-port", + "1234", + "call", + "--sender", + &alice, + "remark", + "--remark-payload", + "1234", + ]); + + // then + assert_eq!( + res, + EstimateFee { + bridge: FullBridge::RialtoToMillau, + lane: HexLaneId([0, 0, 0, 0]), + source: SourceConnectionParams { + source_host: "127.0.0.1".into(), + source_port: 1234, + source_secure: false, + }, + payload: crate::cli::encode_message::MessagePayload::Call { + sender: alice.parse().unwrap(), + call: 
encode_call::Call::Remark { + remark_payload: Some(HexBytes(vec![0x12, 0x34])), + remark_size: None, + } + } + } + ); + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/init_bridge.rs b/polkadot/bridges/relays/bin-substrate/src/cli/init_bridge.rs new file mode 100644 index 0000000000..cdd8ec3691 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/cli/init_bridge.rs @@ -0,0 +1,162 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::{SourceConnectionParams, TargetConnectionParams, TargetSigningParams}; +use bp_header_chain::InitializationData; +use bp_runtime::Chain as ChainBase; +use codec::Encode; +use relay_substrate_client::{Chain, TransactionSignScheme}; +use sp_core::{Bytes, Pair}; +use structopt::{clap::arg_enum, StructOpt}; + +/// Initialize bridge pallet. +#[derive(StructOpt)] +pub struct InitBridge { + /// A bridge instance to initalize. + #[structopt(possible_values = &InitBridgeName::variants(), case_insensitive = true)] + bridge: InitBridgeName, + #[structopt(flatten)] + source: SourceConnectionParams, + #[structopt(flatten)] + target: TargetConnectionParams, + #[structopt(flatten)] + target_sign: TargetSigningParams, +} + +// TODO [#851] Use kebab-case. +arg_enum! { + #[derive(Debug)] + /// Bridge to initialize. 
+ pub enum InitBridgeName { + MillauToRialto, + RialtoToMillau, + WestendToMillau, + WestendToRococo, + RococoToWestend, + } +} + +macro_rules! select_bridge { + ($bridge: expr, $generic: tt) => { + match $bridge { + InitBridgeName::MillauToRialto => { + type Source = relay_millau_client::Millau; + type Target = relay_rialto_client::Rialto; + + fn encode_init_bridge( + init_data: InitializationData<::Header>, + ) -> ::Call { + rialto_runtime::SudoCall::sudo(Box::new( + rialto_runtime::BridgeGrandpaMillauCall::initialize(init_data).into(), + )) + .into() + } + + $generic + } + InitBridgeName::RialtoToMillau => { + type Source = relay_rialto_client::Rialto; + type Target = relay_millau_client::Millau; + + fn encode_init_bridge( + init_data: InitializationData<::Header>, + ) -> ::Call { + let initialize_call = millau_runtime::BridgeGrandpaRialtoCall::< + millau_runtime::Runtime, + millau_runtime::RialtoGrandpaInstance, + >::initialize(init_data); + millau_runtime::SudoCall::sudo(Box::new(initialize_call.into())).into() + } + + $generic + } + InitBridgeName::WestendToMillau => { + type Source = relay_westend_client::Westend; + type Target = relay_millau_client::Millau; + + fn encode_init_bridge( + init_data: InitializationData<::Header>, + ) -> ::Call { + // at Westend -> Millau initialization we're not using sudo, because otherwise our deployments + // may fail, because we need to initialize both Rialto -> Millau and Westend -> Millau bridge. 
+ // => since there's single possible sudo account, one of transaction may fail with duplicate nonce error + millau_runtime::BridgeGrandpaWestendCall::< + millau_runtime::Runtime, + millau_runtime::WestendGrandpaInstance, + >::initialize(init_data) + .into() + } + + $generic + } + InitBridgeName::WestendToRococo => { + type Source = relay_westend_client::Westend; + type Target = relay_rococo_client::Rococo; + + fn encode_init_bridge( + init_data: InitializationData<::Header>, + ) -> ::Call { + bp_rococo::Call::BridgeGrandpaWestend(bp_rococo::BridgeGrandpaWestendCall::initialize(init_data)) + } + + $generic + } + InitBridgeName::RococoToWestend => { + type Source = relay_rococo_client::Rococo; + type Target = relay_westend_client::Westend; + + fn encode_init_bridge( + init_data: InitializationData<::Header>, + ) -> ::Call { + bp_westend::Call::BridgeGrandpaRococo(bp_westend::BridgeGrandpaRococoCall::initialize(init_data)) + } + + $generic + } + } + }; +} + +impl InitBridge { + /// Run the command. + pub async fn run(self) -> anyhow::Result<()> { + select_bridge!(self.bridge, { + let source_client = self.source.to_client::().await?; + let target_client = self.target.to_client::().await?; + let target_sign = self.target_sign.to_keypair::()?; + + crate::headers_initialize::initialize( + source_client, + target_client.clone(), + target_sign.public().into(), + move |transaction_nonce, initialization_data| { + Bytes( + Target::sign_transaction( + *target_client.genesis_hash(), + &target_sign, + transaction_nonce, + encode_init_bridge(initialization_data), + ) + .encode(), + ) + }, + ) + .await; + + Ok(()) + }) + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/mod.rs b/polkadot/bridges/relays/bin-substrate/src/cli/mod.rs new file mode 100644 index 0000000000..505ef11ee2 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/cli/mod.rs @@ -0,0 +1,444 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
+// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Deal with CLI args of substrate-to-substrate relay. + +use std::convert::TryInto; + +use bp_messages::LaneId; +use codec::{Decode, Encode}; +use frame_support::weights::Weight; +use sp_runtime::app_crypto::Ss58Codec; +use structopt::{clap::arg_enum, StructOpt}; + +pub(crate) mod bridge; +pub(crate) mod encode_call; +pub(crate) mod encode_message; +pub(crate) mod estimate_fee; +pub(crate) mod send_message; + +mod derive_account; +mod init_bridge; +mod relay_headers; +mod relay_headers_and_messages; +mod relay_messages; + +/// Parse relay CLI args. +pub fn parse_args() -> Command { + Command::from_args() +} + +/// Substrate-to-Substrate bridge utilities. +#[derive(StructOpt)] +#[structopt(about = "Substrate-to-Substrate relay")] +pub enum Command { + /// Start headers relay between two chains. + /// + /// The on-chain bridge component should have been already initialized with + /// `init-bridge` sub-command. + RelayHeaders(relay_headers::RelayHeaders), + /// Start messages relay between two chains. + /// + /// Ties up to `Messages` pallets on both chains and starts relaying messages. + /// Requires the header relay to be already running. + RelayMessages(relay_messages::RelayMessages), + /// Start headers and messages relay between two Substrate chains. 
+ /// + /// This high-level relay internally starts four low-level relays: two `RelayHeaders` + /// and two `RelayMessages` relays. Headers are only relayed when they are required by + /// the message relays - i.e. when there are messages or confirmations that needs to be + /// relayed between chains. + RelayHeadersAndMessages(relay_headers_and_messages::RelayHeadersAndMessages), + /// Initialize on-chain bridge pallet with current header data. + /// + /// Sends initialization transaction to bootstrap the bridge with current finalized block data. + InitBridge(init_bridge::InitBridge), + /// Send custom message over the bridge. + /// + /// Allows interacting with the bridge by sending messages over `Messages` component. + /// The message is being sent to the source chain, delivered to the target chain and dispatched + /// there. + SendMessage(send_message::SendMessage), + /// Generate SCALE-encoded `Call` for choosen network. + /// + /// The call can be used either as message payload or can be wrapped into a transaction + /// and executed on the chain directly. + EncodeCall(encode_call::EncodeCall), + /// Generate SCALE-encoded `MessagePayload` object that can be sent over selected bridge. + /// + /// The `MessagePayload` can be then fed to `Messages::send_message` function and sent over + /// the bridge. + EncodeMessage(encode_message::EncodeMessage), + /// Estimate Delivery and Dispatch Fee required for message submission to messages pallet. + EstimateFee(estimate_fee::EstimateFee), + /// Given a source chain `AccountId`, derive the corresponding `AccountId` for the target chain. + DeriveAccount(derive_account::DeriveAccount), +} + +impl Command { + /// Run the command. 
+ pub async fn run(self) -> anyhow::Result<()> { + match self { + Self::RelayHeaders(arg) => arg.run().await?, + Self::RelayMessages(arg) => arg.run().await?, + Self::RelayHeadersAndMessages(arg) => arg.run().await?, + Self::InitBridge(arg) => arg.run().await?, + Self::SendMessage(arg) => arg.run().await?, + Self::EncodeCall(arg) => arg.run().await?, + Self::EncodeMessage(arg) => arg.run().await?, + Self::EstimateFee(arg) => arg.run().await?, + Self::DeriveAccount(arg) => arg.run().await?, + } + Ok(()) + } +} + +arg_enum! { + #[derive(Debug)] + /// The origin to use when dispatching the message on the target chain. + /// + /// - `Target` uses account existing on the target chain (requires target private key). + /// - `Origin` uses account derived from the source-chain account. + pub enum Origins { + Target, + Source, + } +} + +/// Generic balance type. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct Balance(pub u128); + +impl std::fmt::Display for Balance { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + use num_format::{Locale, ToFormattedString}; + write!(fmt, "{}", self.0.to_formatted_string(&Locale::en)) + } +} + +impl std::str::FromStr for Balance { + type Err = ::Err; + + fn from_str(s: &str) -> Result { + Ok(Self(s.parse()?)) + } +} + +impl Balance { + /// Cast balance to `u64` type, panicking if it's too large. + pub fn cast(&self) -> u64 { + self.0.try_into().expect("Balance is too high for this chain.") + } +} + +/// Generic account id with custom parser. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct AccountId { + account: sp_runtime::AccountId32, + ss58_format: sp_core::crypto::Ss58AddressFormat, +} + +impl std::fmt::Display for AccountId { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(fmt, "{}", self.account.to_ss58check_with_version(self.ss58_format)) + } +} + +impl std::str::FromStr for AccountId { + type Err = String; + + fn from_str(s: &str) -> Result { + let (account, ss58_format) = sp_runtime::AccountId32::from_ss58check_with_version(s) + .map_err(|err| format!("Unable to decode SS58 address: {:?}", err))?; + Ok(Self { account, ss58_format }) + } +} + +const SS58_FORMAT_PROOF: &str = "u16 -> Ss58Format is infallible; qed"; + +impl AccountId { + /// Create new SS58-formatted address from raw account id. + pub fn from_raw(account: sp_runtime::AccountId32) -> Self { + Self { + account, + ss58_format: T::ss58_format().try_into().expect(SS58_FORMAT_PROOF), + } + } + + /// Enforces formatting account to be for given [`CliChain`] type. + /// + /// This will change the `ss58format` of the account to match the requested one. + /// Note that a warning will be produced in case the current format does not match + /// the requested one, but the conversion always succeeds. + pub fn enforce_chain(&mut self) { + let original = self.clone(); + self.ss58_format = T::ss58_format().try_into().expect(SS58_FORMAT_PROOF); + log::debug!("{} SS58 format: {} (RAW: {})", self, self.ss58_format, self.account); + if original.ss58_format != self.ss58_format { + log::warn!( + target: "bridge", + "Address {} does not seem to match {}'s SS58 format (got: {}, expected: {}).\nConverted to: {}", + original, + T::NAME, + original.ss58_format, + self.ss58_format, + self, + ) + } + } + + /// Returns the raw (no SS58-prefixed) account id. + pub fn raw_id(&self) -> sp_runtime::AccountId32 { + self.account.clone() + } +} + +/// Bridge-supported network definition. +/// +/// Used to abstract away CLI commands. 
+pub trait CliChain: relay_substrate_client::Chain { + /// Chain's current version of the runtime. + const RUNTIME_VERSION: sp_version::RuntimeVersion; + + /// Crypto keypair type used to send messages. + /// + /// In case of chains supporting multiple cryptos, pick one used by the CLI. + type KeyPair: sp_core::crypto::Pair; + + /// Bridge Message Payload type. + /// + /// TODO [#854] This should be removed in favour of target-specifc types. + type MessagePayload; + + /// Numeric value of SS58 format. + fn ss58_format() -> u16; + + /// Construct message payload to be sent over the bridge. + fn encode_message(message: crate::cli::encode_message::MessagePayload) -> Result; + + /// Maximal extrinsic weight (from the runtime). + fn max_extrinsic_weight() -> Weight; +} + +/// Lane id. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct HexLaneId(pub LaneId); + +impl From for LaneId { + fn from(lane_id: HexLaneId) -> LaneId { + lane_id.0 + } +} + +impl std::str::FromStr for HexLaneId { + type Err = hex::FromHexError; + + fn from_str(s: &str) -> Result { + let mut lane_id = LaneId::default(); + hex::decode_to_slice(s, &mut lane_id)?; + Ok(HexLaneId(lane_id)) + } +} + +/// Nicer formatting for raw bytes vectors. +#[derive(Default, Encode, Decode, PartialEq, Eq)] +pub struct HexBytes(pub Vec); + +impl std::str::FromStr for HexBytes { + type Err = hex::FromHexError; + + fn from_str(s: &str) -> Result { + Ok(Self(hex::decode(s)?)) + } +} + +impl std::fmt::Debug for HexBytes { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(fmt, "0x{}", self) + } +} + +impl std::fmt::Display for HexBytes { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(fmt, "{}", hex::encode(&self.0)) + } +} + +impl HexBytes { + /// Encode given object and wrap into nicely formatted bytes. + pub fn encode(t: &T) -> Self { + Self(t.encode()) + } +} + +/// Prometheus metrics params. 
+#[derive(StructOpt)] +pub struct PrometheusParams { + /// Do not expose a Prometheus metric endpoint. + #[structopt(long)] + pub no_prometheus: bool, + /// Expose Prometheus endpoint at given interface. + #[structopt(long, default_value = "127.0.0.1")] + pub prometheus_host: String, + /// Expose Prometheus endpoint at given port. + #[structopt(long, default_value = "9616")] + pub prometheus_port: u16, +} + +impl From for relay_utils::metrics::MetricsParams { + fn from(cli_params: PrometheusParams) -> relay_utils::metrics::MetricsParams { + if !cli_params.no_prometheus { + Some(relay_utils::metrics::MetricsAddress { + host: cli_params.prometheus_host, + port: cli_params.prometheus_port, + }) + .into() + } else { + None.into() + } + } +} + +/// Either explicit or maximal allowed value. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ExplicitOrMaximal { + /// User has explicitly specified argument value. + Explicit(V), + /// Maximal allowed value for this argument. + Maximal, +} + +impl std::str::FromStr for ExplicitOrMaximal +where + V::Err: std::fmt::Debug, +{ + type Err = String; + + fn from_str(s: &str) -> Result { + if s.to_lowercase() == "max" { + return Ok(ExplicitOrMaximal::Maximal); + } + + V::from_str(s) + .map(ExplicitOrMaximal::Explicit) + .map_err(|e| format!("Failed to parse '{:?}'. Expected 'max' or explicit value", e)) + } +} + +/// Create chain-specific set of configuration objects: connection parameters, +/// signing parameters and bridge initialisation parameters. +#[macro_export] +macro_rules! declare_chain_options { + ($chain:ident, $chain_prefix:ident) => { + paste::item! 
{ + #[doc = $chain " connection params."] + #[derive(StructOpt, Debug, PartialEq, Eq)] + pub struct [<$chain ConnectionParams>] { + #[doc = "Connect to " $chain " node at given host."] + #[structopt(long, default_value = "127.0.0.1")] + pub [<$chain_prefix _host>]: String, + #[doc = "Connect to " $chain " node websocket server at given port."] + #[structopt(long)] + pub [<$chain_prefix _port>]: u16, + #[doc = "Use secure websocket connection."] + #[structopt(long)] + pub [<$chain_prefix _secure>]: bool, + } + + #[doc = $chain " signing params."] + #[derive(StructOpt, Debug, PartialEq, Eq)] + pub struct [<$chain SigningParams>] { + #[doc = "The SURI of secret key to use when transactions are submitted to the " $chain " node."] + #[structopt(long)] + pub [<$chain_prefix _signer>]: String, + #[doc = "The password for the SURI of secret key to use when transactions are submitted to the " $chain " node."] + #[structopt(long)] + pub [<$chain_prefix _signer_password>]: Option, + } + + impl [<$chain SigningParams>] { + /// Parse signing params into chain-specific KeyPair. + pub fn to_keypair(&self) -> anyhow::Result { + use sp_core::crypto::Pair; + + Chain::KeyPair::from_string( + &self.[<$chain_prefix _signer>], + self.[<$chain_prefix _signer_password>].as_deref() + ).map_err(|e| anyhow::format_err!("{:?}", e)) + } + } + + impl [<$chain ConnectionParams>] { + /// Convert connection params into Substrate client. + pub async fn to_client( + &self, + ) -> anyhow::Result> { + Ok(relay_substrate_client::Client::new(relay_substrate_client::ConnectionParams { + host: self.[<$chain_prefix _host>].clone(), + port: self.[<$chain_prefix _port>], + secure: self.[<$chain_prefix _secure>], + }) + .await? 
+ ) + } + } + } + }; +} + +declare_chain_options!(Source, source); +declare_chain_options!(Target, target); + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use super::*; + + #[test] + fn should_format_addresses_with_ss58_format() { + // given + let rialto1 = "5sauUXUfPjmwxSgmb3tZ5d6yx24eZX4wWJ2JtVUBaQqFbvEU"; + let rialto2 = "5rERgaT1Z8nM3et2epA5i1VtEBfp5wkhwHtVE8HK7BRbjAH2"; + let millau1 = "752paRyW1EGfq9YLTSSqcSJ5hqnBDidBmaftGhBo8fy6ypW9"; + let millau2 = "74GNQjmkcfstRftSQPJgMREchqHM56EvAUXRc266cZ1NYVW5"; + + let expected = vec![rialto1, rialto2, millau1, millau2]; + + // when + let parsed = expected + .iter() + .map(|s| AccountId::from_str(s).unwrap()) + .collect::>(); + + let actual = parsed.iter().map(|a| format!("{}", a)).collect::>(); + + assert_eq!(actual, expected) + } + + #[test] + fn hex_bytes_display_matches_from_str_for_clap() { + // given + let hex = HexBytes(vec![1, 2, 3, 4]); + let display = format!("{}", hex); + + // when + let hex2: HexBytes = display.parse().unwrap(); + + // then + assert_eq!(hex.0, hex2.0); + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers.rs b/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers.rs new file mode 100644 index 0000000000..346790f2ae --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers.rs @@ -0,0 +1,110 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::{PrometheusParams, SourceConnectionParams, TargetConnectionParams, TargetSigningParams}; +use crate::finality_pipeline::SubstrateFinalitySyncPipeline; +use structopt::{clap::arg_enum, StructOpt}; + +/// Start headers relayer process. +#[derive(StructOpt)] +pub struct RelayHeaders { + /// A bridge instance to relay headers for. + #[structopt(possible_values = &RelayHeadersBridge::variants(), case_insensitive = true)] + bridge: RelayHeadersBridge, + #[structopt(flatten)] + source: SourceConnectionParams, + #[structopt(flatten)] + target: TargetConnectionParams, + #[structopt(flatten)] + target_sign: TargetSigningParams, + #[structopt(flatten)] + prometheus_params: PrometheusParams, +} + +// TODO [#851] Use kebab-case. +arg_enum! { + #[derive(Debug)] + /// Headers relay bridge. + pub enum RelayHeadersBridge { + MillauToRialto, + RialtoToMillau, + WestendToMillau, + WestendToRococo, + RococoToWestend, + } +} + +macro_rules! 
select_bridge { + ($bridge: expr, $generic: tt) => { + match $bridge { + RelayHeadersBridge::MillauToRialto => { + type Source = relay_millau_client::Millau; + type Target = relay_rialto_client::Rialto; + type Finality = crate::chains::millau_headers_to_rialto::MillauFinalityToRialto; + + $generic + } + RelayHeadersBridge::RialtoToMillau => { + type Source = relay_rialto_client::Rialto; + type Target = relay_millau_client::Millau; + type Finality = crate::chains::rialto_headers_to_millau::RialtoFinalityToMillau; + + $generic + } + RelayHeadersBridge::WestendToMillau => { + type Source = relay_westend_client::Westend; + type Target = relay_millau_client::Millau; + type Finality = crate::chains::westend_headers_to_millau::WestendFinalityToMillau; + + $generic + } + RelayHeadersBridge::WestendToRococo => { + type Source = relay_westend_client::Westend; + type Target = relay_rococo_client::Rococo; + type Finality = crate::chains::westend_headers_to_rococo::WestendFinalityToRococo; + + $generic + } + RelayHeadersBridge::RococoToWestend => { + type Source = relay_rococo_client::Rococo; + type Target = relay_westend_client::Westend; + type Finality = crate::chains::rococo_headers_to_westend::RococoFinalityToWestend; + + $generic + } + } + }; +} + +impl RelayHeaders { + /// Run the command. 
+ pub async fn run(self) -> anyhow::Result<()> { + select_bridge!(self.bridge, { + let source_client = self.source.to_client::().await?; + let target_client = self.target.to_client::().await?; + let target_sign = self.target_sign.to_keypair::()?; + let metrics_params = Finality::customize_metrics(self.prometheus_params.into())?; + + crate::finality_pipeline::run( + Finality::new(target_client.clone(), target_sign), + source_client, + target_client, + metrics_params, + ) + .await + }) + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs b/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs new file mode 100644 index 0000000000..98ff1268fa --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs @@ -0,0 +1,183 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Complex headers+messages relays support. +//! +//! To add new complex relay between `ChainA` and `ChainB`, you must: +//! +//! 1) ensure that there's a `declare_chain_options!(...)` for both chains; +//! 2) add `declare_bridge_options!(...)` for the bridge; +//! 3) add bridge support to the `select_bridge! { ... }` macro. 
+ +use crate::cli::{CliChain, HexLaneId, PrometheusParams}; +use crate::declare_chain_options; +use crate::messages_lane::MessagesRelayParams; +use crate::on_demand_headers::OnDemandHeadersRelay; + +use futures::{FutureExt, TryFutureExt}; +use relay_utils::metrics::MetricsParams; +use structopt::StructOpt; + +/// Start headers+messages relayer process. +#[derive(StructOpt)] +pub enum RelayHeadersAndMessages { + MillauRialto(MillauRialtoHeadersAndMessages), +} + +/// Parameters that have the same names across all bridges. +#[derive(StructOpt)] +pub struct HeadersAndMessagesSharedParams { + /// Hex-encoded lane id that should be served by the relay. Defaults to `00000000`. + #[structopt(long, default_value = "00000000")] + lane: HexLaneId, + #[structopt(flatten)] + prometheus_params: PrometheusParams, +} + +// The reason behind this macro is that 'normal' relays are using source and target chains terminology, +// which is unusable for both-way relays (if you're relaying headers from Rialto to Millau and from +// Millau to Rialto, then which chain is source?). +macro_rules! declare_bridge_options { + ($chain1:ident, $chain2:ident) => { + paste::item! { + #[doc = $chain1 " and " $chain2 " headers+messages relay params."] + #[derive(StructOpt)] + pub struct [<$chain1 $chain2 HeadersAndMessages>] { + #[structopt(flatten)] + shared: HeadersAndMessagesSharedParams, + #[structopt(flatten)] + left: [<$chain1 ConnectionParams>], + #[structopt(flatten)] + left_sign: [<$chain1 SigningParams>], + #[structopt(flatten)] + right: [<$chain2 ConnectionParams>], + #[structopt(flatten)] + right_sign: [<$chain2 SigningParams>], + } + + #[allow(unreachable_patterns)] + impl From for [<$chain1 $chain2 HeadersAndMessages>] { + fn from(relay_params: RelayHeadersAndMessages) -> [<$chain1 $chain2 HeadersAndMessages>] { + match relay_params { + RelayHeadersAndMessages::[<$chain1 $chain2>](params) => params, + _ => unreachable!(), + } + } + } + } + }; +} + +macro_rules! 
select_bridge { + ($bridge: expr, $generic: tt) => { + match $bridge { + RelayHeadersAndMessages::MillauRialto(_) => { + type Params = MillauRialtoHeadersAndMessages; + + type Left = relay_millau_client::Millau; + type Right = relay_rialto_client::Rialto; + + type LeftToRightFinality = crate::chains::millau_headers_to_rialto::MillauFinalityToRialto; + type RightToLeftFinality = crate::chains::rialto_headers_to_millau::RialtoFinalityToMillau; + + type LeftToRightMessages = crate::chains::millau_messages_to_rialto::MillauMessagesToRialto; + type RightToLeftMessages = crate::chains::rialto_messages_to_millau::RialtoMessagesToMillau; + + use crate::chains::millau_messages_to_rialto::run as left_to_right_messages; + use crate::chains::rialto_messages_to_millau::run as right_to_left_messages; + + $generic + } + } + }; +} + +// All supported chains. +declare_chain_options!(Millau, millau); +declare_chain_options!(Rialto, rialto); +// All supported bridges. +declare_bridge_options!(Millau, Rialto); + +impl RelayHeadersAndMessages { + /// Run the command. 
+ pub async fn run(self) -> anyhow::Result<()> { + select_bridge!(self, { + let params: Params = self.into(); + + let left_client = params.left.to_client::().await?; + let left_sign = params.left_sign.to_keypair::()?; + let right_client = params.right.to_client::().await?; + let right_sign = params.right_sign.to_keypair::()?; + + let lane = params.shared.lane.into(); + + let metrics_params: MetricsParams = params.shared.prometheus_params.into(); + let metrics_params = relay_utils::relay_metrics(None, metrics_params).into_params(); + + let left_to_right_on_demand_headers = OnDemandHeadersRelay::new( + left_client.clone(), + right_client.clone(), + LeftToRightFinality::new(right_client.clone(), right_sign.clone()), + ); + let right_to_left_on_demand_headers = OnDemandHeadersRelay::new( + right_client.clone(), + left_client.clone(), + RightToLeftFinality::new(left_client.clone(), left_sign.clone()), + ); + + let left_to_right_messages = left_to_right_messages(MessagesRelayParams { + source_client: left_client.clone(), + source_sign: left_sign.clone(), + target_client: right_client.clone(), + target_sign: right_sign.clone(), + source_to_target_headers_relay: Some(left_to_right_on_demand_headers.clone()), + target_to_source_headers_relay: Some(right_to_left_on_demand_headers.clone()), + lane_id: lane, + metrics_params: metrics_params + .clone() + .disable() + .metrics_prefix(messages_relay::message_lane_loop::metrics_prefix::(&lane)), + }) + .map_err(|e| anyhow::format_err!("{}", e)) + .boxed(); + let right_to_left_messages = right_to_left_messages(MessagesRelayParams { + source_client: right_client, + source_sign: right_sign, + target_client: left_client.clone(), + target_sign: left_sign.clone(), + source_to_target_headers_relay: Some(right_to_left_on_demand_headers), + target_to_source_headers_relay: Some(left_to_right_on_demand_headers), + lane_id: lane, + metrics_params: metrics_params + .clone() + .disable() + 
.metrics_prefix(messages_relay::message_lane_loop::metrics_prefix::(&lane)), + }) + .map_err(|e| anyhow::format_err!("{}", e)) + .boxed(); + + relay_utils::relay_metrics(None, metrics_params) + .expose() + .await + .map_err(|e| anyhow::format_err!("{}", e))?; + + futures::future::select(left_to_right_messages, right_to_left_messages) + .await + .factor_first() + .0 + }) + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/relay_messages.rs b/polkadot/bridges/relays/bin-substrate/src/cli/relay_messages.rs new file mode 100644 index 0000000000..94630886ca --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/cli/relay_messages.rs @@ -0,0 +1,71 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::bridge::FullBridge; +use crate::cli::{ + HexLaneId, PrometheusParams, SourceConnectionParams, SourceSigningParams, TargetConnectionParams, + TargetSigningParams, +}; +use crate::messages_lane::MessagesRelayParams; +use crate::select_full_bridge; + +use structopt::StructOpt; + +/// Start messages relayer process. +#[derive(StructOpt)] +pub struct RelayMessages { + /// A bridge instance to relay messages for. 
+ #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + bridge: FullBridge, + /// Hex-encoded lane id that should be served by the relay. Defaults to `00000000`. + #[structopt(long, default_value = "00000000")] + lane: HexLaneId, + #[structopt(flatten)] + source: SourceConnectionParams, + #[structopt(flatten)] + source_sign: SourceSigningParams, + #[structopt(flatten)] + target: TargetConnectionParams, + #[structopt(flatten)] + target_sign: TargetSigningParams, + #[structopt(flatten)] + prometheus_params: PrometheusParams, +} + +impl RelayMessages { + /// Run the command. + pub async fn run(self) -> anyhow::Result<()> { + select_full_bridge!(self.bridge, { + let source_client = self.source.to_client::().await?; + let source_sign = self.source_sign.to_keypair::()?; + let target_client = self.target.to_client::().await?; + let target_sign = self.target_sign.to_keypair::()?; + + relay_messages(MessagesRelayParams { + source_client, + source_sign, + target_client, + target_sign, + source_to_target_headers_relay: None, + target_to_source_headers_relay: None, + lane_id: self.lane.into(), + metrics_params: self.prometheus_params.into(), + }) + .await + .map_err(|e| anyhow::format_err!("{}", e)) + }) + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/send_message.rs b/polkadot/bridges/relays/bin-substrate/src/cli/send_message.rs new file mode 100644 index 0000000000..64448f0f1d --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/cli/send_message.rs @@ -0,0 +1,317 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::bridge::FullBridge; +use crate::cli::encode_call::{self, CliEncodeCall}; +use crate::cli::estimate_fee::estimate_message_delivery_and_dispatch_fee; +use crate::cli::{ + Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId, Origins, SourceConnectionParams, SourceSigningParams, + TargetSigningParams, +}; +use codec::Encode; +use frame_support::{dispatch::GetDispatchInfo, weights::Weight}; +use pallet_bridge_dispatch::{CallOrigin, MessagePayload}; +use relay_substrate_client::{Chain, TransactionSignScheme}; +use sp_core::{Bytes, Pair}; +use sp_runtime::{traits::IdentifyAccount, AccountId32, MultiSignature, MultiSigner}; +use std::fmt::Debug; +use structopt::StructOpt; + +/// Send bridge message. +#[derive(StructOpt)] +pub struct SendMessage { + /// A bridge instance to encode call for. + #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + bridge: FullBridge, + #[structopt(flatten)] + source: SourceConnectionParams, + #[structopt(flatten)] + source_sign: SourceSigningParams, + // TODO [#885] Move TargetSign to origins + #[structopt(flatten)] + target_sign: TargetSigningParams, + /// Hex-encoded lane id. Defaults to `00000000`. + #[structopt(long, default_value = "00000000")] + lane: HexLaneId, + /// Dispatch weight of the message. If not passed, determined automatically. + #[structopt(long)] + dispatch_weight: Option>, + /// Delivery and dispatch fee in source chain base currency units. If not passed, determined automatically. + #[structopt(long)] + fee: Option, + /// Message type. 
+ #[structopt(subcommand)] + message: crate::cli::encode_call::Call, + /// The origin to use when dispatching the message on the target chain. Defaults to + /// `SourceAccount`. + #[structopt(long, possible_values = &Origins::variants(), default_value = "Source")] + origin: Origins, +} + +impl SendMessage { + pub fn encode_payload( + &mut self, + ) -> anyhow::Result>> { + crate::select_full_bridge!(self.bridge, { + let SendMessage { + source_sign, + target_sign, + ref mut message, + dispatch_weight, + origin, + bridge, + .. + } = self; + + let source_sign = source_sign.to_keypair::()?; + let target_sign = target_sign.to_keypair::()?; + + encode_call::preprocess_call::(message, bridge.bridge_instance_index()); + let target_call = Target::encode_call(&message)?; + + let payload = { + let target_call_weight = prepare_call_dispatch_weight( + dispatch_weight, + ExplicitOrMaximal::Explicit(target_call.get_dispatch_info().weight), + compute_maximal_message_dispatch_weight(Target::max_extrinsic_weight()), + ); + let source_sender_public: MultiSigner = source_sign.public().into(); + let source_account_id = source_sender_public.into_account(); + + message_payload( + Target::RUNTIME_VERSION.spec_version, + target_call_weight, + match origin { + Origins::Source => CallOrigin::SourceAccount(source_account_id), + Origins::Target => { + let digest = account_ownership_digest( + &target_call, + source_account_id.clone(), + Target::RUNTIME_VERSION.spec_version, + ); + let target_origin_public = target_sign.public(); + let digest_signature = target_sign.sign(&digest); + CallOrigin::TargetAccount( + source_account_id, + target_origin_public.into(), + digest_signature.into(), + ) + } + }, + &target_call, + ) + }; + Ok(payload) + }) + } + + /// Run the command. 
+ pub async fn run(mut self) -> anyhow::Result<()> { + crate::select_full_bridge!(self.bridge, { + let payload = self.encode_payload()?; + + let source_client = self.source.to_client::().await?; + let source_sign = self.source_sign.to_keypair::()?; + + let lane = self.lane.clone().into(); + let fee = match self.fee { + Some(fee) => fee, + None => Balance( + estimate_message_delivery_and_dispatch_fee::< + ::NativeBalance, + _, + _, + >(&source_client, ESTIMATE_MESSAGE_FEE_METHOD, lane, payload.clone()) + .await? as _, + ), + }; + let dispatch_weight = payload.weight; + let send_message_call = Source::encode_call(&encode_call::Call::BridgeSendMessage { + bridge_instance_index: self.bridge.bridge_instance_index(), + lane: self.lane, + payload: HexBytes::encode(&payload), + fee, + })?; + + source_client + .submit_signed_extrinsic(source_sign.public().into(), |transaction_nonce| { + let signed_source_call = Source::sign_transaction( + *source_client.genesis_hash(), + &source_sign, + transaction_nonce, + send_message_call, + ) + .encode(); + + log::info!( + target: "bridge", + "Sending message to {}. Size: {}. Dispatch weight: {}. 
Fee: {}", + Target::NAME, + signed_source_call.len(), + dispatch_weight, + fee, + ); + log::info!( + target: "bridge", + "Signed {} Call: {:?}", + Source::NAME, + HexBytes::encode(&signed_source_call) + ); + + Bytes(signed_source_call) + }) + .await?; + }); + + Ok(()) + } +} + +fn prepare_call_dispatch_weight( + user_specified_dispatch_weight: &Option>, + weight_from_pre_dispatch_call: ExplicitOrMaximal, + maximal_allowed_weight: Weight, +) -> Weight { + match user_specified_dispatch_weight + .clone() + .unwrap_or(weight_from_pre_dispatch_call) + { + ExplicitOrMaximal::Explicit(weight) => weight, + ExplicitOrMaximal::Maximal => maximal_allowed_weight, + } +} + +pub(crate) fn message_payload( + spec_version: u32, + weight: Weight, + origin: CallOrigin, + call: &impl Encode, +) -> MessagePayload> +where + SAccountId: Encode + Debug, + TPublic: Encode + Debug, + TSignature: Encode + Debug, +{ + // Display nicely formatted call. + let payload = MessagePayload { + spec_version, + weight, + origin, + call: HexBytes::encode(call), + }; + + log::info!(target: "bridge", "Created Message Payload: {:#?}", payload); + log::info!(target: "bridge", "Encoded Message Payload: {:?}", HexBytes::encode(&payload)); + + // re-pack to return `Vec` + let MessagePayload { + spec_version, + weight, + origin, + call, + } = payload; + MessagePayload { + spec_version, + weight, + origin, + call: call.0, + } +} + +pub(crate) fn compute_maximal_message_dispatch_weight(maximal_extrinsic_weight: Weight) -> Weight { + bridge_runtime_common::messages::target::maximal_incoming_message_dispatch_weight(maximal_extrinsic_weight) +} + +#[cfg(test)] +mod tests { + use super::*; + use hex_literal::hex; + + #[test] + fn send_remark_rialto_to_millau() { + // given + let mut send_message = SendMessage::from_iter(vec![ + "send-message", + "RialtoToMillau", + "--source-port", + "1234", + "--source-signer", + "//Alice", + "--target-signer", + "//Bob", + "remark", + "--remark-payload", + "1234", + ]); + + // 
when + let payload = send_message.encode_payload().unwrap(); + + // then + assert_eq!( + payload, + MessagePayload { + spec_version: relay_millau_client::Millau::RUNTIME_VERSION.spec_version, + weight: 1345000, + origin: CallOrigin::SourceAccount(sp_keyring::AccountKeyring::Alice.to_account_id()), + call: hex!("0401081234").to_vec(), + } + ); + } + + #[test] + fn send_remark_millau_to_rialto() { + // given + let mut send_message = SendMessage::from_iter(vec![ + "send-message", + "MillauToRialto", + "--source-port", + "1234", + "--source-signer", + "//Alice", + "--origin", + "Target", + "--target-signer", + "//Bob", + "remark", + "--remark-payload", + "1234", + ]); + + // when + let payload = send_message.encode_payload().unwrap(); + + // then + // Since signatures are randomized we extract it from here and only check the rest. + let signature = match payload.origin { + CallOrigin::TargetAccount(_, _, ref sig) => sig.clone(), + _ => panic!("Unexpected `CallOrigin`: {:?}", payload), + }; + assert_eq!( + payload, + MessagePayload { + spec_version: relay_millau_client::Millau::RUNTIME_VERSION.spec_version, + weight: 1345000, + origin: CallOrigin::TargetAccount( + sp_keyring::AccountKeyring::Alice.to_account_id(), + sp_keyring::AccountKeyring::Bob.into(), + signature, + ), + call: hex!("0701081234").to_vec(), + } + ); + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/finality_pipeline.rs b/polkadot/bridges/relays/bin-substrate/src/finality_pipeline.rs new file mode 100644 index 0000000000..bc8461f6a8 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/finality_pipeline.rs @@ -0,0 +1,149 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Substrate-to-Substrate headers sync entrypoint. + +use crate::finality_target::SubstrateFinalityTarget; + +use bp_header_chain::justification::GrandpaJustification; +use finality_relay::{FinalitySyncParams, FinalitySyncPipeline}; +use relay_substrate_client::{finality_source::FinalitySource, BlockNumberOf, Chain, Client, HashOf, SyncHeader}; +use relay_utils::{metrics::MetricsParams, BlockNumberBase}; +use sp_core::Bytes; +use std::{fmt::Debug, marker::PhantomData, time::Duration}; + +/// Default synchronization loop timeout. +const STALL_TIMEOUT: Duration = Duration::from_secs(120); +/// Default limit of recent finality proofs. +/// +/// Finality delay of 4096 blocks is unlikely to happen in practice in +/// Substrate+GRANDPA based chains (good to know). +const RECENT_FINALITY_PROOFS_LIMIT: usize = 4096; + +/// Headers sync pipeline for Substrate <-> Substrate relays. +pub trait SubstrateFinalitySyncPipeline: FinalitySyncPipeline { + /// Name of the runtime method that returns id of best finalized source header at target chain. + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str; + + /// Chain with GRANDPA bridge pallet. + type TargetChain: Chain; + + /// Customize metrics exposed by headers sync loop. 
+ fn customize_metrics(params: MetricsParams) -> anyhow::Result { + Ok(params) + } + + /// Returns id of account that we're using to sign transactions at target chain. + fn transactions_author(&self) -> ::AccountId; + + /// Make submit header transaction. + fn make_submit_finality_proof_transaction( + &self, + transaction_nonce: ::Index, + header: Self::Header, + proof: Self::FinalityProof, + ) -> Bytes; +} + +/// Substrate-to-Substrate finality proof pipeline. +#[derive(Clone)] +pub struct SubstrateFinalityToSubstrate { + /// Client for the target chain. + pub(crate) target_client: Client, + /// Data required to sign target chain transactions. + pub(crate) target_sign: TargetSign, + /// Unused generic arguments dump. + _marker: PhantomData, +} + +impl Debug + for SubstrateFinalityToSubstrate +{ + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + f.debug_struct("SubstrateFinalityToSubstrate") + .field("target_client", &self.target_client) + .finish() + } +} + +impl SubstrateFinalityToSubstrate { + /// Create new Substrate-to-Substrate headers pipeline. + pub fn new(target_client: Client, target_sign: TargetSign) -> Self { + SubstrateFinalityToSubstrate { + target_client, + target_sign, + _marker: Default::default(), + } + } +} + +impl FinalitySyncPipeline + for SubstrateFinalityToSubstrate +where + SourceChain: Clone + Chain + Debug, + BlockNumberOf: BlockNumberBase, + TargetChain: Clone + Chain + Debug, + TargetSign: Clone + Send + Sync, +{ + const SOURCE_NAME: &'static str = SourceChain::NAME; + const TARGET_NAME: &'static str = TargetChain::NAME; + + type Hash = HashOf; + type Number = BlockNumberOf; + type Header = SyncHeader; + type FinalityProof = GrandpaJustification; +} + +/// Run Substrate-to-Substrate finality sync. 
+pub async fn run( + pipeline: P, + source_client: Client, + target_client: Client, + metrics_params: MetricsParams, +) -> anyhow::Result<()> +where + P: SubstrateFinalitySyncPipeline< + Hash = HashOf, + Number = BlockNumberOf, + Header = SyncHeader, + FinalityProof = GrandpaJustification, + TargetChain = TargetChain, + >, + SourceChain: Clone + Chain, + BlockNumberOf: BlockNumberBase, + TargetChain: Clone + Chain, +{ + log::info!( + target: "bridge", + "Starting {} -> {} finality proof relay", + SourceChain::NAME, + TargetChain::NAME, + ); + + finality_relay::run( + FinalitySource::new(source_client), + SubstrateFinalityTarget::new(target_client, pipeline), + FinalitySyncParams { + tick: std::cmp::max(SourceChain::AVERAGE_BLOCK_INTERVAL, TargetChain::AVERAGE_BLOCK_INTERVAL), + recent_finality_proofs_limit: RECENT_FINALITY_PROOFS_LIMIT, + stall_timeout: STALL_TIMEOUT, + }, + metrics_params, + futures::future::pending(), + ) + .await + .map_err(|e| anyhow::format_err!("{}", e)) +} diff --git a/polkadot/bridges/relays/bin-substrate/src/finality_target.rs b/polkadot/bridges/relays/bin-substrate/src/finality_target.rs new file mode 100644 index 0000000000..ffa10cabac --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/finality_target.rs @@ -0,0 +1,91 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Substrate client as Substrate finality proof target. The chain we connect to should have +//! runtime that implements `FinalityApi` to allow bridging with +//! chain. + +use crate::finality_pipeline::SubstrateFinalitySyncPipeline; + +use async_trait::async_trait; +use codec::Decode; +use finality_relay::TargetClient; +use relay_substrate_client::{Chain, Client, Error as SubstrateError}; +use relay_utils::relay_loop::Client as RelayClient; + +/// Substrate client as Substrate finality target. +pub struct SubstrateFinalityTarget { + client: Client, + pipeline: P, +} + +impl SubstrateFinalityTarget { + /// Create new Substrate headers target. + pub fn new(client: Client, pipeline: P) -> Self { + SubstrateFinalityTarget { client, pipeline } + } +} + +impl Clone for SubstrateFinalityTarget { + fn clone(&self) -> Self { + SubstrateFinalityTarget { + client: self.client.clone(), + pipeline: self.pipeline.clone(), + } + } +} + +#[async_trait] +impl RelayClient for SubstrateFinalityTarget { + type Error = SubstrateError; + + async fn reconnect(&mut self) -> Result<(), SubstrateError> { + self.client.reconnect().await + } +} + +#[async_trait] +impl TargetClient

for SubstrateFinalityTarget +where + C: Chain, + P::Number: Decode, + P::Hash: Decode, + P: SubstrateFinalitySyncPipeline, +{ + async fn best_finalized_source_block_number(&self) -> Result { + // we can't continue to relay finality if target node is out of sync, because + // it may have already received (some of) headers that we're going to relay + self.client.ensure_synced().await?; + + Ok(crate::messages_source::read_client_state::( + &self.client, + P::BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET, + ) + .await? + .best_finalized_peer_at_best_self + .0) + } + + async fn submit_finality_proof(&self, header: P::Header, proof: P::FinalityProof) -> Result<(), SubstrateError> { + self.client + .submit_signed_extrinsic(self.pipeline.transactions_author(), move |transaction_nonce| { + self.pipeline + .make_submit_finality_proof_transaction(transaction_nonce, header, proof) + }) + .await + .map(drop) + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/headers_initialize.rs b/polkadot/bridges/relays/bin-substrate/src/headers_initialize.rs new file mode 100644 index 0000000000..c2eab1bd35 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/headers_initialize.rs @@ -0,0 +1,256 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! 
Initialize Substrate -> Substrate headers bridge. +//! +//! Initialization is a transaction that calls `initialize()` function of the +//! `pallet-bridge-grandpa` pallet. This transaction brings initial header +//! and authorities set from source to target chain. The headers sync starts +//! with this header. + +use bp_header_chain::InitializationData; +use bp_header_chain::{ + find_grandpa_authorities_scheduled_change, + justification::{verify_justification, GrandpaJustification}, +}; +use codec::Decode; +use finality_grandpa::voter_set::VoterSet; +use num_traits::{One, Zero}; +use relay_substrate_client::{Chain, Client}; +use sp_core::Bytes; +use sp_finality_grandpa::AuthorityList as GrandpaAuthoritiesSet; +use sp_runtime::traits::Header as HeaderT; + +/// Submit headers-bridge initialization transaction. +pub async fn initialize( + source_client: Client, + target_client: Client, + target_transactions_signer: TargetChain::AccountId, + prepare_initialize_transaction: impl FnOnce(TargetChain::Index, InitializationData) -> Bytes, +) { + let result = do_initialize( + source_client, + target_client, + target_transactions_signer, + prepare_initialize_transaction, + ) + .await; + + match result { + Ok(tx_hash) => log::info!( + target: "bridge", + "Successfully submitted {}-headers bridge initialization transaction to {}: {:?}", + SourceChain::NAME, + TargetChain::NAME, + tx_hash, + ), + Err(err) => log::error!( + target: "bridge", + "Failed to submit {}-headers bridge initialization transaction to {}: {:?}", + SourceChain::NAME, + TargetChain::NAME, + err, + ), + } +} + +/// Craft and submit initialization transaction, returning any error that may occur. 
+async fn do_initialize( + source_client: Client, + target_client: Client, + target_transactions_signer: TargetChain::AccountId, + prepare_initialize_transaction: impl FnOnce(TargetChain::Index, InitializationData) -> Bytes, +) -> Result { + let initialization_data = prepare_initialization_data(source_client).await?; + log::info!( + target: "bridge", + "Prepared initialization data for {}-headers bridge at {}: {:?}", + SourceChain::NAME, + TargetChain::NAME, + initialization_data, + ); + + let initialization_tx_hash = target_client + .submit_signed_extrinsic(target_transactions_signer, move |transaction_nonce| { + prepare_initialize_transaction(transaction_nonce, initialization_data) + }) + .await + .map_err(|err| format!("Failed to submit {} transaction: {:?}", TargetChain::NAME, err))?; + Ok(initialization_tx_hash) +} + +/// Prepare initialization data for the GRANDPA verifier pallet. +async fn prepare_initialization_data( + source_client: Client, +) -> Result, String> { + // In ideal world we just need to get best finalized header and then to read GRANDPA authorities + // set (`pallet_grandpa::CurrentSetId` + `GrandpaApi::grandpa_authorities()`) at this header. + // + // But now there are problems with this approach - `CurrentSetId` may return invalid value. So here + // we're waiting for the next justification, read the authorities set and then try to figure out + // the set id with bruteforce. + let mut justifications = source_client + .subscribe_justifications() + .await + .map_err(|err| format!("Failed to subscribe to {} justifications: {:?}", SourceChain::NAME, err))?; + + // Read next justification - the header that it finalizes will be used as initial header. + let justification = justifications.next().await.ok_or_else(|| { + format!( + "Failed to read {} justification from the stream: stream has ended unexpectedly", + SourceChain::NAME, + ) + })?; + + // Read initial header. 
+ let justification: GrandpaJustification = Decode::decode(&mut &justification.0[..]) + .map_err(|err| format!("Failed to decode {} justification: {:?}", SourceChain::NAME, err))?; + + let (initial_header_hash, initial_header_number) = + (justification.commit.target_hash, justification.commit.target_number); + + let initial_header = source_header(&source_client, initial_header_hash).await?; + log::trace!(target: "bridge", "Selected {} initial header: {}/{}", + SourceChain::NAME, + initial_header_number, + initial_header_hash, + ); + + // Read GRANDPA authorities set at initial header. + let initial_authorities_set = source_authorities_set(&source_client, initial_header_hash).await?; + log::trace!(target: "bridge", "Selected {} initial authorities set: {:?}", + SourceChain::NAME, + initial_authorities_set, + ); + + // If initial header changes the GRANDPA authorities set, then we need previous authorities + // to verify justification. + let mut authorities_for_verification = initial_authorities_set.clone(); + let scheduled_change = find_grandpa_authorities_scheduled_change(&initial_header); + assert!( + scheduled_change.as_ref().map(|c| c.delay.is_zero()).unwrap_or(true), + "GRANDPA authorities change at {} scheduled to happen in {:?} blocks. We expect\ + regular hange to have zero delay", + initial_header_hash, + scheduled_change.as_ref().map(|c| c.delay), + ); + let schedules_change = scheduled_change.is_some(); + if schedules_change { + authorities_for_verification = source_authorities_set(&source_client, *initial_header.parent_hash()).await?; + log::trace!( + target: "bridge", + "Selected {} header is scheduling GRANDPA authorities set changes. Using previous set: {:?}", + SourceChain::NAME, + authorities_for_verification, + ); + } + + // Now let's try to guess authorities set id by verifying justification. 
+ let mut initial_authorities_set_id = 0; + let mut min_possible_block_number = SourceChain::BlockNumber::zero(); + let authorities_for_verification = VoterSet::new(authorities_for_verification.clone()).ok_or_else(|| { + format!( + "Read invalid {} authorities set: {:?}", + SourceChain::NAME, + authorities_for_verification, + ) + })?; + loop { + log::trace!( + target: "bridge", "Trying {} GRANDPA authorities set id: {}", + SourceChain::NAME, + initial_authorities_set_id, + ); + + let is_valid_set_id = verify_justification::( + (initial_header_hash, initial_header_number), + initial_authorities_set_id, + &authorities_for_verification, + &justification, + ) + .is_ok(); + + if is_valid_set_id { + break; + } + + initial_authorities_set_id += 1; + min_possible_block_number += One::one(); + if min_possible_block_number > initial_header_number { + // there can't be more authorities set changes than headers => if we have reached `initial_block_number` + // and still have not found correct value of `initial_authorities_set_id`, then something + // else is broken => fail + return Err(format!( + "Failed to guess initial {} GRANDPA authorities set id: checked all\ + possible ids in range [0; {}]", + SourceChain::NAME, + initial_header_number + )); + } + } + + Ok(InitializationData { + header: initial_header, + authority_list: initial_authorities_set, + set_id: if schedules_change { + initial_authorities_set_id + 1 + } else { + initial_authorities_set_id + }, + is_halted: false, + }) +} + +/// Read header by hash from the source client. +async fn source_header( + source_client: &Client, + header_hash: SourceChain::Hash, +) -> Result { + source_client.header_by_hash(header_hash).await.map_err(|err| { + format!( + "Failed to retrive {} header with hash {}: {:?}", + SourceChain::NAME, + header_hash, + err, + ) + }) +} + +/// Read GRANDPA authorities set at given header. 
+async fn source_authorities_set( + source_client: &Client, + header_hash: SourceChain::Hash, +) -> Result { + let raw_authorities_set = source_client + .grandpa_authorities_set(header_hash) + .await + .map_err(|err| { + format!( + "Failed to retrive {} GRANDPA authorities set at header {}: {:?}", + SourceChain::NAME, + header_hash, + err, + ) + })?; + GrandpaAuthoritiesSet::decode(&mut &raw_authorities_set[..]).map_err(|err| { + format!( + "Failed to decode {} GRANDPA authorities set at header {}: {:?}", + SourceChain::NAME, + header_hash, + err, + ) + }) +} diff --git a/polkadot/bridges/relays/bin-substrate/src/main.rs b/polkadot/bridges/relays/bin-substrate/src/main.rs new file mode 100644 index 0000000000..6bf7561fcd --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/main.rs @@ -0,0 +1,41 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Substrate-to-substrate relay entrypoint. 
+ +#![warn(missing_docs)] + +use relay_utils::initialize::initialize_logger; + +mod chains; +mod cli; +mod finality_pipeline; +mod finality_target; +mod headers_initialize; +mod messages_lane; +mod messages_source; +mod messages_target; +mod on_demand_headers; + +fn main() { + initialize_logger(false); + let command = cli::parse_args(); + let run = command.run(); + let result = async_std::task::block_on(run); + if let Err(error) = result { + log::error!(target: "bridge", "Failed to start relay: {}", error); + } +} diff --git a/polkadot/bridges/relays/substrate/src/messages_lane.rs b/polkadot/bridges/relays/bin-substrate/src/messages_lane.rs similarity index 77% rename from polkadot/bridges/relays/substrate/src/messages_lane.rs rename to polkadot/bridges/relays/bin-substrate/src/messages_lane.rs index 78b5f5c024..9948b6ec08 100644 --- a/polkadot/bridges/relays/substrate/src/messages_lane.rs +++ b/polkadot/bridges/relays/bin-substrate/src/messages_lane.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify @@ -16,18 +16,37 @@ use crate::messages_source::SubstrateMessagesProof; use crate::messages_target::SubstrateMessagesReceivingProof; +use crate::on_demand_headers::OnDemandHeadersRelay; -use async_trait::async_trait; -use bp_message_lane::MessageNonce; -use codec::Encode; +use bp_messages::{LaneId, MessageNonce}; use frame_support::weights::Weight; use messages_relay::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}; -use relay_substrate_client::{BlockNumberOf, Chain, Client, Error as SubstrateError, HashOf}; -use relay_utils::BlockNumberBase; +use relay_substrate_client::{BlockNumberOf, Chain, Client, HashOf}; +use relay_utils::{metrics::MetricsParams, BlockNumberBase}; +use sp_core::Bytes; use std::ops::RangeInclusive; +/// Substrate <-> Substrate messages relay parameters. +pub struct MessagesRelayParams { + /// Messages source client. + pub source_client: Client, + /// Sign parameters for messages source chain. + pub source_sign: SS, + /// Messages target client. + pub target_client: Client, + /// Sign parameters for messages target chain. + pub target_sign: TS, + /// Optional on-demand source to target headers relay. + pub source_to_target_headers_relay: Option>, + /// Optional on-demand target to source headers relay. + pub target_to_source_headers_relay: Option>, + /// Identifier of lane that needs to be served. + pub lane_id: LaneId, + /// Metrics parameters. + pub metrics_params: MetricsParams, +} + /// Message sync pipeline for Substrate <-> Substrate relays. -#[async_trait] pub trait SubstrateMessageLane: MessageLane { /// Name of the runtime method that returns dispatch weight of outbound messages at the source chain. const OUTBOUND_LANE_MESSAGES_DISPATCH_WEIGHT_METHOD: &'static str; @@ -48,25 +67,33 @@ pub trait SubstrateMessageLane: MessageLane { /// Name of the runtime method that returns id of best finalized target header at source chain. 
const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str; - /// Signed transaction type of the source chain. - type SourceSignedTransaction: Send + Sync + Encode; - /// Signed transaction type of the target chain. - type TargetSignedTransaction: Send + Sync + Encode; + /// Source chain. + type SourceChain: Chain; + /// Target chain. + type TargetChain: Chain; + + /// Returns id of account that we're using to sign transactions at target chain (messages proof). + fn target_transactions_author(&self) -> ::AccountId; /// Make messages delivery transaction. - async fn make_messages_delivery_transaction( + fn make_messages_delivery_transaction( &self, + transaction_nonce: ::Index, generated_at_header: SourceHeaderIdOf, nonces: RangeInclusive, proof: Self::MessagesProof, - ) -> Result; + ) -> Bytes; + + /// Returns id of account that we're using to sign transactions at source chain (delivery proof). + fn source_transactions_author(&self) -> ::AccountId; /// Make messages receiving proof transaction. - async fn make_messages_receiving_proof_transaction( + fn make_messages_receiving_proof_transaction( &self, + transaction_nonce: ::Index, generated_at_header: TargetHeaderIdOf, proof: Self::MessagesReceivingProof, - ) -> Result; + ) -> Bytes; } /// Substrate-to-Substrate message lane. @@ -121,7 +148,7 @@ where /// Returns maximal number of messages and their maximal cumulative dispatch weight, based /// on given chain parameters. 
-pub fn select_delivery_transaction_limits( +pub fn select_delivery_transaction_limits( max_extrinsic_weight: Weight, max_unconfirmed_messages_at_inbound_lane: MessageNonce, ) -> (MessageNonce, Weight) { @@ -161,11 +188,11 @@ pub fn select_delivery_transaction_limits mod tests { use super::*; - type RialtoToMillauMessageLaneWeights = pallet_message_lane::weights::RialtoWeight; + type RialtoToMillauMessagesWeights = pallet_bridge_messages::weights::RialtoWeight; #[test] fn select_delivery_transaction_limits_works() { - let (max_count, max_weight) = select_delivery_transaction_limits::( + let (max_count, max_weight) = select_delivery_transaction_limits::( bp_millau::max_extrinsic_weight(), bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, ); @@ -176,7 +203,7 @@ mod tests { // reserved for messages dispatch allows dispatch of non-trivial messages. // // Any significant change in this values should attract additional attention. - (955, 216_583_333_334), + (1020, 216_583_333_334), ); } } diff --git a/polkadot/bridges/relays/substrate/src/messages_source.rs b/polkadot/bridges/relays/bin-substrate/src/messages_source.rs similarity index 81% rename from polkadot/bridges/relays/substrate/src/messages_source.rs rename to polkadot/bridges/relays/bin-substrate/src/messages_source.rs index db894df8c7..cf98f3276b 100644 --- a/polkadot/bridges/relays/substrate/src/messages_source.rs +++ b/polkadot/bridges/relays/bin-substrate/src/messages_source.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -19,24 +19,26 @@ //! chain. 
use crate::messages_lane::SubstrateMessageLane; +use crate::on_demand_headers::OnDemandHeadersRelay; use async_trait::async_trait; -use bp_message_lane::{LaneId, MessageNonce}; +use bp_messages::{LaneId, MessageNonce}; use bp_runtime::InstanceId; use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; use codec::{Decode, Encode}; -use frame_support::weights::Weight; +use frame_support::{traits::Instance, weights::Weight}; use messages_relay::{ message_lane::{SourceHeaderIdOf, TargetHeaderIdOf}, message_lane_loop::{ ClientState, MessageProofParameters, MessageWeights, MessageWeightsMap, SourceClient, SourceClientState, }, }; +use pallet_bridge_messages::Config as MessagesConfig; use relay_substrate_client::{Chain, Client, Error as SubstrateError, HashOf, HeaderIdOf}; use relay_utils::{relay_loop::Client as RelayClient, BlockNumberBase, HeaderId}; use sp_core::Bytes; use sp_runtime::{traits::Header as HeaderT, DeserializeOwned}; -use std::ops::RangeInclusive; +use std::{marker::PhantomData, ops::RangeInclusive}; /// Intermediate message proof returned by the source Substrate node. Includes everything /// required to submit to the target node: cumulative dispatch weight of bundled messages and @@ -44,38 +46,56 @@ use std::ops::RangeInclusive; pub type SubstrateMessagesProof = (Weight, FromBridgedChainMessagesProof>); /// Substrate client as Substrate messages source. -pub struct SubstrateMessagesSource { +pub struct SubstrateMessagesSource { client: Client, lane: P, lane_id: LaneId, instance: InstanceId, + target_to_source_headers_relay: Option>, + _phantom: PhantomData<(R, I)>, } -impl SubstrateMessagesSource { +impl SubstrateMessagesSource { /// Create new Substrate headers source. 
- pub fn new(client: Client, lane: P, lane_id: LaneId, instance: InstanceId) -> Self { + pub fn new( + client: Client, + lane: P, + lane_id: LaneId, + instance: InstanceId, + target_to_source_headers_relay: Option>, + ) -> Self { SubstrateMessagesSource { client, lane, lane_id, instance, + target_to_source_headers_relay, + _phantom: Default::default(), } } } -impl Clone for SubstrateMessagesSource { +impl Clone for SubstrateMessagesSource { fn clone(&self) -> Self { Self { client: self.client.clone(), lane: self.lane.clone(), lane_id: self.lane_id, instance: self.instance, + target_to_source_headers_relay: self.target_to_source_headers_relay.clone(), + _phantom: Default::default(), } } } #[async_trait] -impl RelayClient for SubstrateMessagesSource { +impl RelayClient for SubstrateMessagesSource +where + C: Chain, + P: SubstrateMessageLane, + R: Send + Sync, + I: Send + Sync + Instance, +{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -84,7 +104,7 @@ impl RelayClient for SubstrateMessagesSource< } #[async_trait] -impl SourceClient

for SubstrateMessagesSource +impl SourceClient

for SubstrateMessagesSource where C: Chain, C::Header: DeserializeOwned, @@ -94,9 +114,13 @@ where MessagesProof = SubstrateMessagesProof, SourceHeaderNumber = ::Number, SourceHeaderHash = ::Hash, + SourceChain = C, >, + P::TargetChain: Chain, P::TargetHeaderNumber: Decode, P::TargetHeaderHash: Decode, + R: Send + Sync + MessagesConfig, + I: Send + Sync + Instance, { async fn state(&self) -> Result, SubstrateError> { // we can't continue to deliver confirmations if source node is out of sync, because @@ -170,15 +194,22 @@ where nonces: RangeInclusive, proof_parameters: MessageProofParameters, ) -> Result<(SourceHeaderIdOf

, RangeInclusive, P::MessagesProof), SubstrateError> { + let mut storage_keys = Vec::with_capacity(nonces.end().saturating_sub(*nonces.start()) as usize + 1); + let mut message_nonce = *nonces.start(); + while message_nonce <= *nonces.end() { + let message_key = pallet_bridge_messages::storage_keys::message_key::(&self.lane_id, message_nonce); + storage_keys.push(message_key); + message_nonce += 1; + } + if proof_parameters.outbound_state_proof_required { + storage_keys.push(pallet_bridge_messages::storage_keys::outbound_lane_data_key::( + &self.lane_id, + )); + } + let proof = self .client - .prove_messages( - self.instance, - self.lane_id, - nonces.clone(), - proof_parameters.outbound_state_proof_required, - id.1, - ) + .prove_storage(storage_keys, id.1) .await? .iter_nodes() .collect(); @@ -197,13 +228,20 @@ where generated_at_block: TargetHeaderIdOf

, proof: P::MessagesReceivingProof, ) -> Result<(), SubstrateError> { - let tx = self - .lane - .make_messages_receiving_proof_transaction(generated_at_block, proof) + self.client + .submit_signed_extrinsic(self.lane.source_transactions_author(), move |transaction_nonce| { + self.lane + .make_messages_receiving_proof_transaction(transaction_nonce, generated_at_block, proof) + }) .await?; - self.client.submit_extrinsic(Bytes(tx.encode())).await?; Ok(()) } + + async fn require_target_header_on_source(&self, id: TargetHeaderIdOf

) { + if let Some(ref target_to_source_headers_relay) = self.target_to_source_headers_relay { + target_to_source_headers_relay.require_finalized_header(id); + } + } } pub async fn read_client_state( diff --git a/polkadot/bridges/relays/substrate/src/messages_target.rs b/polkadot/bridges/relays/bin-substrate/src/messages_target.rs similarity index 71% rename from polkadot/bridges/relays/substrate/src/messages_target.rs rename to polkadot/bridges/relays/bin-substrate/src/messages_target.rs index e5ac8880c8..1760832730 100644 --- a/polkadot/bridges/relays/substrate/src/messages_target.rs +++ b/polkadot/bridges/relays/bin-substrate/src/messages_target.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -20,21 +20,24 @@ use crate::messages_lane::SubstrateMessageLane; use crate::messages_source::read_client_state; +use crate::on_demand_headers::OnDemandHeadersRelay; use async_trait::async_trait; -use bp_message_lane::{LaneId, MessageNonce, UnrewardedRelayersState}; +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState}; use bp_runtime::InstanceId; use bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof; use codec::{Decode, Encode}; +use frame_support::traits::Instance; use messages_relay::{ message_lane::{SourceHeaderIdOf, TargetHeaderIdOf}, message_lane_loop::{TargetClient, TargetClientState}, }; +use pallet_bridge_messages::Config as MessagesConfig; use relay_substrate_client::{Chain, Client, Error as SubstrateError, HashOf}; use relay_utils::{relay_loop::Client as RelayClient, BlockNumberBase}; use sp_core::Bytes; use sp_runtime::{traits::Header as HeaderT, DeserializeOwned}; -use std::ops::RangeInclusive; +use std::{marker::PhantomData, ops::RangeInclusive}; /// Message receiving proof returned by the target Substrate node. 
pub type SubstrateMessagesReceivingProof = ( @@ -43,38 +46,56 @@ pub type SubstrateMessagesReceivingProof = ( ); /// Substrate client as Substrate messages target. -pub struct SubstrateMessagesTarget { +pub struct SubstrateMessagesTarget { client: Client, lane: P, lane_id: LaneId, instance: InstanceId, + source_to_target_headers_relay: Option>, + _phantom: PhantomData<(R, I)>, } -impl SubstrateMessagesTarget { +impl SubstrateMessagesTarget { /// Create new Substrate headers target. - pub fn new(client: Client, lane: P, lane_id: LaneId, instance: InstanceId) -> Self { + pub fn new( + client: Client, + lane: P, + lane_id: LaneId, + instance: InstanceId, + source_to_target_headers_relay: Option>, + ) -> Self { SubstrateMessagesTarget { client, lane, lane_id, instance, + source_to_target_headers_relay, + _phantom: Default::default(), } } } -impl Clone for SubstrateMessagesTarget { +impl Clone for SubstrateMessagesTarget { fn clone(&self) -> Self { Self { client: self.client.clone(), lane: self.lane.clone(), lane_id: self.lane_id, instance: self.instance, + source_to_target_headers_relay: self.source_to_target_headers_relay.clone(), + _phantom: Default::default(), } } } #[async_trait] -impl RelayClient for SubstrateMessagesTarget { +impl RelayClient for SubstrateMessagesTarget +where + C: Chain, + P: SubstrateMessageLane, + R: Send + Sync, + I: Send + Sync + Instance, +{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -83,19 +104,23 @@ impl RelayClient for SubstrateMessagesTarget< } #[async_trait] -impl TargetClient

for SubstrateMessagesTarget +impl TargetClient

for SubstrateMessagesTarget where C: Chain, C::Header: DeserializeOwned, C::Index: DeserializeOwned, ::Number: BlockNumberBase, P: SubstrateMessageLane< + TargetChain = C, MessagesReceivingProof = SubstrateMessagesReceivingProof, TargetHeaderNumber = ::Number, TargetHeaderHash = ::Hash, >, + P::SourceChain: Chain, P::SourceHeaderNumber: Decode, P::SourceHeaderHash: Decode, + R: Send + Sync + MessagesConfig, + I: Send + Sync + Instance, { async fn state(&self) -> Result, SubstrateError> { // we can't continue to deliver messages if target node is out of sync, because @@ -165,10 +190,13 @@ where id: TargetHeaderIdOf

, ) -> Result<(TargetHeaderIdOf

, P::MessagesReceivingProof), SubstrateError> { let (id, relayers_state) = self.unrewarded_relayers_state(id).await?; + let inbound_data_key = pallet_bridge_messages::storage_keys::inbound_lane_data_key::(&self.lane_id); let proof = self .client - .prove_messages_delivery(self.instance, self.lane_id, id.1) - .await?; + .prove_storage(vec![inbound_data_key], id.1) + .await? + .iter_nodes() + .collect(); let proof = FromBridgedChainMessagesDeliveryProof { bridged_header_hash: id.1, storage_proof: proof, @@ -183,11 +211,22 @@ where nonces: RangeInclusive, proof: P::MessagesProof, ) -> Result, SubstrateError> { - let tx = self - .lane - .make_messages_delivery_transaction(generated_at_header, nonces.clone(), proof) + self.client + .submit_signed_extrinsic(self.lane.target_transactions_author(), |transaction_nonce| { + self.lane.make_messages_delivery_transaction( + transaction_nonce, + generated_at_header, + nonces.clone(), + proof, + ) + }) .await?; - self.client.submit_extrinsic(Bytes(tx.encode())).await?; Ok(nonces) } + + async fn require_source_header_on_target(&self, id: SourceHeaderIdOf

) { + if let Some(ref source_to_target_headers_relay) = self.source_to_target_headers_relay { + source_to_target_headers_relay.require_finalized_header(id); + } + } } diff --git a/polkadot/bridges/relays/bin-substrate/src/on_demand_headers.rs b/polkadot/bridges/relays/bin-substrate/src/on_demand_headers.rs new file mode 100644 index 0000000000..4c86b6a170 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/on_demand_headers.rs @@ -0,0 +1,255 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! On-demand Substrate -> Substrate headers relay. + +use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; +use crate::finality_target::SubstrateFinalityTarget; + +use bp_header_chain::justification::GrandpaJustification; +use finality_relay::TargetClient as FinalityTargetClient; +use futures::{ + channel::{mpsc, oneshot}, + select, FutureExt, StreamExt, +}; +use num_traits::Zero; +use relay_substrate_client::{BlockNumberOf, Chain, Client, HashOf, HeaderIdOf, SyncHeader}; +use relay_utils::{metrics::MetricsParams, BlockNumberBase, HeaderId}; +use std::fmt::Debug; + +/// On-demand Substrate <-> Substrate headers relay. +/// +/// This relay may be started by messages whenever some other relay (e.g. 
messages relay) needs more +/// headers to be relayed to continue its regular work. When enough headers are relayed, on-demand +/// relay may be deactivated. +#[derive(Clone)] +pub struct OnDemandHeadersRelay { + /// Background task name. + background_task_name: String, + /// Required headers to background sender. + required_header_tx: mpsc::Sender>, +} + +impl OnDemandHeadersRelay { + /// Create new on-demand headers relay. + pub fn new( + source_client: Client, + target_client: Client, + pipeline: SubstrateFinalityToSubstrate, + ) -> Self + where + SourceChain: Chain + Debug, + SourceChain::BlockNumber: BlockNumberBase, + TargetChain: Chain + Debug, + TargetChain::BlockNumber: BlockNumberBase, + TargetSign: Clone + Send + Sync + 'static, + SubstrateFinalityToSubstrate: SubstrateFinalitySyncPipeline< + Hash = HashOf, + Number = BlockNumberOf, + Header = SyncHeader, + FinalityProof = GrandpaJustification, + TargetChain = TargetChain, + >, + SubstrateFinalityTarget>: + FinalityTargetClient>, + { + let (required_header_tx, required_header_rx) = mpsc::channel(1); + async_std::task::spawn(async move { + background_task(source_client, target_client, pipeline, required_header_rx).await; + }); + + let background_task_name = format!( + "{}-background", + on_demand_headers_relay_name::() + ); + OnDemandHeadersRelay { + background_task_name, + required_header_tx, + } + } + + /// Someone is asking us to relay given finalized header. + pub fn require_finalized_header(&self, header_id: HeaderIdOf) { + if let Err(error) = self.required_header_tx.clone().try_send(header_id) { + log::error!( + target: "bridge", + "Failed to send require header id {:?} to {:?}: {:?}", + header_id, + self.background_task_name, + error, + ); + } + } +} + +/// Background task that is responsible for starting and stopping headers relay when required. 
+async fn background_task( + source_client: Client, + target_client: Client, + pipeline: SubstrateFinalityToSubstrate, + mut required_header_rx: mpsc::Receiver>, +) where + SourceChain: Chain + Debug, + SourceChain::BlockNumber: BlockNumberBase, + TargetChain: Chain + Debug, + TargetChain::BlockNumber: BlockNumberBase, + TargetSign: Clone + Send + Sync + 'static, + SubstrateFinalityToSubstrate: SubstrateFinalitySyncPipeline< + Hash = HashOf, + Number = BlockNumberOf, + Header = SyncHeader, + FinalityProof = GrandpaJustification, + TargetChain = TargetChain, + >, + SubstrateFinalityTarget>: + FinalityTargetClient>, +{ + let relay_task_name = on_demand_headers_relay_name::(); + let finality_target = SubstrateFinalityTarget::new(target_client.clone(), pipeline.clone()); + + let mut active_headers_relay = None; + let mut required_header_number = Zero::zero(); + let mut relay_exited_rx = futures::future::pending().left_future(); + + loop { + // wait for next target block or for new required header + select! { + _ = async_std::task::sleep(TargetChain::AVERAGE_BLOCK_INTERVAL).fuse() => {}, + required_header_id = required_header_rx.next() => { + match required_header_id { + Some(required_header_id) => { + if required_header_id.0 > required_header_number { + required_header_number = required_header_id.0; + } + }, + None => { + // that's the only way to exit background task - to drop `required_header_tx` + break + }, + } + }, + _ = relay_exited_rx => { + // there could be a situation when we're receiving exit signals after we + // have already stopped relay or when we have already started new relay. 
+ // but it isn't critical, because even if we'll accidentally stop new relay + // we'll restart it almost immediately + stop_on_demand_headers_relay(active_headers_relay.take()).await; + }, + } + + // read best finalized source block from target + let available_header_number = match finality_target.best_finalized_source_block_number().await { + Ok(available_header_number) => available_header_number, + Err(error) => { + log::error!( + target: "bridge", + "Failed to read best finalized {} header from {} in {} relay: {:?}", + SourceChain::NAME, + TargetChain::NAME, + relay_task_name, + error, + ); + + // we don't know what's happening with target client, so better to stop on-demand relay than + // submit unneeded transactions + // => assume that required header is known to the target node + required_header_number + } + }; + + // start or stop headers relay if required + let activate = required_header_number > available_header_number; + match (activate, active_headers_relay.is_some()) { + (true, false) => { + let (relay_exited_tx, new_relay_exited_rx) = oneshot::channel(); + active_headers_relay = start_on_demand_headers_relay( + relay_task_name.clone(), + relay_exited_tx, + source_client.clone(), + target_client.clone(), + pipeline.clone(), + ); + if active_headers_relay.is_some() { + relay_exited_rx = new_relay_exited_rx.right_future(); + } + } + (false, true) => { + stop_on_demand_headers_relay(active_headers_relay.take()).await; + } + _ => (), + } + } +} + +/// On-demand headers relay task name. +fn on_demand_headers_relay_name() -> String { + format!("on-demand-{}-to-{}", SourceChain::NAME, TargetChain::NAME) +} + +/// Start on-demand headers relay task. 
+fn start_on_demand_headers_relay( + task_name: String, + relay_exited_tx: oneshot::Sender<()>, + source_client: Client, + target_client: Client, + pipeline: SubstrateFinalityToSubstrate, +) -> Option> +where + SourceChain::BlockNumber: BlockNumberBase, + SubstrateFinalityToSubstrate: SubstrateFinalitySyncPipeline< + Hash = HashOf, + Number = BlockNumberOf, + Header = SyncHeader, + FinalityProof = GrandpaJustification, + TargetChain = TargetChain, + >, + TargetSign: 'static, +{ + let headers_relay_future = + crate::finality_pipeline::run(pipeline, source_client, target_client, MetricsParams::disabled()); + let closure_task_name = task_name.clone(); + async_std::task::Builder::new() + .name(task_name.clone()) + .spawn(async move { + log::info!(target: "bridge", "Starting {} headers relay", closure_task_name); + let result = headers_relay_future.await; + log::trace!(target: "bridge", "{} headers relay has exited. Result: {:?}", closure_task_name, result); + let _ = relay_exited_tx.send(()); + }) + .map_err(|error| { + log::error!( + target: "bridge", + "Failed to start {} relay: {:?}", + task_name, + error, + ); + }) + .ok() +} + +/// Stop on-demand headers relay task. 
+async fn stop_on_demand_headers_relay(task: Option>) { + if let Some(task) = task { + let task_name = task + .task() + .name() + .expect("on-demand tasks are always started with name; qed") + .to_string(); + log::trace!(target: "bridge", "Cancelling {} headers relay", task_name); + task.cancel().await; + log::info!(target: "bridge", "Cancelled {} headers relay", task_name); + } +} diff --git a/polkadot/bridges/relays/ethereum-client/Cargo.toml b/polkadot/bridges/relays/client-ethereum/Cargo.toml similarity index 67% rename from polkadot/bridges/relays/ethereum-client/Cargo.toml rename to polkadot/bridges/relays/client-ethereum/Cargo.toml index b0f6485ffd..ebae252ed5 100644 --- a/polkadot/bridges/relays/ethereum-client/Cargo.toml +++ b/polkadot/bridges/relays/client-ethereum/Cargo.toml @@ -8,10 +8,11 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] bp-eth-poa = { path = "../../primitives/ethereum-poa" } codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers-relay" } +headers-relay = { path = "../headers" } hex-literal = "0.3" -jsonrpsee = { git = "https://github.com/svyatonik/jsonrpsee.git", branch = "shared-client-in-rpc-api", default-features = false, features = ["http"] } +jsonrpsee-proc-macros = "=0.2.0-alpha.5" +jsonrpsee-ws-client = "=0.2.0-alpha.5" libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"] } log = "0.4.11" relay-utils = { path = "../utils" } -web3 = { version = "0.15", git = "https://github.com/tomusdrw/rust-web3.git", branch ="td-ethabi", default-features = false } +web3 = { version = "0.15", git = "https://github.com/tomusdrw/rust-web3", branch ="td-ethabi", default-features = false } diff --git a/polkadot/bridges/relays/ethereum-client/src/client.rs b/polkadot/bridges/relays/client-ethereum/src/client.rs similarity index 74% rename from polkadot/bridges/relays/ethereum-client/src/client.rs rename to 
polkadot/bridges/relays/client-ethereum/src/client.rs index 30a62a400e..e2def5fb03 100644 --- a/polkadot/bridges/relays/ethereum-client/src/client.rs +++ b/polkadot/bridges/relays/client-ethereum/src/client.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -21,9 +21,8 @@ use crate::types::{ }; use crate::{ConnectionParams, Error, Result}; -use jsonrpsee::raw::RawClient; -use jsonrpsee::transport::http::HttpTransportClient; -use jsonrpsee::Client as RpcClient; +use jsonrpsee_ws_client::{WsClient as RpcClient, WsClientBuilder as RpcClientBuilder}; +use std::sync::Arc; /// Number of headers missing from the Ethereum node for us to consider node not synced. const MAJOR_SYNC_BLOCKS: u64 = 5; @@ -32,36 +31,36 @@ const MAJOR_SYNC_BLOCKS: u64 = 5; #[derive(Clone)] pub struct Client { params: ConnectionParams, - client: RpcClient, + client: Arc, } impl Client { /// Create a new Ethereum RPC Client. - pub fn new(params: ConnectionParams) -> Self { - Self { - client: Self::build_client(¶ms), + pub async fn new(params: ConnectionParams) -> Result { + Ok(Self { + client: Self::build_client(¶ms).await?, params, - } + }) } /// Build client to use in connection. - fn build_client(params: &ConnectionParams) -> RpcClient { - let uri = format!("http://{}:{}", params.host, params.port); - let transport = HttpTransportClient::new(&uri); - let raw_client = RawClient::new(transport); - raw_client.into() + async fn build_client(params: &ConnectionParams) -> Result> { + let uri = format!("ws://{}:{}", params.host, params.port); + let client = RpcClientBuilder::default().build(&uri).await?; + Ok(Arc::new(client)) } /// Reopen client connection. 
- pub fn reconnect(&mut self) { - self.client = Self::build_client(&self.params); + pub async fn reconnect(&mut self) -> Result<()> { + self.client = Self::build_client(&self.params).await?; + Ok(()) } } impl Client { /// Returns true if client is connected to at least one peer and is in synced state. pub async fn ensure_synced(&self) -> Result<()> { - match Ethereum::syncing(&self.client).await? { + match Ethereum::syncing(&*self.client).await? { SyncState::NotSyncing => Ok(()), SyncState::Syncing(syncing) => { let missing_headers = syncing.highest_block.saturating_sub(syncing.current_block); @@ -76,18 +75,18 @@ impl Client { /// Estimate gas usage for the given call. pub async fn estimate_gas(&self, call_request: CallRequest) -> Result { - Ok(Ethereum::estimate_gas(&self.client, call_request).await?) + Ok(Ethereum::estimate_gas(&*self.client, call_request).await?) } /// Retrieve number of the best known block from the Ethereum node. pub async fn best_block_number(&self) -> Result { - Ok(Ethereum::block_number(&self.client).await?.as_u64()) + Ok(Ethereum::block_number(&*self.client).await?.as_u64()) } /// Retrieve number of the best known block from the Ethereum node. pub async fn header_by_number(&self, block_number: u64) -> Result

{ let get_full_tx_objects = false; - let header = Ethereum::get_block_by_number(&self.client, block_number, get_full_tx_objects).await?; + let header = Ethereum::get_block_by_number(&*self.client, block_number, get_full_tx_objects).await?; match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() { true => Ok(header), false => Err(Error::IncompleteHeader), @@ -97,7 +96,7 @@ impl Client { /// Retrieve block header by its hash from Ethereum node. pub async fn header_by_hash(&self, hash: H256) -> Result
{ let get_full_tx_objects = false; - let header = Ethereum::get_block_by_hash(&self.client, hash, get_full_tx_objects).await?; + let header = Ethereum::get_block_by_hash(&*self.client, hash, get_full_tx_objects).await?; match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() { true => Ok(header), false => Err(Error::IncompleteHeader), @@ -107,7 +106,8 @@ impl Client { /// Retrieve block header and its transactions by its number from Ethereum node. pub async fn header_by_number_with_transactions(&self, number: u64) -> Result { let get_full_tx_objects = true; - let header = Ethereum::get_block_by_number_with_transactions(&self.client, number, get_full_tx_objects).await?; + let header = + Ethereum::get_block_by_number_with_transactions(&*self.client, number, get_full_tx_objects).await?; let is_complete_header = header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some(); if !is_complete_header { @@ -125,7 +125,7 @@ impl Client { /// Retrieve block header and its transactions by its hash from Ethereum node. pub async fn header_by_hash_with_transactions(&self, hash: H256) -> Result { let get_full_tx_objects = true; - let header = Ethereum::get_block_by_hash_with_transactions(&self.client, hash, get_full_tx_objects).await?; + let header = Ethereum::get_block_by_hash_with_transactions(&*self.client, hash, get_full_tx_objects).await?; let is_complete_header = header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some(); if !is_complete_header { @@ -142,17 +142,17 @@ impl Client { /// Retrieve transaction by its hash from Ethereum node. pub async fn transaction_by_hash(&self, hash: H256) -> Result> { - Ok(Ethereum::transaction_by_hash(&self.client, hash).await?) + Ok(Ethereum::transaction_by_hash(&*self.client, hash).await?) } /// Retrieve transaction receipt by transaction hash. 
pub async fn transaction_receipt(&self, transaction_hash: H256) -> Result { - Ok(Ethereum::get_transaction_receipt(&self.client, transaction_hash).await?) + Ok(Ethereum::get_transaction_receipt(&*self.client, transaction_hash).await?) } /// Get the nonce of the given account. pub async fn account_nonce(&self, address: Address) -> Result { - Ok(Ethereum::get_transaction_count(&self.client, address).await?) + Ok(Ethereum::get_transaction_count(&*self.client, address).await?) } /// Submit an Ethereum transaction. @@ -160,13 +160,13 @@ impl Client { /// The transaction must already be signed before sending it through this method. pub async fn submit_transaction(&self, signed_raw_tx: SignedRawTx) -> Result { let transaction = Bytes(signed_raw_tx); - let tx_hash = Ethereum::submit_transaction(&self.client, transaction).await?; + let tx_hash = Ethereum::submit_transaction(&*self.client, transaction).await?; log::trace!(target: "bridge", "Sent transaction to Ethereum node: {:?}", tx_hash); Ok(tx_hash) } /// Call Ethereum smart contract. pub async fn eth_call(&self, call_transaction: CallRequest) -> Result { - Ok(Ethereum::call(&self.client, call_transaction).await?) + Ok(Ethereum::call(&*self.client, call_transaction).await?) } } diff --git a/polkadot/bridges/relays/ethereum-client/src/error.rs b/polkadot/bridges/relays/client-ethereum/src/error.rs similarity index 83% rename from polkadot/bridges/relays/ethereum-client/src/error.rs rename to polkadot/bridges/relays/client-ethereum/src/error.rs index 0f47891138..bcd8edc3f3 100644 --- a/polkadot/bridges/relays/ethereum-client/src/error.rs +++ b/polkadot/bridges/relays/client-ethereum/src/error.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify @@ -18,7 +18,7 @@ use crate::types::U256; -use jsonrpsee::client::RequestError; +use jsonrpsee_ws_client::Error as RpcError; use relay_utils::MaybeConnectionError; /// Result type used by Ethereum client. @@ -30,7 +30,7 @@ pub type Result = std::result::Result; pub enum Error { /// An error that can occur when making an HTTP request to /// an JSON-RPC client. - Request(RequestError), + RpcError(RpcError), /// Failed to parse response. ResponseParseFailed(String), /// We have received a header with missing fields. @@ -47,9 +47,9 @@ pub enum Error { ClientNotSynced(U256), } -impl From for Error { - fn from(error: RequestError) -> Self { - Error::Request(error) +impl From for Error { + fn from(error: RpcError) -> Self { + Error::RpcError(error) } } @@ -57,7 +57,11 @@ impl MaybeConnectionError for Error { fn is_connection_error(&self) -> bool { matches!( *self, - Error::Request(RequestError::TransportError(_)) | Error::ClientNotSynced(_), + Error::RpcError(RpcError::TransportError(_)) + // right now if connection to the ws server is dropped (after it is already established), + // we're getting this error + | Error::RpcError(RpcError::Internal(_)) + | Error::ClientNotSynced(_), ) } } @@ -65,7 +69,7 @@ impl MaybeConnectionError for Error { impl ToString for Error { fn to_string(&self) -> String { match self { - Self::Request(e) => e.to_string(), + Self::RpcError(e) => e.to_string(), Self::ResponseParseFailed(e) => e.to_string(), Self::IncompleteHeader => { "Incomplete Ethereum Header Received (missing some of required fields - hash, number, logs_bloom)" diff --git a/polkadot/bridges/relays/ethereum-client/src/lib.rs b/polkadot/bridges/relays/client-ethereum/src/lib.rs similarity index 87% rename from polkadot/bridges/relays/ethereum-client/src/lib.rs rename to polkadot/bridges/relays/client-ethereum/src/lib.rs index 8c5a00e01b..8b3c6d8f8e 100644 --- 
a/polkadot/bridges/relays/ethereum-client/src/lib.rs +++ b/polkadot/bridges/relays/client-ethereum/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -29,12 +29,12 @@ pub use crate::sign::{sign_and_submit_transaction, SigningParams}; pub mod types; -/// Ethereum connection params. +/// Ethereum-over-websocket connection params. #[derive(Debug, Clone)] pub struct ConnectionParams { - /// Ethereum RPC host. + /// Websocket server hostname. pub host: String, - /// Ethereum RPC port. + /// Websocket server TCP port. pub port: u16, } @@ -42,7 +42,7 @@ impl Default for ConnectionParams { fn default() -> Self { ConnectionParams { host: "localhost".into(), - port: 8545, + port: 8546, } } } diff --git a/polkadot/bridges/relays/ethereum-client/src/rpc.rs b/polkadot/bridges/relays/client-ethereum/src/rpc.rs similarity index 92% rename from polkadot/bridges/relays/ethereum-client/src/rpc.rs rename to polkadot/bridges/relays/client-ethereum/src/rpc.rs index 3fa4f6ceb9..0fb81f7655 100644 --- a/polkadot/bridges/relays/ethereum-client/src/rpc.rs +++ b/polkadot/bridges/relays/client-ethereum/src/rpc.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -16,17 +16,12 @@ //! Ethereum node RPC interface. -// The compiler doesn't think we're using the -// code from rpc_api! -#![allow(dead_code)] -#![allow(unused_variables)] - use crate::types::{ Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SyncState, Transaction, TransactionHash, H256, U256, U64, }; -jsonrpsee::rpc_api! { +jsonrpsee_proc_macros::rpc_client_api! 
{ pub(crate) Ethereum { #[rpc(method = "eth_syncing", positional_params)] fn syncing() -> SyncState; diff --git a/polkadot/bridges/relays/ethereum-client/src/sign.rs b/polkadot/bridges/relays/client-ethereum/src/sign.rs similarity index 98% rename from polkadot/bridges/relays/ethereum-client/src/sign.rs rename to polkadot/bridges/relays/client-ethereum/src/sign.rs index 462cb5dbd7..6f479ab7d5 100644 --- a/polkadot/bridges/relays/ethereum-client/src/sign.rs +++ b/polkadot/bridges/relays/client-ethereum/src/sign.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/relays/ethereum-client/src/types.rs b/polkadot/bridges/relays/client-ethereum/src/types.rs similarity index 97% rename from polkadot/bridges/relays/ethereum-client/src/types.rs rename to polkadot/bridges/relays/client-ethereum/src/types.rs index 1bb9233b82..f589474aff 100644 --- a/polkadot/bridges/relays/ethereum-client/src/types.rs +++ b/polkadot/bridges/relays/client-ethereum/src/types.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/relays/kusama-client/Cargo.toml b/polkadot/bridges/relays/client-kusama/Cargo.toml similarity index 85% rename from polkadot/bridges/relays/kusama-client/Cargo.toml rename to polkadot/bridges/relays/client-kusama/Cargo.toml index 04958cf2b3..b9c397bca6 100644 --- a/polkadot/bridges/relays/kusama-client/Cargo.toml +++ b/polkadot/bridges/relays/client-kusama/Cargo.toml @@ -7,13 +7,13 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers-relay" } -relay-substrate-client = { path = "../substrate-client" } +headers-relay = { path = "../headers" } +relay-substrate-client = { path = "../client-substrate" } relay-utils = { path = "../utils" } # Bridge dependencies -bp-kusama = { path = "../../primitives/kusama" } +bp-kusama = { path = "../../primitives/chain-kusama" } # Substrate Dependencies diff --git a/polkadot/bridges/relays/kusama-client/src/lib.rs b/polkadot/bridges/relays/client-kusama/src/lib.rs similarity index 96% rename from polkadot/bridges/relays/kusama-client/src/lib.rs rename to polkadot/bridges/relays/client-kusama/src/lib.rs index 9f9507f5ca..3c3b1cd4c5 100644 --- a/polkadot/bridges/relays/kusama-client/src/lib.rs +++ b/polkadot/bridges/relays/client-kusama/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/relays/millau-client/Cargo.toml b/polkadot/bridges/relays/client-millau/Cargo.toml similarity index 89% rename from polkadot/bridges/relays/millau-client/Cargo.toml rename to polkadot/bridges/relays/client-millau/Cargo.toml index 5f9cbd170c..e16f06f852 100644 --- a/polkadot/bridges/relays/millau-client/Cargo.toml +++ b/polkadot/bridges/relays/client-millau/Cargo.toml @@ -7,8 +7,8 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers-relay" } -relay-substrate-client = { path = "../substrate-client" } +headers-relay = { path = "../headers" } +relay-substrate-client = { path = "../client-substrate" } relay-utils = { path = "../utils" } # Supported Chains diff --git a/polkadot/bridges/relays/millau-client/src/lib.rs b/polkadot/bridges/relays/client-millau/src/lib.rs similarity index 82% rename from polkadot/bridges/relays/millau-client/src/lib.rs rename to polkadot/bridges/relays/client-millau/src/lib.rs index c7d0405687..1708a8efa1 100644 --- a/polkadot/bridges/relays/millau-client/src/lib.rs +++ b/polkadot/bridges/relays/client-millau/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -17,13 +17,11 @@ //! Types used to connect to the Millau-Substrate chain. 
use codec::Encode; -use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, Client, TransactionSignScheme}; +use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme}; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; use std::time::Duration; -pub use millau_runtime::BridgeRialtoCall; - /// Millau header id. pub type HeaderId = relay_utils::HeaderId; @@ -65,7 +63,7 @@ impl TransactionSignScheme for Millau { type SignedTransaction = millau_runtime::UncheckedExtrinsic; fn sign_transaction( - client: &Client, + genesis_hash: ::Hash, signer: &Self::AccountKeyPair, signer_nonce: ::Index, call: ::Call, @@ -84,8 +82,8 @@ impl TransactionSignScheme for Millau { ( millau_runtime::VERSION.spec_version, millau_runtime::VERSION.transaction_version, - *client.genesis_hash(), - *client.genesis_hash(), + genesis_hash, + genesis_hash, (), (), (), @@ -100,26 +98,7 @@ impl TransactionSignScheme for Millau { } /// Millau signing params. -#[derive(Clone)] -pub struct SigningParams { - /// Substrate transactions signer. - pub signer: sp_core::sr25519::Pair, -} - -impl SigningParams { - /// Create signing params from SURI and password. - pub fn from_suri(suri: &str, password: Option<&str>) -> Result { - Ok(SigningParams { - signer: sp_core::sr25519::Pair::from_string(suri, password)?, - }) - } -} - -impl std::fmt::Debug for SigningParams { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.signer.public()) - } -} +pub type SigningParams = sp_core::sr25519::Pair; /// Millau header type used in headers sync. 
pub type SyncHeader = relay_substrate_client::SyncHeader; diff --git a/polkadot/bridges/relays/polkadot-client/Cargo.toml b/polkadot/bridges/relays/client-polkadot/Cargo.toml similarity index 84% rename from polkadot/bridges/relays/polkadot-client/Cargo.toml rename to polkadot/bridges/relays/client-polkadot/Cargo.toml index 8764b6509b..b148745f5a 100644 --- a/polkadot/bridges/relays/polkadot-client/Cargo.toml +++ b/polkadot/bridges/relays/client-polkadot/Cargo.toml @@ -7,13 +7,13 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers-relay" } -relay-substrate-client = { path = "../substrate-client" } +headers-relay = { path = "../headers" } +relay-substrate-client = { path = "../client-substrate" } relay-utils = { path = "../utils" } # Bridge dependencies -bp-polkadot = { path = "../../primitives/polkadot" } +bp-polkadot = { path = "../../primitives/chain-polkadot" } # Substrate Dependencies diff --git a/polkadot/bridges/relays/polkadot-client/src/lib.rs b/polkadot/bridges/relays/client-polkadot/src/lib.rs similarity index 96% rename from polkadot/bridges/relays/polkadot-client/src/lib.rs rename to polkadot/bridges/relays/client-polkadot/src/lib.rs index 7f85de1363..2c117c6d3d 100644 --- a/polkadot/bridges/relays/polkadot-client/src/lib.rs +++ b/polkadot/bridges/relays/client-polkadot/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/relays/rialto-client/Cargo.toml b/polkadot/bridges/relays/client-rialto/Cargo.toml similarity index 89% rename from polkadot/bridges/relays/rialto-client/Cargo.toml rename to polkadot/bridges/relays/client-rialto/Cargo.toml index 6142ba05c9..88e8e12add 100644 --- a/polkadot/bridges/relays/rialto-client/Cargo.toml +++ b/polkadot/bridges/relays/client-rialto/Cargo.toml @@ -7,8 +7,8 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers-relay" } -relay-substrate-client = { path = "../substrate-client" } +headers-relay = { path = "../headers" } +relay-substrate-client = { path = "../client-substrate" } relay-utils = { path = "../utils" } # Bridge dependencies diff --git a/polkadot/bridges/relays/rialto-client/src/lib.rs b/polkadot/bridges/relays/client-rialto/src/lib.rs similarity index 79% rename from polkadot/bridges/relays/rialto-client/src/lib.rs rename to polkadot/bridges/relays/client-rialto/src/lib.rs index 9e38831d56..0ddc03681d 100644 --- a/polkadot/bridges/relays/rialto-client/src/lib.rs +++ b/polkadot/bridges/relays/client-rialto/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -17,13 +17,11 @@ //! Types used to connect to the Rialto-Substrate chain. 
use codec::Encode; -use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, Client, TransactionSignScheme}; +use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme}; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; use std::time::Duration; -pub use rialto_runtime::BridgeMillauCall; - /// Rialto header id. pub type HeaderId = relay_utils::HeaderId; @@ -65,7 +63,7 @@ impl TransactionSignScheme for Rialto { type SignedTransaction = rialto_runtime::UncheckedExtrinsic; fn sign_transaction( - client: &Client, + genesis_hash: ::Hash, signer: &Self::AccountKeyPair, signer_nonce: ::Index, call: ::Call, @@ -84,8 +82,8 @@ impl TransactionSignScheme for Rialto { ( rialto_runtime::VERSION.spec_version, rialto_runtime::VERSION.transaction_version, - *client.genesis_hash(), - *client.genesis_hash(), + genesis_hash, + genesis_hash, (), (), (), @@ -100,34 +98,7 @@ impl TransactionSignScheme for Rialto { } /// Rialto signing params. -#[derive(Clone)] -pub struct SigningParams { - /// Substrate transactions signer. - pub signer: sp_core::sr25519::Pair, -} - -impl SigningParams { - /// Create signing params from SURI and password. - pub fn from_suri(suri: &str, password: Option<&str>) -> Result { - Ok(SigningParams { - signer: sp_core::sr25519::Pair::from_string(suri, password)?, - }) - } -} - -impl std::fmt::Debug for SigningParams { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.signer.public()) - } -} - -impl Default for SigningParams { - fn default() -> Self { - SigningParams { - signer: sp_keyring::AccountKeyring::Alice.pair(), - } - } -} +pub type SigningParams = sp_core::sr25519::Pair; /// Rialto header type used in headers sync. 
pub type SyncHeader = relay_substrate_client::SyncHeader; diff --git a/polkadot/bridges/relays/client-rococo/Cargo.toml b/polkadot/bridges/relays/client-rococo/Cargo.toml new file mode 100644 index 0000000000..095f365374 --- /dev/null +++ b/polkadot/bridges/relays/client-rococo/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "relay-rococo-client" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0" } +headers-relay = { path = "../headers" } +relay-substrate-client = { path = "../client-substrate" } +relay-utils = { path = "../utils" } + +# Bridge dependencies +bp-rococo = { path = "../../primitives/chain-rococo" } + +# Substrate Dependencies +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-rococo/src/lib.rs b/polkadot/bridges/relays/client-rococo/src/lib.rs new file mode 100644 index 0000000000..09d205f06e --- /dev/null +++ b/polkadot/bridges/relays/client-rococo/src/lib.rs @@ -0,0 +1,97 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Types used to connect to the Rococo-Substrate chain. + +use codec::Encode; +use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme}; +use sp_core::{storage::StorageKey, Pair}; +use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; +use std::time::Duration; + +/// Rococo header id. +pub type HeaderId = relay_utils::HeaderId; + +/// Rococo header type used in headers sync. +pub type SyncHeader = relay_substrate_client::SyncHeader; + +/// Rococo chain definition +#[derive(Debug, Clone, Copy)] +pub struct Rococo; + +impl ChainBase for Rococo { + type BlockNumber = bp_rococo::BlockNumber; + type Hash = bp_rococo::Hash; + type Hasher = bp_rococo::Hashing; + type Header = bp_rococo::Header; +} + +impl Chain for Rococo { + const NAME: &'static str = "Rococo"; + const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); + + type AccountId = bp_rococo::AccountId; + type Index = bp_rococo::Index; + type SignedBlock = bp_rococo::SignedBlock; + type Call = bp_rococo::Call; +} + +impl ChainWithBalances for Rococo { + type NativeBalance = bp_rococo::Balance; + + fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { + StorageKey(bp_rococo::account_info_storage_key(account_id)) + } +} + +impl TransactionSignScheme for Rococo { + type Chain = Rococo; + type AccountKeyPair = sp_core::sr25519::Pair; + type SignedTransaction = bp_rococo::UncheckedExtrinsic; + + fn sign_transaction( + genesis_hash: ::Hash, + signer: &Self::AccountKeyPair, + signer_nonce: ::Index, + call: ::Call, + ) -> Self::SignedTransaction { + let raw_payload = 
SignedPayload::new( + call, + bp_rococo::SignedExtensions::new( + bp_rococo::VERSION, + sp_runtime::generic::Era::Immortal, + genesis_hash, + signer_nonce, + 0, + ), + ) + .expect("SignedExtension never fails."); + + let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); + let signer: sp_runtime::MultiSigner = signer.public().into(); + let (call, extra, _) = raw_payload.deconstruct(); + + bp_rococo::UncheckedExtrinsic::new_signed( + call, + sp_runtime::MultiAddress::Id(signer.into_account()), + signature.into(), + extra, + ) + } +} + +/// Rococo signing params. +pub type SigningParams = sp_core::sr25519::Pair; diff --git a/polkadot/bridges/relays/substrate-client/Cargo.toml b/polkadot/bridges/relays/client-substrate/Cargo.toml similarity index 73% rename from polkadot/bridges/relays/substrate-client/Cargo.toml rename to polkadot/bridges/relays/client-substrate/Cargo.toml index 016a7d7d33..7b3f46230f 100644 --- a/polkadot/bridges/relays/substrate-client/Cargo.toml +++ b/polkadot/bridges/relays/client-substrate/Cargo.toml @@ -9,16 +9,19 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" async-std = "1.6.5" async-trait = "0.1.40" codec = { package = "parity-scale-codec", version = "2.0.0" } -jsonrpsee = { git = "https://github.com/svyatonik/jsonrpsee.git", branch = "shared-client-in-rpc-api", default-features = false, features = ["ws"] } +jsonrpsee-proc-macros = "=0.2.0-alpha.5" +jsonrpsee-ws-client = "=0.2.0-alpha.5" log = "0.4.11" num-traits = "0.2" rand = "0.7" # Bridge dependencies -bp-message-lane = { path = "../../primitives/message-lane" } +bp-header-chain = { path = "../../primitives/header-chain" } +bp-messages = { path = "../../primitives/messages" } bp-runtime = { path = "../../primitives/runtime" } -headers-relay = { path = "../headers-relay" } +finality-relay = { path = "../finality" } +headers-relay = { path = "../headers" } relay-utils = { path = "../utils" } # Substrate Dependencies @@ -28,7 +31,9 @@ frame-system = { git = 
"https://github.com/paritytech/substrate", branch = "mast pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-rpc-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-storage = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/substrate-client/src/chain.rs b/polkadot/bridges/relays/client-substrate/src/chain.rs similarity index 77% rename from polkadot/bridges/relays/substrate-client/src/chain.rs rename to polkadot/bridges/relays/client-substrate/src/chain.rs index f309c3f775..64c0d6af52 100644 --- a/polkadot/bridges/relays/substrate-client/src/chain.rs +++ b/polkadot/bridges/relays/client-substrate/src/chain.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,22 +14,22 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . 
-use crate::client::Client; - use bp_runtime::Chain as ChainBase; use frame_support::Parameter; -use jsonrpsee::common::{DeserializeOwned, Serialize}; +use jsonrpsee_ws_client::{DeserializeOwned, Serialize}; use num_traits::{CheckedSub, Zero}; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{ generic::SignedBlock, - traits::{AtLeast32Bit, Dispatchable, MaybeDisplay, MaybeSerialize, MaybeSerializeDeserialize, Member}, - Justification, + traits::{ + AtLeast32Bit, Block as BlockT, Dispatchable, MaybeDisplay, MaybeSerialize, MaybeSerializeDeserialize, Member, + }, + EncodedJustification, }; use std::{fmt::Debug, time::Duration}; /// Substrate-based chain from minimal relay-client point of view. -pub trait Chain: ChainBase { +pub trait Chain: ChainBase + Clone { /// Chain name. const NAME: &'static str; /// Average block interval. @@ -51,7 +51,7 @@ pub trait Chain: ChainBase { + AtLeast32Bit + Copy; /// Block type. - type SignedBlock: Member + Serialize + DeserializeOwned + BlockWithJustification; + type SignedBlock: Member + Serialize + DeserializeOwned + BlockWithJustification; /// The aggregated `Call` type. type Call: Dispatchable + Debug; } @@ -67,9 +67,11 @@ pub trait ChainWithBalances: Chain { } /// Block with justification. -pub trait BlockWithJustification { +pub trait BlockWithJustification
{ + /// Return block header. + fn header(&self) -> Header; /// Return block justification, if known. - fn justification(&self) -> Option<&Justification>; + fn justification(&self) -> Option<&EncodedJustification>; } /// Substrate-based chain transactions signing scheme. @@ -83,21 +85,21 @@ pub trait TransactionSignScheme { /// Create transaction for given runtime call, signed by given account. fn sign_transaction( - client: &Client, + genesis_hash: ::Hash, signer: &Self::AccountKeyPair, signer_nonce: ::Index, call: ::Call, ) -> Self::SignedTransaction; } -impl BlockWithJustification for () { - fn justification(&self) -> Option<&Justification> { - None +impl BlockWithJustification for SignedBlock { + fn header(&self) -> Block::Header { + self.block.header().clone() } -} -impl BlockWithJustification for SignedBlock { - fn justification(&self) -> Option<&Justification> { - self.justification.as_ref() + fn justification(&self) -> Option<&EncodedJustification> { + self.justifications + .as_ref() + .and_then(|j| j.get(sp_finality_grandpa::GRANDPA_ENGINE_ID)) } } diff --git a/polkadot/bridges/relays/substrate-client/src/client.rs b/polkadot/bridges/relays/client-substrate/src/client.rs similarity index 57% rename from polkadot/bridges/relays/substrate-client/src/client.rs rename to polkadot/bridges/relays/client-substrate/src/client.rs index 767002d686..892a63d6d5 100644 --- a/polkadot/bridges/relays/substrate-client/src/client.rs +++ b/polkadot/bridges/relays/client-substrate/src/client.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -17,25 +17,22 @@ //! Substrate node client. 
use crate::chain::{Chain, ChainWithBalances}; -use crate::rpc::{Substrate, SubstrateMessageLane}; +use crate::rpc::Substrate; use crate::{ConnectionParams, Error, Result}; -use bp_message_lane::{LaneId, MessageNonce}; -use bp_runtime::InstanceId; +use async_std::sync::{Arc, Mutex}; use codec::Decode; use frame_system::AccountInfo; -use jsonrpsee::common::DeserializeOwned; -use jsonrpsee::raw::RawClient; -use jsonrpsee::transport::ws::WsTransportClient; -use jsonrpsee::{client::Subscription, Client as RpcClient}; +use jsonrpsee_ws_client::{traits::SubscriptionClient, v2::params::JsonRpcParams, DeserializeOwned}; +use jsonrpsee_ws_client::{Subscription, WsClient as RpcClient, WsClientBuilder as RpcClientBuilder}; use num_traits::Zero; use pallet_balances::AccountData; -use sp_core::Bytes; +use sp_core::{storage::StorageKey, Bytes}; use sp_trie::StorageProof; use sp_version::RuntimeVersion; -use std::ops::RangeInclusive; const SUB_API_GRANDPA_AUTHORITIES: &str = "GrandpaApi_grandpa_authorities"; +const MAX_SUBSCRIPTION_CAPACITY: usize = 4096; /// Opaque justifications subscription type. pub type JustificationsSubscription = Subscription; @@ -50,9 +47,13 @@ pub struct Client { /// Client connection params. params: ConnectionParams, /// Substrate RPC client. - client: RpcClient, + client: Arc, /// Genesis block hash. genesis_hash: C::Hash, + /// If several tasks are submitting their transactions simultaneously using `submit_signed_extrinsic` + /// method, they may get the same transaction nonce. So one of transactions will be rejected + /// from the pool. This lock is here to prevent situations like that. 
+ submit_signed_extrinsic_lock: Arc>, } impl Clone for Client { @@ -61,6 +62,7 @@ impl Clone for Client { params: self.params.clone(), client: self.client.clone(), genesis_hash: self.genesis_hash, + submit_signed_extrinsic_lock: self.submit_signed_extrinsic_lock.clone(), } } } @@ -79,12 +81,13 @@ impl Client { let client = Self::build_client(params.clone()).await?; let number: C::BlockNumber = Zero::zero(); - let genesis_hash = Substrate::::chain_get_block_hash(&client, number).await?; + let genesis_hash = Substrate::::chain_get_block_hash(&*client, number).await?; Ok(Self { params, client, genesis_hash, + submit_signed_extrinsic_lock: Arc::new(Mutex::new(())), }) } @@ -95,18 +98,26 @@ impl Client { } /// Build client to use in connection. - async fn build_client(params: ConnectionParams) -> Result { - let uri = format!("ws://{}:{}", params.host, params.port); - let transport = WsTransportClient::new(&uri).await?; - let raw_client = RawClient::new(transport); - Ok(raw_client.into()) + async fn build_client(params: ConnectionParams) -> Result> { + let uri = format!( + "{}://{}:{}", + if params.secure { "wss" } else { "ws" }, + params.host, + params.port, + ); + let client = RpcClientBuilder::default() + .max_notifs_per_subscription(MAX_SUBSCRIPTION_CAPACITY) + .build(&uri) + .await?; + + Ok(Arc::new(client)) } } impl Client { /// Returns true if client is connected to at least one peer and is in synced state. pub async fn ensure_synced(&self) -> Result<()> { - let health = Substrate::::system_health(&self.client).await?; + let health = Substrate::::system_health(&*self.client).await?; let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0); if is_synced { Ok(()) @@ -122,7 +133,7 @@ impl Client { /// Return hash of the best finalized block. pub async fn best_finalized_header_hash(&self) -> Result { - Ok(Substrate::::chain_get_finalized_head(&self.client).await?) + Ok(Substrate::::chain_get_finalized_head(&*self.client).await?) 
} /// Returns the best Substrate header. @@ -130,12 +141,12 @@ impl Client { where C::Header: DeserializeOwned, { - Ok(Substrate::::chain_get_header(&self.client, None).await?) + Ok(Substrate::::chain_get_header(&*self.client, None).await?) } /// Get a Substrate block from its hash. pub async fn get_block(&self, block_hash: Option) -> Result { - Ok(Substrate::::chain_get_block(&self.client, block_hash).await?) + Ok(Substrate::::chain_get_block(&*self.client, block_hash).await?) } /// Get a Substrate header by its hash. @@ -143,12 +154,12 @@ impl Client { where C::Header: DeserializeOwned, { - Ok(Substrate::::chain_get_header(&self.client, block_hash).await?) + Ok(Substrate::::chain_get_header(&*self.client, block_hash).await?) } /// Get a Substrate block hash by its number. pub async fn block_hash_by_number(&self, number: C::BlockNumber) -> Result { - Ok(Substrate::::chain_get_block_hash(&self.client, number).await?) + Ok(Substrate::::chain_get_block_hash(&*self.client, number).await?) } /// Get a Substrate header by its number. @@ -162,7 +173,15 @@ impl Client { /// Return runtime version. pub async fn runtime_version(&self) -> Result { - Ok(Substrate::::runtime_version(&self.client).await?) + Ok(Substrate::::state_runtime_version(&*self.client).await?) + } + + /// Read value from runtime storage. + pub async fn storage_value(&self, storage_key: StorageKey) -> Result> { + Substrate::::state_get_storage(&*self.client, storage_key) + .await? + .map(|encoded_value| T::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed)) + .transpose() } /// Return native tokens balance of the account. @@ -171,7 +190,7 @@ impl Client { C: ChainWithBalances, { let storage_key = C::account_info_storage_key(&account); - let encoded_account_data = Substrate::::get_storage(&self.client, storage_key) + let encoded_account_data = Substrate::::state_get_storage(&*self.client, storage_key) .await? 
.ok_or(Error::AccountDoesNotExist)?; let decoded_account_data = @@ -184,24 +203,44 @@ impl Client { /// /// Note: It's the caller's responsibility to make sure `account` is a valid ss58 address. pub async fn next_account_index(&self, account: C::AccountId) -> Result { - Ok(Substrate::::system_account_next_index(&self.client, account).await?) + Ok(Substrate::::system_account_next_index(&*self.client, account).await?) } - /// Submit an extrinsic for inclusion in a block. + /// Submit unsigned extrinsic for inclusion in a block. /// - /// Note: The given transaction does not need be SCALE encoded beforehand. - pub async fn submit_extrinsic(&self, transaction: Bytes) -> Result { - let tx_hash = Substrate::::author_submit_extrinsic(&self.client, transaction).await?; + /// Note: The given transaction needs to be SCALE encoded beforehand. + pub async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result { + let tx_hash = Substrate::::author_submit_extrinsic(&*self.client, transaction).await?; log::trace!(target: "bridge", "Sent transaction to Substrate node: {:?}", tx_hash); Ok(tx_hash) } + /// Submit an extrinsic signed by given account. + /// + /// All calls of this method are synchronized, so there can't be more than one active + /// `submit_signed_extrinsic()` call. This guarantees that no nonces collision may happen + /// if all client instances are clones of the same initial `Client`. + /// + /// Note: The given transaction needs to be SCALE encoded beforehand. 
+ pub async fn submit_signed_extrinsic( + &self, + extrinsic_signer: C::AccountId, + prepare_extrinsic: impl FnOnce(C::Index) -> Bytes, + ) -> Result { + let _guard = self.submit_signed_extrinsic_lock.lock().await; + let transaction_nonce = self.next_account_index(extrinsic_signer).await?; + let extrinsic = prepare_extrinsic(transaction_nonce); + let tx_hash = Substrate::::author_submit_extrinsic(&*self.client, extrinsic).await?; + log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); + Ok(tx_hash) + } + /// Get the GRANDPA authority set at given block. pub async fn grandpa_authorities_set(&self, block: C::Hash) -> Result { let call = SUB_API_GRANDPA_AUTHORITIES.to_string(); let data = Bytes(Vec::new()); - let encoded_response = Substrate::::state_call(&self.client, call, data, Some(block)).await?; + let encoded_response = Substrate::::state_call(&*self.client, call, data, Some(block)).await?; let authority_list = encoded_response.0; Ok(authority_list) @@ -209,59 +248,26 @@ impl Client { /// Execute runtime call at given block. pub async fn state_call(&self, method: String, data: Bytes, at_block: Option) -> Result { - Substrate::::state_call(&self.client, method, data, at_block) + Substrate::::state_call(&*self.client, method, data, at_block) .await .map_err(Into::into) } - /// Returns proof-of-message(s) in given inclusive range. 
- pub async fn prove_messages( - &self, - instance: InstanceId, - lane: LaneId, - range: RangeInclusive, - include_outbound_lane_state: bool, - at_block: C::Hash, - ) -> Result { - let encoded_trie_nodes = SubstrateMessageLane::::prove_messages( - &self.client, - instance, - lane, - *range.start(), - *range.end(), - include_outbound_lane_state, - Some(at_block), - ) - .await - .map_err(Error::Request)?; - let decoded_trie_nodes: Vec> = - Decode::decode(&mut &encoded_trie_nodes[..]).map_err(Error::ResponseParseFailed)?; - Ok(StorageProof::new(decoded_trie_nodes)) - } - - /// Returns proof-of-message(s) delivery. - pub async fn prove_messages_delivery( - &self, - instance: InstanceId, - lane: LaneId, - at_block: C::Hash, - ) -> Result>> { - let encoded_trie_nodes = - SubstrateMessageLane::::prove_messages_delivery(&self.client, instance, lane, Some(at_block)) - .await - .map_err(Error::Request)?; - let decoded_trie_nodes: Vec> = - Decode::decode(&mut &encoded_trie_nodes[..]).map_err(Error::ResponseParseFailed)?; - Ok(decoded_trie_nodes) + /// Returns storage proof of given storage keys. + pub async fn prove_storage(&self, keys: Vec, at_block: C::Hash) -> Result { + Substrate::::state_prove_storage(&*self.client, keys, Some(at_block)) + .await + .map(|proof| StorageProof::new(proof.proof.into_iter().map(|b| b.0).collect())) + .map_err(Into::into) } /// Return new justifications stream. - pub async fn subscribe_justifications(self) -> Result { + pub async fn subscribe_justifications(&self) -> Result { Ok(self .client .subscribe( "grandpa_subscribeJustifications", - jsonrpsee::common::Params::None, + JsonRpcParams::NoParams, "grandpa_unsubscribeJustifications", ) .await?) 
diff --git a/polkadot/bridges/relays/substrate-client/src/error.rs b/polkadot/bridges/relays/client-substrate/src/error.rs similarity index 58% rename from polkadot/bridges/relays/substrate-client/src/error.rs rename to polkadot/bridges/relays/client-substrate/src/error.rs index 67aefe9885..304229ede1 100644 --- a/polkadot/bridges/relays/substrate-client/src/error.rs +++ b/polkadot/bridges/relays/client-substrate/src/error.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -16,8 +16,7 @@ //! Substrate node RPC errors. -use jsonrpsee::client::RequestError; -use jsonrpsee::transport::ws::WsNewDnsError; +use jsonrpsee_ws_client::Error as RpcError; use relay_utils::MaybeConnectionError; use sc_rpc_api::system::Health; @@ -28,32 +27,43 @@ pub type Result = std::result::Result; /// a Substrate node through RPC. #[derive(Debug)] pub enum Error { - /// Web socket connection error. - WsConnectionError(WsNewDnsError), /// An error that can occur when making a request to /// an JSON-RPC server. - Request(RequestError), + RpcError(RpcError), /// The response from the server could not be SCALE decoded. ResponseParseFailed(codec::Error), /// The Substrate bridge pallet has not yet been initialized. UninitializedBridgePallet, /// Account does not exist on the chain. AccountDoesNotExist, + /// Runtime storage is missing mandatory ":code:" entry. + MissingMandatoryCodeEntry, /// The client we're connected to is not synced, so we can't rely on its state. ClientNotSynced(Health), + /// An error has happened when we have tried to parse storage proof. + StorageProofError(bp_runtime::StorageProofError), /// Custom logic error. 
Custom(String), } -impl From for Error { - fn from(error: WsNewDnsError) -> Self { - Error::WsConnectionError(error) +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Self::RpcError(ref e) => Some(e), + Self::ResponseParseFailed(ref e) => Some(e), + Self::UninitializedBridgePallet => None, + Self::AccountDoesNotExist => None, + Self::MissingMandatoryCodeEntry => None, + Self::ClientNotSynced(_) => None, + Self::StorageProofError(_) => None, + Self::Custom(_) => None, + } } } -impl From for Error { - fn from(error: RequestError) -> Self { - Error::Request(error) +impl From for Error { + fn from(error: RpcError) -> Self { + Error::RpcError(error) } } @@ -61,27 +71,35 @@ impl MaybeConnectionError for Error { fn is_connection_error(&self) -> bool { matches!( *self, - Error::Request(RequestError::TransportError(_)) | Error::ClientNotSynced(_) + Error::RpcError(RpcError::TransportError(_)) + // right now if connection to the ws server is dropped (after it is already established), + // we're getting this error + | Error::RpcError(RpcError::Internal(_)) + | Error::RpcError(RpcError::RestartNeeded(_)) + | Error::ClientNotSynced(_), ) } } +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let s = match self { + Self::RpcError(e) => e.to_string(), + Self::ResponseParseFailed(e) => e.to_string(), + Self::UninitializedBridgePallet => "The Substrate bridge pallet has not been initialized yet.".into(), + Self::AccountDoesNotExist => "Account does not exist on the chain".into(), + Self::MissingMandatoryCodeEntry => "Mandatory :code: entry is missing from runtime storage".into(), + Self::StorageProofError(e) => format!("Error when parsing storage proof: {:?}", e), + Self::ClientNotSynced(health) => format!("Substrate client is not synced: {}", health), + Self::Custom(e) => e.clone(), + }; + + write!(f, "{}", s) + } +} + impl From for String { fn from(error: 
Error) -> String { error.to_string() } } - -impl ToString for Error { - fn to_string(&self) -> String { - match self { - Self::WsConnectionError(e) => e.to_string(), - Self::Request(e) => e.to_string(), - Self::ResponseParseFailed(e) => e.to_string(), - Self::UninitializedBridgePallet => "The Substrate bridge pallet has not been initialized yet.".into(), - Self::AccountDoesNotExist => "Account does not exist on the chain".into(), - Self::ClientNotSynced(health) => format!("Substrate client is not synced: {}", health), - Self::Custom(e) => e.clone(), - } - } -} diff --git a/polkadot/bridges/relays/client-substrate/src/finality_source.rs b/polkadot/bridges/relays/client-substrate/src/finality_source.rs new file mode 100644 index 0000000000..3850093419 --- /dev/null +++ b/polkadot/bridges/relays/client-substrate/src/finality_source.rs @@ -0,0 +1,135 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Default generic implementation of finality source for basic Substrate client. 
+ +use crate::chain::{BlockWithJustification, Chain}; +use crate::client::Client; +use crate::error::Error; +use crate::sync_header::SyncHeader; + +use async_trait::async_trait; +use bp_header_chain::justification::GrandpaJustification; +use codec::Decode; +use finality_relay::{FinalitySyncPipeline, SourceClient, SourceHeader}; +use futures::stream::{unfold, Stream, StreamExt}; +use relay_utils::relay_loop::Client as RelayClient; +use sp_runtime::traits::Header as HeaderT; +use std::{marker::PhantomData, pin::Pin}; + +/// Substrate node as finality source. +pub struct FinalitySource { + client: Client, + _phantom: PhantomData

, +} + +impl FinalitySource { + /// Create new headers source using given client. + pub fn new(client: Client) -> Self { + FinalitySource { + client, + _phantom: Default::default(), + } + } +} + +impl Clone for FinalitySource { + fn clone(&self) -> Self { + FinalitySource { + client: self.client.clone(), + _phantom: Default::default(), + } + } +} + +#[async_trait] +impl RelayClient for FinalitySource { + type Error = Error; + + async fn reconnect(&mut self) -> Result<(), Error> { + self.client.reconnect().await + } +} + +#[async_trait] +impl SourceClient

for FinalitySource +where + C: Chain, + C::BlockNumber: relay_utils::BlockNumberBase, + P: FinalitySyncPipeline< + Hash = C::Hash, + Number = C::BlockNumber, + Header = SyncHeader, + FinalityProof = GrandpaJustification, + >, + P::Header: SourceHeader, +{ + type FinalityProofsStream = Pin> + Send>>; + + async fn best_finalized_block_number(&self) -> Result { + // we **CAN** continue to relay finality proofs if source node is out of sync, because + // target node may be missing proofs that are already available at the source + let finalized_header_hash = self.client.best_finalized_header_hash().await?; + let finalized_header = self.client.header_by_hash(finalized_header_hash).await?; + Ok(*finalized_header.number()) + } + + async fn header_and_finality_proof( + &self, + number: P::Number, + ) -> Result<(P::Header, Option), Error> { + let header_hash = self.client.block_hash_by_number(number).await?; + let signed_block = self.client.get_block(Some(header_hash)).await?; + + let justification = signed_block + .justification() + .map(|raw_justification| GrandpaJustification::::decode(&mut raw_justification.as_slice())) + .transpose() + .map_err(Error::ResponseParseFailed)?; + + Ok((signed_block.header().into(), justification)) + } + + async fn finality_proofs(&self) -> Result { + Ok(unfold( + self.client.clone().subscribe_justifications().await?, + move |mut subscription| async move { + loop { + let next_justification = subscription.next().await?; + let decoded_justification = + GrandpaJustification::::decode(&mut &next_justification.0[..]); + + let justification = match decoded_justification { + Ok(j) => j, + Err(err) => { + log::error!( + target: "bridge", + "Failed to decode justification target from the {} justifications stream: {:?}", + P::SOURCE_NAME, + err, + ); + + continue; + } + }; + + return Some((justification, subscription)); + } + }, + ) + .boxed()) + } +} diff --git a/polkadot/bridges/relays/substrate-client/src/guard.rs 
b/polkadot/bridges/relays/client-substrate/src/guard.rs similarity index 97% rename from polkadot/bridges/relays/substrate-client/src/guard.rs rename to polkadot/bridges/relays/client-substrate/src/guard.rs index d439ec8907..68fef1c4c9 100644 --- a/polkadot/bridges/relays/substrate-client/src/guard.rs +++ b/polkadot/bridges/relays/client-substrate/src/guard.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Module provides a set of guard functions that are running in background threads +//! Pallet provides a set of guard functions that are running in background threads //! and are aborting process if some condition fails. use crate::{Chain, ChainWithBalances, Client}; @@ -172,6 +172,7 @@ mod tests { SinkExt, }; + #[derive(Debug, Clone)] struct TestChain; impl bp_runtime::Chain for TestChain { @@ -187,7 +188,8 @@ mod tests { type AccountId = u32; type Index = u32; - type SignedBlock = (); + type SignedBlock = + sp_runtime::generic::SignedBlock>; type Call = (); } diff --git a/polkadot/bridges/relays/substrate-client/src/headers_source.rs b/polkadot/bridges/relays/client-substrate/src/headers_source.rs similarity index 93% rename from polkadot/bridges/relays/substrate-client/src/headers_source.rs rename to polkadot/bridges/relays/client-substrate/src/headers_source.rs index b347a1c9f5..3dfcb220de 100644 --- a/polkadot/bridges/relays/substrate-client/src/headers_source.rs +++ b/polkadot/bridges/relays/client-substrate/src/headers_source.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify @@ -26,7 +26,7 @@ use headers_relay::{ sync_types::{HeaderIdOf, HeadersSyncPipeline, QueuedHeader, SourceHeader}, }; use relay_utils::relay_loop::Client as RelayClient; -use sp_runtime::{traits::Header as HeaderT, Justification}; +use sp_runtime::{traits::Header as HeaderT, EncodedJustification}; use std::marker::PhantomData; /// Substrate node as headers source. @@ -69,7 +69,7 @@ where C: Chain, C::BlockNumber: relay_utils::BlockNumberBase, C::Header: Into, - P: HeadersSyncPipeline, + P: HeadersSyncPipeline, P::Header: SourceHeader, { async fn best_block_number(&self) -> Result { diff --git a/polkadot/bridges/relays/substrate-client/src/lib.rs b/polkadot/bridges/relays/client-substrate/src/lib.rs similarity index 90% rename from polkadot/bridges/relays/substrate-client/src/lib.rs rename to polkadot/bridges/relays/client-substrate/src/lib.rs index c6d077b21c..0f1bfb481e 100644 --- a/polkadot/bridges/relays/substrate-client/src/lib.rs +++ b/polkadot/bridges/relays/client-substrate/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -24,8 +24,10 @@ mod error; mod rpc; mod sync_header; +pub mod finality_source; pub mod guard; pub mod headers_source; +pub mod metrics; pub use crate::chain::{BlockWithJustification, Chain, ChainWithBalances, TransactionSignScheme}; pub use crate::client::{Client, JustificationsSubscription, OpaqueGrandpaAuthoritiesSet}; @@ -43,6 +45,8 @@ pub struct ConnectionParams { pub host: String, /// Websocket server TCP port. pub port: u16, + /// Use secure websocket connection. 
+ pub secure: bool, } impl Default for ConnectionParams { @@ -50,6 +54,7 @@ impl Default for ConnectionParams { ConnectionParams { host: "localhost".into(), port: 9944, + secure: false, } } } diff --git a/polkadot/bridges/relays/client-substrate/src/metrics/float_storage_value.rs b/polkadot/bridges/relays/client-substrate/src/metrics/float_storage_value.rs new file mode 100644 index 0000000000..f3ba8988ee --- /dev/null +++ b/polkadot/bridges/relays/client-substrate/src/metrics/float_storage_value.rs @@ -0,0 +1,82 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::chain::Chain; +use crate::client::Client; + +use async_trait::async_trait; +use codec::Decode; +use relay_utils::metrics::{metric_name, register, Gauge, PrometheusError, Registry, StandaloneMetrics, F64}; +use sp_core::storage::StorageKey; +use sp_runtime::{traits::UniqueSaturatedInto, FixedPointNumber}; +use std::time::Duration; + +/// Storage value update interval (in blocks). +const UPDATE_INTERVAL_IN_BLOCKS: u32 = 5; + +/// Metric that represents fixed-point runtime storage value as float gauge. 
+#[derive(Clone, Debug)] +pub struct FloatStorageValueMetric { + client: Client, + storage_key: StorageKey, + maybe_default_value: Option, + metric: Gauge, +} + +impl FloatStorageValueMetric { + /// Create new metric. + pub fn new( + registry: &Registry, + prefix: Option<&str>, + client: Client, + storage_key: StorageKey, + maybe_default_value: Option, + name: String, + help: String, + ) -> Result { + Ok(FloatStorageValueMetric { + client, + storage_key, + maybe_default_value, + metric: register(Gauge::new(metric_name(prefix, &name), help)?, registry)?, + }) + } +} + +#[async_trait] +impl StandaloneMetrics for FloatStorageValueMetric +where + T: 'static + Decode + Send + Sync + FixedPointNumber, +{ + fn update_interval(&self) -> Duration { + C::AVERAGE_BLOCK_INTERVAL * UPDATE_INTERVAL_IN_BLOCKS + } + + async fn update(&self) { + relay_utils::metrics::set_gauge_value( + &self.metric, + self.client + .storage_value::(self.storage_key.clone()) + .await + .map(|maybe_storage_value| { + maybe_storage_value.or(self.maybe_default_value).map(|storage_value| { + storage_value.into_inner().unique_saturated_into() as f64 + / T::DIV.unique_saturated_into() as f64 + }) + }), + ); + } +} diff --git a/polkadot/bridges/relays/client-substrate/src/metrics/mod.rs b/polkadot/bridges/relays/client-substrate/src/metrics/mod.rs new file mode 100644 index 0000000000..177e2a709c --- /dev/null +++ b/polkadot/bridges/relays/client-substrate/src/metrics/mod.rs @@ -0,0 +1,23 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Contains several Substrate-specific metrics that may be exposed by relay. + +pub use float_storage_value::FloatStorageValueMetric; +pub use storage_proof_overhead::StorageProofOverheadMetric; + +mod float_storage_value; +mod storage_proof_overhead; diff --git a/polkadot/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs b/polkadot/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs new file mode 100644 index 0000000000..526fe1e048 --- /dev/null +++ b/polkadot/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs @@ -0,0 +1,104 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +use crate::chain::Chain; +use crate::client::Client; +use crate::error::Error; + +use async_trait::async_trait; +use relay_utils::metrics::{metric_name, register, Gauge, PrometheusError, Registry, StandaloneMetrics, U64}; +use sp_core::storage::StorageKey; +use sp_runtime::traits::Header as HeaderT; +use sp_storage::well_known_keys::CODE; +use std::time::Duration; + +/// Storage proof overhead update interval (in blocks). +const UPDATE_INTERVAL_IN_BLOCKS: u32 = 100; + +/// Metric that represents extra size of storage proof as unsigned integer gauge. +/// +/// There's one thing to keep in mind when using this metric: the overhead may be slightly +/// different for other values, but this metric gives a good estimation. +#[derive(Debug)] +pub struct StorageProofOverheadMetric { + client: Client, + metric: Gauge, +} + +impl Clone for StorageProofOverheadMetric { + fn clone(&self) -> Self { + StorageProofOverheadMetric { + client: self.client.clone(), + metric: self.metric.clone(), + } + } +} + +impl StorageProofOverheadMetric { + /// Create new metric instance with given name and help. + pub fn new( + registry: &Registry, + prefix: Option<&str>, + client: Client, + name: String, + help: String, + ) -> Result { + Ok(StorageProofOverheadMetric { + client, + metric: register(Gauge::new(metric_name(prefix, &name), help)?, registry)?, + }) + } + + /// Returns approximate storage proof size overhead. 
+ async fn compute_storage_proof_overhead(&self) -> Result { + let best_header_hash = self.client.best_finalized_header_hash().await?; + let best_header = self.client.header_by_hash(best_header_hash).await?; + + let storage_proof = self + .client + .prove_storage(vec![StorageKey(CODE.to_vec())], best_header_hash) + .await?; + let storage_proof_size: usize = storage_proof.clone().iter_nodes().map(|n| n.len()).sum(); + + let storage_value_reader = + bp_runtime::StorageProofChecker::::new(*best_header.state_root(), storage_proof) + .map_err(Error::StorageProofError)?; + let maybe_encoded_storage_value = storage_value_reader + .read_value(CODE) + .map_err(Error::StorageProofError)?; + let encoded_storage_value_size = maybe_encoded_storage_value + .ok_or(Error::MissingMandatoryCodeEntry)? + .len(); + + Ok(storage_proof_size - encoded_storage_value_size) + } +} + +#[async_trait] +impl StandaloneMetrics for StorageProofOverheadMetric { + fn update_interval(&self) -> Duration { + C::AVERAGE_BLOCK_INTERVAL * UPDATE_INTERVAL_IN_BLOCKS + } + + async fn update(&self) { + relay_utils::metrics::set_gauge_value( + &self.metric, + self.compute_storage_proof_overhead() + .await + .map(|overhead| Some(overhead as u64)), + ); + } +} diff --git a/polkadot/bridges/relays/substrate-client/src/rpc.rs b/polkadot/bridges/relays/client-substrate/src/rpc.rs similarity index 69% rename from polkadot/bridges/relays/substrate-client/src/rpc.rs rename to polkadot/bridges/relays/client-substrate/src/rpc.rs index 2e832b4018..06df1f705d 100644 --- a/polkadot/bridges/relays/substrate-client/src/rpc.rs +++ b/polkadot/bridges/relays/client-substrate/src/rpc.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -16,23 +16,16 @@ //! The most generic Substrate node RPC interface. 
-// The compiler doesn't think we're using the -// code from rpc_api! -#![allow(dead_code)] -#![allow(unused_variables)] - use crate::chain::Chain; -use bp_message_lane::{LaneId, MessageNonce}; -use bp_runtime::InstanceId; -use sc_rpc_api::system::Health; +use sc_rpc_api::{state::ReadProof, system::Health}; use sp_core::{ storage::{StorageData, StorageKey}, Bytes, }; use sp_version::RuntimeVersion; -jsonrpsee::rpc_api! { +jsonrpsee_proc_macros::rpc_client_api! { pub(crate) Substrate { #[rpc(method = "system_health", positional_params)] fn system_health() -> Health; @@ -51,27 +44,10 @@ jsonrpsee::rpc_api! { #[rpc(method = "state_call", positional_params)] fn state_call(method: String, data: Bytes, at_block: Option) -> Bytes; #[rpc(method = "state_getStorage", positional_params)] - fn get_storage(key: StorageKey) -> Option; + fn state_get_storage(key: StorageKey) -> Option; + #[rpc(method = "state_getReadProof", positional_params)] + fn state_prove_storage(keys: Vec, hash: Option) -> ReadProof; #[rpc(method = "state_getRuntimeVersion", positional_params)] - fn runtime_version() -> RuntimeVersion; - } - - pub(crate) SubstrateMessageLane { - #[rpc(method = "messageLane_proveMessages", positional_params)] - fn prove_messages( - instance: InstanceId, - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - include_outbound_lane_state: bool, - block: Option, - ) -> Bytes; - - #[rpc(method = "messageLane_proveMessagesDelivery", positional_params)] - fn prove_messages_delivery( - instance: InstanceId, - lane: LaneId, - block: Option, - ) -> Bytes; + fn state_runtime_version() -> RuntimeVersion; } } diff --git a/polkadot/bridges/relays/substrate-client/src/sync_header.rs b/polkadot/bridges/relays/client-substrate/src/sync_header.rs similarity index 79% rename from polkadot/bridges/relays/substrate-client/src/sync_header.rs rename to polkadot/bridges/relays/client-substrate/src/sync_header.rs index fd1c582b94..0b74dee690 100644 --- 
a/polkadot/bridges/relays/substrate-client/src/sync_header.rs +++ b/polkadot/bridges/relays/client-substrate/src/sync_header.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . +use bp_header_chain::find_grandpa_authorities_scheduled_change; +use finality_relay::SourceHeader as FinalitySourceHeader; use headers_relay::sync_types::SourceHeader; use num_traits::{CheckedSub, One}; use relay_utils::HeaderId; @@ -47,7 +49,7 @@ impl

From
for SyncHeader
{ impl SourceHeader for SyncHeader
{ fn id(&self) -> HeaderId { - relay_utils::HeaderId(*self.number(), self.hash()) + relay_utils::HeaderId(*self.0.number(), self.hash()) } fn parent_id(&self) -> HeaderId { @@ -59,3 +61,13 @@ impl SourceHeader for SyncHeader< ) } } + +impl FinalitySourceHeader for SyncHeader
{ + fn number(&self) -> Header::Number { + *self.0.number() + } + + fn is_mandatory(&self) -> bool { + find_grandpa_authorities_scheduled_change(&self.0).is_some() + } +} diff --git a/polkadot/bridges/relays/client-westend/Cargo.toml b/polkadot/bridges/relays/client-westend/Cargo.toml new file mode 100644 index 0000000000..a408ae3a46 --- /dev/null +++ b/polkadot/bridges/relays/client-westend/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "relay-westend-client" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0" } +headers-relay = { path = "../headers" } +relay-substrate-client = { path = "../client-substrate" } +relay-utils = { path = "../utils" } + +# Bridge dependencies + +bp-westend = { path = "../../primitives/chain-westend" } + +# Substrate Dependencies + +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-westend/src/lib.rs b/polkadot/bridges/relays/client-westend/src/lib.rs new file mode 100644 index 0000000000..417938ccf5 --- /dev/null +++ b/polkadot/bridges/relays/client-westend/src/lib.rs @@ -0,0 +1,97 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Types used to connect to the Westend chain. + +use codec::Encode; +use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme}; +use sp_core::{storage::StorageKey, Pair}; +use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; +use std::time::Duration; + +/// Westend header id. +pub type HeaderId = relay_utils::HeaderId; + +/// Westend header type used in headers sync. 
+pub type SyncHeader = relay_substrate_client::SyncHeader; + +/// Westend chain definition +#[derive(Debug, Clone, Copy)] +pub struct Westend; + +impl ChainBase for Westend { + type BlockNumber = bp_westend::BlockNumber; + type Hash = bp_westend::Hash; + type Hasher = bp_westend::Hasher; + type Header = bp_westend::Header; +} + +impl Chain for Westend { + const NAME: &'static str = "Westend"; + const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); + + type AccountId = bp_westend::AccountId; + type Index = bp_westend::Nonce; + type SignedBlock = bp_westend::SignedBlock; + type Call = bp_westend::Call; +} + +impl ChainWithBalances for Westend { + type NativeBalance = bp_westend::Balance; + + fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { + StorageKey(bp_westend::account_info_storage_key(account_id)) + } +} + +impl TransactionSignScheme for Westend { + type Chain = Westend; + type AccountKeyPair = sp_core::sr25519::Pair; + type SignedTransaction = bp_westend::UncheckedExtrinsic; + + fn sign_transaction( + genesis_hash: ::Hash, + signer: &Self::AccountKeyPair, + signer_nonce: ::Index, + call: ::Call, + ) -> Self::SignedTransaction { + let raw_payload = SignedPayload::new( + call, + bp_westend::SignedExtensions::new( + bp_westend::VERSION, + sp_runtime::generic::Era::Immortal, + genesis_hash, + signer_nonce, + 0, + ), + ) + .expect("SignedExtension never fails."); + + let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); + let signer: sp_runtime::MultiSigner = signer.public().into(); + let (call, extra, _) = raw_payload.deconstruct(); + + bp_westend::UncheckedExtrinsic::new_signed( + call, + sp_runtime::MultiAddress::Id(signer.into_account()), + signature.into(), + extra, + ) + } +} + +/// Westend signing params. 
+pub type SigningParams = sp_core::sr25519::Pair; diff --git a/polkadot/bridges/relays/exchange-relay/Cargo.toml b/polkadot/bridges/relays/exchange/Cargo.toml similarity index 100% rename from polkadot/bridges/relays/exchange-relay/Cargo.toml rename to polkadot/bridges/relays/exchange/Cargo.toml diff --git a/polkadot/bridges/relays/exchange-relay/src/exchange.rs b/polkadot/bridges/relays/exchange/src/exchange.rs similarity index 99% rename from polkadot/bridges/relays/exchange-relay/src/exchange.rs rename to polkadot/bridges/relays/exchange/src/exchange.rs index cdf9c1a9f3..cec0d7cba1 100644 --- a/polkadot/bridges/relays/exchange-relay/src/exchange.rs +++ b/polkadot/bridges/relays/exchange/src/exchange.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/relays/exchange-relay/src/exchange_loop.rs b/polkadot/bridges/relays/exchange/src/exchange_loop.rs similarity index 90% rename from polkadot/bridges/relays/exchange-relay/src/exchange_loop.rs rename to polkadot/bridges/relays/exchange/src/exchange_loop.rs index 06f4d3f40a..b46d34e047 100644 --- a/polkadot/bridges/relays/exchange-relay/src/exchange_loop.rs +++ b/polkadot/bridges/relays/exchange/src/exchange_loop.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify @@ -26,7 +26,7 @@ use backoff::backoff::Backoff; use futures::{future::FutureExt, select}; use num_traits::One; use relay_utils::{ - metrics::{start as metrics_start, GlobalMetrics, MetricsParams}, + metrics::{GlobalMetrics, MetricsParams}, retry_backoff, FailedClient, MaybeConnectionError, }; use std::future::Future; @@ -78,48 +78,37 @@ impl TransactionProofsRelayStorage for InMemoryStorag } } +/// Return prefix that will be used by default to expose Prometheus metrics of the exchange loop. +pub fn metrics_prefix() -> String { + format!("{}_to_{}_Exchange", P::SOURCE_NAME, P::TARGET_NAME) +} + /// Run proofs synchronization. -pub fn run( +pub async fn run( storage: impl TransactionProofsRelayStorage>, source_client: impl SourceClient

, target_client: impl TargetClient

, - metrics_params: Option, + metrics_params: MetricsParams, exit_signal: impl Future, -) { +) -> Result<(), String> { let exit_signal = exit_signal.shared(); - let metrics_global = GlobalMetrics::default(); - let metrics_exch = ExchangeLoopMetrics::default(); - let metrics_enabled = metrics_params.is_some(); - metrics_start( - format!("{}_to_{}_Exchange", P::SOURCE_NAME, P::TARGET_NAME), - metrics_params, - &metrics_global, - &metrics_exch, - ); - relay_utils::relay_loop::run( - relay_utils::relay_loop::RECONNECT_DELAY, - source_client, - target_client, - |source_client, target_client| { + relay_utils::relay_loop(source_client, target_client) + .with_metrics(Some(metrics_prefix::

()), metrics_params) + .loop_metric(|registry, prefix| ExchangeLoopMetrics::new(registry, prefix))? + .standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))? + .expose() + .await? + .run(|source_client, target_client, metrics| { run_until_connection_lost( storage.clone(), source_client, target_client, - if metrics_enabled { - Some(metrics_global.clone()) - } else { - None - }, - if metrics_enabled { - Some(metrics_exch.clone()) - } else { - None - }, + metrics, exit_signal.clone(), ) - }, - ); + }) + .await } /// Run proofs synchronization. @@ -127,7 +116,6 @@ async fn run_until_connection_lost( mut storage: impl TransactionProofsRelayStorage>, source_client: impl SourceClient

, target_client: impl TargetClient

, - metrics_global: Option, metrics_exch: Option, exit_signal: impl Future, ) -> Result<(), FailedClient> { @@ -150,10 +138,6 @@ async fn run_until_connection_lost( ) .await; - if let Some(ref metrics_global) = metrics_global { - metrics_global.update().await; - } - if let Err((is_connection_error, failed_client)) = iteration_result { if is_connection_error { return Err(failed_client); @@ -320,12 +304,12 @@ mod tests { } })); - run( + let _ = async_std::task::block_on(run( storage, source, target, - None, + MetricsParams::disabled(), exit_receiver.into_future().map(|(_, _)| ()), - ); + )); } } diff --git a/polkadot/bridges/relays/exchange-relay/src/exchange_loop_metrics.rs b/polkadot/bridges/relays/exchange/src/exchange_loop_metrics.rs similarity index 65% rename from polkadot/bridges/relays/exchange-relay/src/exchange_loop_metrics.rs rename to polkadot/bridges/relays/exchange/src/exchange_loop_metrics.rs index bf8f0243b6..82d3e649d4 100644 --- a/polkadot/bridges/relays/exchange-relay/src/exchange_loop_metrics.rs +++ b/polkadot/bridges/relays/exchange/src/exchange_loop_metrics.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -17,7 +17,9 @@ //! Metrics for currency-exchange relay loop. use crate::exchange::{BlockNumberOf, RelayedBlockTransactions, TransactionProofPipeline}; -use relay_utils::metrics::{register, Counter, CounterVec, GaugeVec, Metrics, Opts, Registry, U64}; +use relay_utils::metrics::{ + metric_name, register, Counter, CounterVec, GaugeVec, Opts, PrometheusError, Registry, U64, +}; /// Exchange transactions relay metrics. 
#[derive(Clone)] @@ -30,31 +32,38 @@ pub struct ExchangeLoopMetrics { processed_transactions: CounterVec, } -impl Metrics for ExchangeLoopMetrics { - fn register(&self, registry: &Registry) -> Result<(), String> { - register(self.best_block_numbers.clone(), registry).map_err(|e| e.to_string())?; - register(self.processed_blocks.clone(), registry).map_err(|e| e.to_string())?; - register(self.processed_transactions.clone(), registry).map_err(|e| e.to_string())?; - Ok(()) - } -} - -impl Default for ExchangeLoopMetrics { - fn default() -> Self { - ExchangeLoopMetrics { - best_block_numbers: GaugeVec::new( - Opts::new("best_block_numbers", "Best finalized block numbers"), - &["type"], - ) - .expect("metric is static and thus valid; qed"), - processed_blocks: Counter::new("processed_blocks", "Total number of processed blocks") - .expect("metric is static and thus valid; qed"), - processed_transactions: CounterVec::new( - Opts::new("processed_transactions", "Total number of processed transactions"), - &["type"], - ) - .expect("metric is static and thus valid; qed"), - } +impl ExchangeLoopMetrics { + /// Create and register exchange loop metrics. 
+ pub fn new(registry: &Registry, prefix: Option<&str>) -> Result { + Ok(ExchangeLoopMetrics { + best_block_numbers: register( + GaugeVec::new( + Opts::new( + metric_name(prefix, "best_block_numbers"), + "Best finalized block numbers", + ), + &["type"], + )?, + registry, + )?, + processed_blocks: register( + Counter::new( + metric_name(prefix, "processed_blocks"), + "Total number of processed blocks", + )?, + registry, + )?, + processed_transactions: register( + CounterVec::new( + Opts::new( + metric_name(prefix, "processed_transactions"), + "Total number of processed transactions", + ), + &["type"], + )?, + registry, + )?, + }) } } diff --git a/polkadot/bridges/relays/exchange-relay/src/lib.rs b/polkadot/bridges/relays/exchange/src/lib.rs similarity index 95% rename from polkadot/bridges/relays/exchange-relay/src/lib.rs rename to polkadot/bridges/relays/exchange/src/lib.rs index f975ef2aa0..370f085b4b 100644 --- a/polkadot/bridges/relays/exchange-relay/src/lib.rs +++ b/polkadot/bridges/relays/exchange/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/relays/finality/Cargo.toml b/polkadot/bridges/relays/finality/Cargo.toml new file mode 100644 index 0000000000..944da9837f --- /dev/null +++ b/polkadot/bridges/relays/finality/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "finality-relay" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +description = "Finality proofs relay" + +[dependencies] +async-std = "1.6.5" +async-trait = "0.1.40" +backoff = "0.2" +bp-header-chain = { path = "../../primitives/header-chain" } +futures = "0.3.5" +headers-relay = { path = "../headers" } +log = "0.4.11" +num-traits = "0.2" +relay-utils = { path = "../utils" } + +[dev-dependencies] +parking_lot = "0.11.0" diff --git a/polkadot/bridges/relays/finality/src/finality_loop.rs b/polkadot/bridges/relays/finality/src/finality_loop.rs new file mode 100644 index 0000000000..aff32e46de --- /dev/null +++ b/polkadot/bridges/relays/finality/src/finality_loop.rs @@ -0,0 +1,599 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! The loop basically reads all missing headers and their finality proofs from the source client. +//! 
The proof for the best possible header is then submitted to the target node. The only exception +//! is the mandatory headers, which we always submit to the target node. For such headers, we +//! assume that the persistent proof either exists, or will eventually become available. + +use crate::{FinalityProof, FinalitySyncPipeline, SourceHeader}; + +use async_trait::async_trait; +use backoff::backoff::Backoff; +use futures::{select, Future, FutureExt, Stream, StreamExt}; +use headers_relay::sync_loop_metrics::SyncLoopMetrics; +use num_traits::{One, Saturating}; +use relay_utils::{ + metrics::{GlobalMetrics, MetricsParams}, + relay_loop::Client as RelayClient, + retry_backoff, FailedClient, MaybeConnectionError, +}; +use std::{ + pin::Pin, + time::{Duration, Instant}, +}; + +/// Finality proof synchronization loop parameters. +#[derive(Debug, Clone)] +pub struct FinalitySyncParams { + /// Interval at which we check updates on both clients. Normally should be larger than + /// `min(source_block_time, target_block_time)`. + /// + /// This parameter may be used to limit transactions rate. Increase the value && you'll get + /// infrequent updates => sparse headers => potential slow down of bridge applications, but pallet storage + /// won't be super large. Decrease the value to near `source_block_time` and you'll get + /// transaction for (almost) every block of the source chain => all source headers will be known + /// to the target chain => bridge applications will run faster, but pallet storage may explode + /// (but if pruning is there, then it's fine). + pub tick: Duration, + /// Number of finality proofs to keep in internal buffer between loop wakeups. + /// + /// While in "major syncing" state, we still read finality proofs from the stream. They're stored + /// in the internal buffer between loop wakeups. When we're close to the tip of the chain, we may + /// meet finality delays if headers are not finalized frequently. 
So instead of waiting for next + /// finality proof to appear in the stream, we may use existing proof from that buffer. + pub recent_finality_proofs_limit: usize, + /// Timeout before we treat our transactions as lost and restart the whole sync process. + pub stall_timeout: Duration, +} + +/// Source client used in finality synchronization loop. +#[async_trait] +pub trait SourceClient: RelayClient { + /// Stream of new finality proofs. The stream is allowed to miss proofs for some + /// headers, even if those headers are mandatory. + type FinalityProofsStream: Stream; + + /// Get best finalized block number. + async fn best_finalized_block_number(&self) -> Result; + + /// Get canonical header and its finality proof by number. + async fn header_and_finality_proof( + &self, + number: P::Number, + ) -> Result<(P::Header, Option), Self::Error>; + + /// Subscribe to new finality proofs. + async fn finality_proofs(&self) -> Result; +} + +/// Target client used in finality synchronization loop. +#[async_trait] +pub trait TargetClient: RelayClient { + /// Get best finalized source block number. + async fn best_finalized_source_block_number(&self) -> Result; + + /// Submit header finality proof. + async fn submit_finality_proof(&self, header: P::Header, proof: P::FinalityProof) -> Result<(), Self::Error>; +} + +/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs sync loop. +pub fn metrics_prefix() -> String { + format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME) +} + +/// Run finality proofs synchronization loop. +pub async fn run( + source_client: impl SourceClient

, + target_client: impl TargetClient

, + sync_params: FinalitySyncParams, + metrics_params: MetricsParams, + exit_signal: impl Future, +) -> Result<(), String> { + let exit_signal = exit_signal.shared(); + relay_utils::relay_loop(source_client, target_client) + .with_metrics(Some(metrics_prefix::

()), metrics_params) + .loop_metric(|registry, prefix| SyncLoopMetrics::new(registry, prefix))? + .standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))? + .expose() + .await? + .run(|source_client, target_client, metrics| { + run_until_connection_lost( + source_client, + target_client, + sync_params.clone(), + metrics, + exit_signal.clone(), + ) + }) + .await +} + +/// Unjustified headers container. Ordered by header number. +pub(crate) type UnjustifiedHeaders = Vec; +/// Finality proofs container. Ordered by target header number. +pub(crate) type FinalityProofs

= Vec<( +

::Number, +

::FinalityProof, +)>; +/// Reference to finality proofs container. +pub(crate) type FinalityProofsRef<'a, P> = &'a [( +

::Number, +

::FinalityProof, +)]; + +/// Error that may happen inside finality synchronization loop. +#[derive(Debug)] +pub(crate) enum Error { + /// Source client request has failed with given error. + Source(SourceError), + /// Target client request has failed with given error. + Target(TargetError), + /// Finality proof for mandatory header is missing from the source node. + MissingMandatoryFinalityProof(P::Number), + /// The synchronization has stalled. + Stalled, +} + +impl Error +where + P: FinalitySyncPipeline, + SourceError: MaybeConnectionError, + TargetError: MaybeConnectionError, +{ + fn fail_if_connection_error(&self) -> Result<(), FailedClient> { + match *self { + Error::Source(ref error) if error.is_connection_error() => Err(FailedClient::Source), + Error::Target(ref error) if error.is_connection_error() => Err(FailedClient::Target), + Error::Stalled => Err(FailedClient::Both), + _ => Ok(()), + } + } +} + +/// Information about transaction that we have submitted. +#[derive(Debug, Clone)] +struct Transaction { + /// Time when we have submitted this transaction. + pub time: Instant, + /// The number of the header we have submitted. + pub submitted_header_number: Number, +} + +/// Finality proofs stream that may be restarted. +pub(crate) struct RestartableFinalityProofsStream { + /// Flag that the stream needs to be restarted. + pub(crate) needs_restart: bool, + /// The stream itself. + stream: Pin>, +} + +#[cfg(test)] +impl From for RestartableFinalityProofsStream { + fn from(stream: S) -> Self { + RestartableFinalityProofsStream { + needs_restart: false, + stream: Box::pin(stream), + } + } +} + +/// Finality synchronization loop state. +struct FinalityLoopState<'a, P: FinalitySyncPipeline, FinalityProofsStream> { + /// Synchronization loop progress. + progress: &'a mut (Instant, Option), + /// Finality proofs stream. + finality_proofs_stream: &'a mut RestartableFinalityProofsStream, + /// Recent finality proofs that we have read from the stream. 
+ recent_finality_proofs: &'a mut FinalityProofs

, + /// Last transaction that we have submitted to the target node. + last_transaction: Option>, +} + +async fn run_until_connection_lost( + source_client: impl SourceClient

, + target_client: impl TargetClient

, + sync_params: FinalitySyncParams, + metrics_sync: Option, + exit_signal: impl Future, +) -> Result<(), FailedClient> { + let restart_finality_proofs_stream = || async { + source_client.finality_proofs().await.map_err(|error| { + log::error!( + target: "bridge", + "Failed to subscribe to {} justifications: {:?}. Going to reconnect", + P::SOURCE_NAME, + error, + ); + + FailedClient::Source + }) + }; + + let exit_signal = exit_signal.fuse(); + futures::pin_mut!(exit_signal); + + let mut finality_proofs_stream = RestartableFinalityProofsStream { + needs_restart: false, + stream: Box::pin(restart_finality_proofs_stream().await?), + }; + let mut recent_finality_proofs = Vec::new(); + + let mut progress = (Instant::now(), None); + let mut retry_backoff = retry_backoff(); + let mut last_transaction = None; + + loop { + // run loop iteration + let iteration_result = run_loop_iteration( + &source_client, + &target_client, + FinalityLoopState { + progress: &mut progress, + finality_proofs_stream: &mut finality_proofs_stream, + recent_finality_proofs: &mut recent_finality_proofs, + last_transaction: last_transaction.clone(), + }, + &sync_params, + &metrics_sync, + ) + .await; + + // deal with errors + let next_tick = match iteration_result { + Ok(updated_last_transaction) => { + last_transaction = updated_last_transaction; + retry_backoff.reset(); + sync_params.tick + } + Err(error) => { + log::error!(target: "bridge", "Finality sync loop iteration has failed with error: {:?}", error); + error.fail_if_connection_error()?; + retry_backoff + .next_backoff() + .unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY) + } + }; + if finality_proofs_stream.needs_restart { + log::warn!(target: "bridge", "{} finality proofs stream is being restarted", P::SOURCE_NAME); + + finality_proofs_stream.needs_restart = false; + finality_proofs_stream.stream = Box::pin(restart_finality_proofs_stream().await?); + } + + // wait till exit signal, or new source block + select! 
{ + _ = async_std::task::sleep(next_tick).fuse() => {}, + _ = exit_signal => return Ok(()), + } + } +} + +async fn run_loop_iteration( + source_client: &SC, + target_client: &TC, + state: FinalityLoopState<'_, P, SC::FinalityProofsStream>, + sync_params: &FinalitySyncParams, + metrics_sync: &Option, +) -> Result>, Error> +where + P: FinalitySyncPipeline, + SC: SourceClient

, + TC: TargetClient

, +{ + // read best source headers ids from source and target nodes + let best_number_at_source = source_client + .best_finalized_block_number() + .await + .map_err(Error::Source)?; + let best_number_at_target = target_client + .best_finalized_source_block_number() + .await + .map_err(Error::Target)?; + if let Some(ref metrics_sync) = *metrics_sync { + metrics_sync.update_best_block_at_source(best_number_at_source); + metrics_sync.update_best_block_at_target(best_number_at_target); + } + *state.progress = print_sync_progress::

(*state.progress, best_number_at_source, best_number_at_target); + + // if we have already submitted header, then we just need to wait for it + // if we're waiting too much, then we believe our transaction has been lost and restart sync + if let Some(last_transaction) = state.last_transaction { + if best_number_at_target >= last_transaction.submitted_header_number { + // transaction has been mined && we can continue + } else if last_transaction.time.elapsed() > sync_params.stall_timeout { + log::error!( + target: "bridge", + "Finality synchronization from {} to {} has stalled. Going to restart", + P::SOURCE_NAME, + P::TARGET_NAME, + ); + + return Err(Error::Stalled); + } else { + return Ok(Some(last_transaction)); + } + } + + // submit new header if we have something new + match select_header_to_submit( + source_client, + target_client, + state.finality_proofs_stream, + state.recent_finality_proofs, + best_number_at_source, + best_number_at_target, + sync_params, + ) + .await? + { + Some((header, justification)) => { + let new_transaction = Transaction { + time: Instant::now(), + submitted_header_number: header.number(), + }; + + log::debug!( + target: "bridge", + "Going to submit finality proof of {} header #{:?} to {}", + P::SOURCE_NAME, + new_transaction.submitted_header_number, + P::TARGET_NAME, + ); + + target_client + .submit_finality_proof(header, justification) + .await + .map_err(Error::Target)?; + Ok(Some(new_transaction)) + } + None => Ok(None), + } +} + +async fn select_header_to_submit( + source_client: &SC, + target_client: &TC, + finality_proofs_stream: &mut RestartableFinalityProofsStream, + recent_finality_proofs: &mut FinalityProofs

, + best_number_at_source: P::Number, + best_number_at_target: P::Number, + sync_params: &FinalitySyncParams, +) -> Result, Error> +where + P: FinalitySyncPipeline, + SC: SourceClient

, + TC: TargetClient

, +{ + // to see that the loop is progressing + log::trace!( + target: "bridge", + "Considering range of headers ({:?}; {:?}]", + best_number_at_target, + best_number_at_source, + ); + + // read missing headers. if we see that the header schedules GRANDPA change, we need to + // submit this header + let selected_finality_proof = read_missing_headers::( + source_client, + target_client, + best_number_at_source, + best_number_at_target, + ) + .await?; + let (mut unjustified_headers, mut selected_finality_proof) = match selected_finality_proof { + SelectedFinalityProof::Mandatory(header, finality_proof) => return Ok(Some((header, finality_proof))), + SelectedFinalityProof::Regular(unjustified_headers, header, finality_proof) => { + (unjustified_headers, Some((header, finality_proof))) + } + SelectedFinalityProof::None(unjustified_headers) => (unjustified_headers, None), + }; + + // all headers that are missing from the target client are non-mandatory + // => even if we have already selected some header and its persistent finality proof, + // we may try to select better header by reading non-persistent proofs from the stream + read_finality_proofs_from_stream::(finality_proofs_stream, recent_finality_proofs); + selected_finality_proof = select_better_recent_finality_proof::

( + recent_finality_proofs, + &mut unjustified_headers, + selected_finality_proof, + ); + + // remove obsolete 'recent' finality proofs + keep its size under certain limit + let oldest_finality_proof_to_keep = selected_finality_proof + .as_ref() + .map(|(header, _)| header.number()) + .unwrap_or(best_number_at_target); + prune_recent_finality_proofs::

( + oldest_finality_proof_to_keep, + recent_finality_proofs, + sync_params.recent_finality_proofs_limit, + ); + + Ok(selected_finality_proof) +} + +/// Finality proof that has been selected by the `read_missing_headers` function. +pub(crate) enum SelectedFinalityProof { + /// Mandatory header and its proof has been selected. We shall submit proof for this header. + Mandatory(Header, FinalityProof), + /// Regular header and its proof has been selected. We may submit this proof, or proof for + /// some better header. + Regular(UnjustifiedHeaders

, Header, FinalityProof), + /// We haven't found any missing header with persistent proof at the target client. + None(UnjustifiedHeaders
), +} + +/// Read missing headers and their persistent finality proofs from the target client. +/// +/// If we have found some header with known proof, it is returned. +/// Otherwise, `SelectedFinalityProof::None` is returned. +/// +/// Unless we have found mandatory header, all missing headers are collected and returned. +pub(crate) async fn read_missing_headers, TC: TargetClient

>( + source_client: &SC, + _target_client: &TC, + best_number_at_source: P::Number, + best_number_at_target: P::Number, +) -> Result, Error> { + let mut unjustified_headers = Vec::new(); + let mut selected_finality_proof = None; + let mut header_number = best_number_at_target + One::one(); + while header_number <= best_number_at_source { + let (header, finality_proof) = source_client + .header_and_finality_proof(header_number) + .await + .map_err(Error::Source)?; + let is_mandatory = header.is_mandatory(); + + match (is_mandatory, finality_proof) { + (true, Some(finality_proof)) => { + log::trace!(target: "bridge", "Header {:?} is mandatory", header_number); + return Ok(SelectedFinalityProof::Mandatory(header, finality_proof)); + } + (true, None) => return Err(Error::MissingMandatoryFinalityProof(header.number())), + (false, Some(finality_proof)) => { + log::trace!(target: "bridge", "Header {:?} has persistent finality proof", header_number); + unjustified_headers.clear(); + selected_finality_proof = Some((header, finality_proof)); + } + (false, None) => { + unjustified_headers.push(header); + } + } + + header_number = header_number + One::one(); + } + + Ok(match selected_finality_proof { + Some((header, proof)) => SelectedFinalityProof::Regular(unjustified_headers, header, proof), + None => SelectedFinalityProof::None(unjustified_headers), + }) +} + +/// Read finality proofs from the stream. +pub(crate) fn read_finality_proofs_from_stream>( + finality_proofs_stream: &mut RestartableFinalityProofsStream, + recent_finality_proofs: &mut FinalityProofs

, +) { + loop { + let next_proof = finality_proofs_stream.stream.next(); + let finality_proof = match next_proof.now_or_never() { + Some(Some(finality_proof)) => finality_proof, + Some(None) => { + finality_proofs_stream.needs_restart = true; + break; + } + None => break, + }; + + recent_finality_proofs.push((finality_proof.target_header_number(), finality_proof)); + } +} + +/// Try to select better header and its proof, given finality proofs that we +/// have recently read from the stream. +pub(crate) fn select_better_recent_finality_proof( + recent_finality_proofs: FinalityProofsRef

, + unjustified_headers: &mut UnjustifiedHeaders, + selected_finality_proof: Option<(P::Header, P::FinalityProof)>, +) -> Option<(P::Header, P::FinalityProof)> { + if unjustified_headers.is_empty() || recent_finality_proofs.is_empty() { + return selected_finality_proof; + } + + const NOT_EMPTY_PROOF: &str = "we have checked that the vec is not empty; qed"; + + // we need proofs for headers in range unjustified_range_begin..=unjustified_range_end + let unjustified_range_begin = unjustified_headers.first().expect(NOT_EMPTY_PROOF).number(); + let unjustified_range_end = unjustified_headers.last().expect(NOT_EMPTY_PROOF).number(); + + // we have proofs for headers in range buffered_range_begin..=buffered_range_end + let buffered_range_begin = recent_finality_proofs.first().expect(NOT_EMPTY_PROOF).0; + let buffered_range_end = recent_finality_proofs.last().expect(NOT_EMPTY_PROOF).0; + + // we have two ranges => find intersection + let intersection_begin = std::cmp::max(unjustified_range_begin, buffered_range_begin); + let intersection_end = std::cmp::min(unjustified_range_end, buffered_range_end); + let intersection = intersection_begin..=intersection_end; + + // find last proof from intersection + let selected_finality_proof_index = recent_finality_proofs + .binary_search_by_key(intersection.end(), |(number, _)| *number) + .unwrap_or_else(|index| index.saturating_sub(1)); + let (selected_header_number, finality_proof) = &recent_finality_proofs[selected_finality_proof_index]; + if !intersection.contains(selected_header_number) { + return selected_finality_proof; + } + + // now remove all obsolete headers and extract selected header + let selected_header_position = unjustified_headers + .binary_search_by_key(selected_header_number, |header| header.number()) + .expect("unjustified_headers contain all headers from intersection; qed"); + let selected_header = unjustified_headers.swap_remove(selected_header_position); + Some((selected_header, finality_proof.clone())) +} + 
+pub(crate) fn prune_recent_finality_proofs( + justified_header_number: P::Number, + recent_finality_proofs: &mut FinalityProofs

, + recent_finality_proofs_limit: usize, +) { + let position = + recent_finality_proofs.binary_search_by_key(&justified_header_number, |(header_number, _)| *header_number); + + // remove all obsolete elements + *recent_finality_proofs = recent_finality_proofs.split_off( + position + .map(|position| position + 1) + .unwrap_or_else(|position| position), + ); + + // now - limit vec by size + let split_index = recent_finality_proofs + .len() + .saturating_sub(recent_finality_proofs_limit); + *recent_finality_proofs = recent_finality_proofs.split_off(split_index); +} + +fn print_sync_progress( + progress_context: (Instant, Option), + best_number_at_source: P::Number, + best_number_at_target: P::Number, +) -> (Instant, Option) { + let (prev_time, prev_best_number_at_target) = progress_context; + let now = Instant::now(); + + let need_update = now - prev_time > Duration::from_secs(10) + || prev_best_number_at_target + .map(|prev_best_number_at_target| { + best_number_at_target.saturating_sub(prev_best_number_at_target) > 10.into() + }) + .unwrap_or(true); + + if !need_update { + return (prev_time, prev_best_number_at_target); + } + + log::info!( + target: "bridge", + "Synced {:?} of {:?} headers", + best_number_at_target, + best_number_at_source, + ); + (now, Some(best_number_at_target)) +} diff --git a/polkadot/bridges/relays/finality/src/finality_loop_tests.rs b/polkadot/bridges/relays/finality/src/finality_loop_tests.rs new file mode 100644 index 0000000000..eedd902003 --- /dev/null +++ b/polkadot/bridges/relays/finality/src/finality_loop_tests.rs @@ -0,0 +1,404 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Tests for finality synchronization loop. + +#![cfg(test)] + +use crate::finality_loop::{ + prune_recent_finality_proofs, read_finality_proofs_from_stream, run, select_better_recent_finality_proof, + FinalityProofs, FinalitySyncParams, SourceClient, TargetClient, +}; +use crate::{FinalityProof, FinalitySyncPipeline, SourceHeader}; + +use async_trait::async_trait; +use futures::{FutureExt, Stream, StreamExt}; +use parking_lot::Mutex; +use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient, MaybeConnectionError}; +use std::{collections::HashMap, pin::Pin, sync::Arc, time::Duration}; + +type IsMandatory = bool; +type TestNumber = u64; + +#[derive(Debug, Clone)] +enum TestError { + NonConnection, +} + +impl MaybeConnectionError for TestError { + fn is_connection_error(&self) -> bool { + false + } +} + +#[derive(Debug, Clone)] +struct TestFinalitySyncPipeline; + +impl FinalitySyncPipeline for TestFinalitySyncPipeline { + const SOURCE_NAME: &'static str = "TestSource"; + const TARGET_NAME: &'static str = "TestTarget"; + + type Hash = u64; + type Number = TestNumber; + type Header = TestSourceHeader; + type FinalityProof = TestFinalityProof; +} + +#[derive(Debug, Clone, PartialEq)] +struct TestSourceHeader(IsMandatory, TestNumber); + +impl SourceHeader for TestSourceHeader { + fn number(&self) -> TestNumber { + self.1 + } + + fn is_mandatory(&self) -> bool { + self.0 + } +} + +#[derive(Debug, Clone, PartialEq)] +struct TestFinalityProof(TestNumber); + +impl FinalityProof for TestFinalityProof { + fn target_header_number(&self) -> TestNumber { + self.0 + } +} + 
+#[derive(Debug, Clone, Default)] +struct ClientsData { + source_best_block_number: TestNumber, + source_headers: HashMap)>, + source_proofs: Vec, + + target_best_block_number: TestNumber, + target_headers: Vec<(TestSourceHeader, TestFinalityProof)>, +} + +#[derive(Clone)] +struct TestSourceClient { + on_method_call: Arc, + data: Arc>, +} + +#[async_trait] +impl RelayClient for TestSourceClient { + type Error = TestError; + + async fn reconnect(&mut self) -> Result<(), TestError> { + unreachable!() + } +} + +#[async_trait] +impl SourceClient for TestSourceClient { + type FinalityProofsStream = Pin>>; + + async fn best_finalized_block_number(&self) -> Result { + let mut data = self.data.lock(); + (self.on_method_call)(&mut *data); + Ok(data.source_best_block_number) + } + + async fn header_and_finality_proof( + &self, + number: TestNumber, + ) -> Result<(TestSourceHeader, Option), TestError> { + let mut data = self.data.lock(); + (self.on_method_call)(&mut *data); + data.source_headers + .get(&number) + .cloned() + .ok_or(TestError::NonConnection) + } + + async fn finality_proofs(&self) -> Result { + let mut data = self.data.lock(); + (self.on_method_call)(&mut *data); + Ok(futures::stream::iter(data.source_proofs.clone()).boxed()) + } +} + +#[derive(Clone)] +struct TestTargetClient { + on_method_call: Arc, + data: Arc>, +} + +#[async_trait] +impl RelayClient for TestTargetClient { + type Error = TestError; + + async fn reconnect(&mut self) -> Result<(), TestError> { + unreachable!() + } +} + +#[async_trait] +impl TargetClient for TestTargetClient { + async fn best_finalized_source_block_number(&self) -> Result { + let mut data = self.data.lock(); + (self.on_method_call)(&mut *data); + Ok(data.target_best_block_number) + } + + async fn submit_finality_proof(&self, header: TestSourceHeader, proof: TestFinalityProof) -> Result<(), TestError> { + let mut data = self.data.lock(); + (self.on_method_call)(&mut *data); + data.target_best_block_number = header.number(); + 
data.target_headers.push((header, proof)); + Ok(()) + } +} + +fn run_sync_loop(state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static) -> ClientsData { + let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded(); + let internal_state_function: Arc = Arc::new(move |data| { + if state_function(data) { + exit_sender.unbounded_send(()).unwrap(); + } + }); + let clients_data = Arc::new(Mutex::new(ClientsData { + source_best_block_number: 10, + source_headers: vec![ + (6, (TestSourceHeader(false, 6), None)), + (7, (TestSourceHeader(false, 7), Some(TestFinalityProof(7)))), + (8, (TestSourceHeader(true, 8), Some(TestFinalityProof(8)))), + (9, (TestSourceHeader(false, 9), Some(TestFinalityProof(9)))), + (10, (TestSourceHeader(false, 10), None)), + ] + .into_iter() + .collect(), + source_proofs: vec![TestFinalityProof(12), TestFinalityProof(14)], + + target_best_block_number: 5, + target_headers: vec![], + })); + let source_client = TestSourceClient { + on_method_call: internal_state_function.clone(), + data: clients_data.clone(), + }; + let target_client = TestTargetClient { + on_method_call: internal_state_function, + data: clients_data.clone(), + }; + let sync_params = FinalitySyncParams { + tick: Duration::from_secs(0), + recent_finality_proofs_limit: 1024, + stall_timeout: Duration::from_secs(1), + }; + + let _ = async_std::task::block_on(run( + source_client, + target_client, + sync_params, + MetricsParams::disabled(), + exit_receiver.into_future().map(|(_, _)| ()), + )); + + let clients_data = clients_data.lock().clone(); + clients_data +} + +#[test] +fn finality_sync_loop_works() { + let client_data = run_sync_loop(|data| { + // header#7 has persistent finality proof, but it isn't mandatory => it isn't submitted, because + // header#8 has persistent finality proof && it is mandatory => it is submitted + // header#9 has persistent finality proof, but it isn't mandatory => it is submitted, because + // there are no more persistent 
finality proofs + // + // once this ^^^ is done, we generate more blocks && read proof for blocks 12 and 14 from the stream + if data.target_best_block_number == 9 { + data.source_best_block_number = 14; + data.source_headers.insert(11, (TestSourceHeader(false, 11), None)); + data.source_headers + .insert(12, (TestSourceHeader(false, 12), Some(TestFinalityProof(12)))); + data.source_headers.insert(13, (TestSourceHeader(false, 13), None)); + data.source_headers + .insert(14, (TestSourceHeader(false, 14), Some(TestFinalityProof(14)))); + } + // once this ^^^ is done, we generate more blocks && read persistent proof for block 16 + if data.target_best_block_number == 14 { + data.source_best_block_number = 17; + data.source_headers.insert(15, (TestSourceHeader(false, 15), None)); + data.source_headers + .insert(16, (TestSourceHeader(false, 16), Some(TestFinalityProof(16)))); + data.source_headers.insert(17, (TestSourceHeader(false, 17), None)); + } + + data.target_best_block_number == 16 + }); + + assert_eq!( + client_data.target_headers, + vec![ + // before adding 11..14: finality proof for mandatory header#8 + (TestSourceHeader(true, 8), TestFinalityProof(8)), + // before adding 11..14: persistent finality proof for non-mandatory header#9 + (TestSourceHeader(false, 9), TestFinalityProof(9)), + // after adding 11..14: ephemeral finality proof for non-mandatory header#14 + (TestSourceHeader(false, 14), TestFinalityProof(14)), + // after adding 15..17: persistent finality proof for non-mandatory header#16 + (TestSourceHeader(false, 16), TestFinalityProof(16)), + ], + ); +} + +#[test] +fn select_better_recent_finality_proof_works() { + // if there are no unjustified headers, nothing is changed + assert_eq!( + select_better_recent_finality_proof::( + &[(5, TestFinalityProof(5))], + &mut vec![], + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ), + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ); + + // if there are no recent finality proofs, 
nothing is changed + assert_eq!( + select_better_recent_finality_proof::( + &[], + &mut vec![TestSourceHeader(false, 5)], + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ), + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ); + + // if there's no intersection between recent finality proofs and unjustified headers, nothing is changed + let mut unjustified_headers = vec![TestSourceHeader(false, 9), TestSourceHeader(false, 10)]; + assert_eq!( + select_better_recent_finality_proof::( + &[(1, TestFinalityProof(1)), (4, TestFinalityProof(4))], + &mut unjustified_headers, + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ), + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ); + + // if there's intersection between recent finality proofs and unjustified headers, but there are no + // proofs in this intersection, nothing is changed + let mut unjustified_headers = vec![ + TestSourceHeader(false, 8), + TestSourceHeader(false, 9), + TestSourceHeader(false, 10), + ]; + assert_eq!( + select_better_recent_finality_proof::( + &[(7, TestFinalityProof(7)), (11, TestFinalityProof(11))], + &mut unjustified_headers, + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ), + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ); + assert_eq!( + unjustified_headers, + vec![ + TestSourceHeader(false, 8), + TestSourceHeader(false, 9), + TestSourceHeader(false, 10) + ] + ); + + // if there's intersection between recent finality proofs and unjustified headers and there's + // a proof in this intersection: + // - this better (last from intersection) proof is selected; + // - 'obsolete' unjustified headers are pruned. 
+ let mut unjustified_headers = vec![ + TestSourceHeader(false, 8), + TestSourceHeader(false, 9), + TestSourceHeader(false, 10), + ]; + assert_eq!( + select_better_recent_finality_proof::( + &[(7, TestFinalityProof(7)), (9, TestFinalityProof(9))], + &mut unjustified_headers, + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ), + Some((TestSourceHeader(false, 9), TestFinalityProof(9))), + ); +} + +#[test] +fn read_finality_proofs_from_stream_works() { + // when stream is currently empty, nothing is changed + let mut recent_finality_proofs = vec![(1, TestFinalityProof(1))]; + let mut stream = futures::stream::pending().into(); + read_finality_proofs_from_stream::(&mut stream, &mut recent_finality_proofs); + assert_eq!(recent_finality_proofs, vec![(1, TestFinalityProof(1))]); + assert_eq!(stream.needs_restart, false); + + // when stream has entry with target, it is added to the recent proofs container + let mut stream = futures::stream::iter(vec![TestFinalityProof(4)]) + .chain(futures::stream::pending()) + .into(); + read_finality_proofs_from_stream::(&mut stream, &mut recent_finality_proofs); + assert_eq!( + recent_finality_proofs, + vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))] + ); + assert_eq!(stream.needs_restart, false); + + // when stream has ended, we'll need to restart it + let mut stream = futures::stream::empty().into(); + read_finality_proofs_from_stream::(&mut stream, &mut recent_finality_proofs); + assert_eq!( + recent_finality_proofs, + vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))] + ); + assert_eq!(stream.needs_restart, true); +} + +#[test] +fn prune_recent_finality_proofs_works() { + let original_recent_finality_proofs: FinalityProofs = vec![ + (10, TestFinalityProof(10)), + (13, TestFinalityProof(13)), + (15, TestFinalityProof(15)), + (17, TestFinalityProof(17)), + (19, TestFinalityProof(19)), + ] + .into_iter() + .collect(); + + // when there's proof for justified header in the vec + let mut 
recent_finality_proofs = original_recent_finality_proofs.clone(); + prune_recent_finality_proofs::(10, &mut recent_finality_proofs, 1024); + assert_eq!(&original_recent_finality_proofs[1..], recent_finality_proofs,); + + // when there are no proof for justified header in the vec + let mut recent_finality_proofs = original_recent_finality_proofs.clone(); + prune_recent_finality_proofs::(11, &mut recent_finality_proofs, 1024); + assert_eq!(&original_recent_finality_proofs[1..], recent_finality_proofs,); + + // when there are too many entries after initial prune && they also need to be pruned + let mut recent_finality_proofs = original_recent_finality_proofs.clone(); + prune_recent_finality_proofs::(10, &mut recent_finality_proofs, 2); + assert_eq!(&original_recent_finality_proofs[3..], recent_finality_proofs,); + + // when last entry is pruned + let mut recent_finality_proofs = original_recent_finality_proofs.clone(); + prune_recent_finality_proofs::(19, &mut recent_finality_proofs, 2); + assert_eq!(&original_recent_finality_proofs[5..], recent_finality_proofs,); + + // when post-last entry is pruned + let mut recent_finality_proofs = original_recent_finality_proofs.clone(); + prune_recent_finality_proofs::(20, &mut recent_finality_proofs, 2); + assert_eq!(&original_recent_finality_proofs[5..], recent_finality_proofs,); +} diff --git a/polkadot/bridges/relays/finality/src/lib.rs b/polkadot/bridges/relays/finality/src/lib.rs new file mode 100644 index 0000000000..d5048aa160 --- /dev/null +++ b/polkadot/bridges/relays/finality/src/lib.rs @@ -0,0 +1,53 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! This crate has single entrypoint to run synchronization loop that is built around finality +//! proofs, as opposed to headers synchronization loop, which is built around headers. The headers +//! are still submitted to the target node, but are treated as auxiliary data as we are not trying +//! to submit all source headers to the target node. + +pub use crate::finality_loop::{metrics_prefix, run, FinalitySyncParams, SourceClient, TargetClient}; + +use bp_header_chain::FinalityProof; +use std::fmt::Debug; + +mod finality_loop; +mod finality_loop_tests; + +/// Finality proofs synchronization pipeline. +pub trait FinalitySyncPipeline: Clone + Debug + Send + Sync { + /// Name of the finality proofs source. + const SOURCE_NAME: &'static str; + /// Name of the finality proofs target. + const TARGET_NAME: &'static str; + + /// Headers we're syncing are identified by this hash. + type Hash: Eq + Clone + Copy + Send + Sync + Debug; + /// Headers we're syncing are identified by this number. + type Number: relay_utils::BlockNumberBase; + /// Type of header that we're syncing. + type Header: SourceHeader; + /// Finality proof type. + type FinalityProof: FinalityProof; +} + +/// Header that we're receiving from source node. +pub trait SourceHeader: Clone + Debug + PartialEq + Send + Sync { + /// Returns number of header. + fn number(&self) -> Number; + /// Returns true if this header needs to be submitted to target node. 
+ fn is_mandatory(&self) -> bool; +} diff --git a/polkadot/bridges/relays/headers-relay/Cargo.toml b/polkadot/bridges/relays/headers/Cargo.toml similarity index 100% rename from polkadot/bridges/relays/headers-relay/Cargo.toml rename to polkadot/bridges/relays/headers/Cargo.toml diff --git a/polkadot/bridges/relays/headers-relay/src/headers.rs b/polkadot/bridges/relays/headers/src/headers.rs similarity index 99% rename from polkadot/bridges/relays/headers-relay/src/headers.rs rename to polkadot/bridges/relays/headers/src/headers.rs index 99ccc197af..be3e2cb6e6 100644 --- a/polkadot/bridges/relays/headers-relay/src/headers.rs +++ b/polkadot/bridges/relays/headers/src/headers.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/relays/headers-relay/src/lib.rs b/polkadot/bridges/relays/headers/src/lib.rs similarity index 91% rename from polkadot/bridges/relays/headers-relay/src/lib.rs rename to polkadot/bridges/relays/headers/src/lib.rs index d91fe94d9d..8946355921 100644 --- a/polkadot/bridges/relays/headers-relay/src/lib.rs +++ b/polkadot/bridges/relays/headers/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -16,7 +16,7 @@ //! Relaying source chain headers to target chain. This module provides entrypoint //! that starts reading new headers from source chain and submit these headers as -//! module/contract transactions to the target chain. Module/contract on the target +//! module/contract transactions to the target chain. Pallet/contract on the target //! chain is a light-client of the source chain. 
All other trustless bridge //! applications are built using this light-client, so running headers-relay is //! essential for running all other bridge applications. diff --git a/polkadot/bridges/relays/headers-relay/src/sync.rs b/polkadot/bridges/relays/headers/src/sync.rs similarity index 99% rename from polkadot/bridges/relays/headers-relay/src/sync.rs rename to polkadot/bridges/relays/headers/src/sync.rs index 8e4c671dba..e992b1f8e5 100644 --- a/polkadot/bridges/relays/headers-relay/src/sync.rs +++ b/polkadot/bridges/relays/headers/src/sync.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/relays/headers-relay/src/sync_loop.rs b/polkadot/bridges/relays/headers/src/sync_loop.rs similarity index 95% rename from polkadot/bridges/relays/headers-relay/src/sync_loop.rs rename to polkadot/bridges/relays/headers/src/sync_loop.rs index d2584f2ccb..e4f1b7b045 100644 --- a/polkadot/bridges/relays/headers-relay/src/sync_loop.rs +++ b/polkadot/bridges/relays/headers/src/sync_loop.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -25,7 +25,7 @@ use futures::{future::FutureExt, stream::StreamExt}; use num_traits::{Saturating, Zero}; use relay_utils::{ format_ids, interval, - metrics::{start as metrics_start, GlobalMetrics, MetricsParams}, + metrics::{GlobalMetrics, MetricsParams}, process_future_result, relay_loop::Client as RelayClient, retry_backoff, FailedClient, MaybeConnectionError, StringifiedMaybeConnectionError, @@ -110,35 +110,31 @@ pub trait SyncMaintain: Clone + Send + Sync { impl SyncMaintain

for () {} +/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs sync loop. +pub fn metrics_prefix() -> String { + format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME) +} + /// Run headers synchronization. #[allow(clippy::too_many_arguments)] -pub fn run>( +pub async fn run>( source_client: impl SourceClient

, source_tick: Duration, target_client: TC, target_tick: Duration, sync_maintain: impl SyncMaintain

, sync_params: HeadersSyncParams, - metrics_params: Option, + metrics_params: MetricsParams, exit_signal: impl Future, -) { +) -> Result<(), String> { let exit_signal = exit_signal.shared(); - - let metrics_global = GlobalMetrics::default(); - let metrics_sync = SyncLoopMetrics::default(); - let metrics_enabled = metrics_params.is_some(); - metrics_start( - format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME), - metrics_params, - &metrics_global, - &metrics_sync, - ); - - relay_utils::relay_loop::run( - relay_utils::relay_loop::RECONNECT_DELAY, - source_client, - target_client, - |source_client, target_client| { + relay_utils::relay_loop(source_client, target_client) + .with_metrics(Some(metrics_prefix::

()), metrics_params) + .loop_metric(|registry, prefix| SyncLoopMetrics::new(registry, prefix))? + .standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))? + .expose() + .await? + .run(|source_client, target_client, metrics| { run_until_connection_lost( source_client, source_tick, @@ -146,20 +142,11 @@ pub fn run>( target_tick, sync_maintain.clone(), sync_params.clone(), - if metrics_enabled { - Some(metrics_global.clone()) - } else { - None - }, - if metrics_enabled { - Some(metrics_sync.clone()) - } else { - None - }, + metrics, exit_signal.clone(), ) - }, - ); + }) + .await } /// Run headers synchronization. @@ -171,7 +158,6 @@ async fn run_until_connection_lost>( target_tick: Duration, sync_maintain: impl SyncMaintain

, sync_params: HeadersSyncParams, - metrics_global: Option, metrics_sync: Option, exit_signal: impl Future, ) -> Result<(), FailedClient> { @@ -437,9 +423,6 @@ async fn run_until_connection_lost>( } // update metrics - if let Some(ref metrics_global) = metrics_global { - metrics_global.update().await; - } if let Some(ref metrics_sync) = metrics_sync { metrics_sync.update(&sync); } @@ -650,5 +633,5 @@ fn print_sync_progress( now_best_header.map(|id| id.0), now_target_header, ); - (now_time, now_best_header.clone().map(|id| id.0), *now_target_header) + (now_time, (*now_best_header).map(|id| id.0), *now_target_header) } diff --git a/polkadot/bridges/relays/headers-relay/src/sync_loop_metrics.rs b/polkadot/bridges/relays/headers/src/sync_loop_metrics.rs similarity index 68% rename from polkadot/bridges/relays/headers-relay/src/sync_loop_metrics.rs rename to polkadot/bridges/relays/headers/src/sync_loop_metrics.rs index 456aa0a6b0..37dae11340 100644 --- a/polkadot/bridges/relays/headers-relay/src/sync_loop_metrics.rs +++ b/polkadot/bridges/relays/headers/src/sync_loop_metrics.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -20,7 +20,7 @@ use crate::sync::HeadersSync; use crate::sync_types::{HeaderStatus, HeadersSyncPipeline}; use num_traits::Zero; -use relay_utils::metrics::{register, GaugeVec, Metrics, Opts, Registry, U64}; +use relay_utils::metrics::{metric_name, register, GaugeVec, Opts, PrometheusError, Registry, U64}; /// Headers sync metrics. 
#[derive(Clone)] @@ -31,44 +31,57 @@ pub struct SyncLoopMetrics { blocks_in_state: GaugeVec, } -impl Metrics for SyncLoopMetrics { - fn register(&self, registry: &Registry) -> Result<(), String> { - register(self.best_block_numbers.clone(), registry).map_err(|e| e.to_string())?; - register(self.blocks_in_state.clone(), registry).map_err(|e| e.to_string())?; - Ok(()) - } -} - -impl Default for SyncLoopMetrics { - fn default() -> Self { - SyncLoopMetrics { - best_block_numbers: GaugeVec::new( - Opts::new("best_block_numbers", "Best block numbers on source and target nodes"), - &["node"], - ) - .expect("metric is static and thus valid; qed"), - blocks_in_state: GaugeVec::new( - Opts::new("blocks_in_state", "Number of blocks in given state"), - &["state"], - ) - .expect("metric is static and thus valid; qed"), - } +impl SyncLoopMetrics { + /// Create and register headers loop metrics. + pub fn new(registry: &Registry, prefix: Option<&str>) -> Result { + Ok(SyncLoopMetrics { + best_block_numbers: register( + GaugeVec::new( + Opts::new( + metric_name(prefix, "best_block_numbers"), + "Best block numbers on source and target nodes", + ), + &["node"], + )?, + registry, + )?, + blocks_in_state: register( + GaugeVec::new( + Opts::new( + metric_name(prefix, "blocks_in_state"), + "Number of blocks in given state", + ), + &["state"], + )?, + registry, + )?, + }) } } impl SyncLoopMetrics { + /// Update best block number at source. + pub fn update_best_block_at_source>(&self, source_best_number: Number) { + self.best_block_numbers + .with_label_values(&["source"]) + .set(source_best_number.into()); + } + + /// Update best block number at target. + pub fn update_best_block_at_target>(&self, target_best_number: Number) { + self.best_block_numbers + .with_label_values(&["target"]) + .set(target_best_number.into()); + } + /// Update metrics. pub fn update(&self, sync: &HeadersSync

) { let headers = sync.headers(); let source_best_number = sync.source_best_number().unwrap_or_else(Zero::zero); let target_best_number = sync.target_best_header().map(|id| id.0).unwrap_or_else(Zero::zero); - self.best_block_numbers - .with_label_values(&["source"]) - .set(source_best_number.into()); - self.best_block_numbers - .with_label_values(&["target"]) - .set(target_best_number.into()); + self.update_best_block_at_source(source_best_number); + self.update_best_block_at_target(target_best_number); self.blocks_in_state .with_label_values(&["maybe_orphan"]) diff --git a/polkadot/bridges/relays/headers-relay/src/sync_loop_tests.rs b/polkadot/bridges/relays/headers/src/sync_loop_tests.rs similarity index 98% rename from polkadot/bridges/relays/headers-relay/src/sync_loop_tests.rs rename to polkadot/bridges/relays/headers/src/sync_loop_tests.rs index 5cfd5e4f57..3347c4d0d3 100644 --- a/polkadot/bridges/relays/headers-relay/src/sync_loop_tests.rs +++ b/polkadot/bridges/relays/headers/src/sync_loop_tests.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify @@ -24,7 +24,8 @@ use backoff::backoff::Backoff; use futures::{future::FutureExt, stream::StreamExt}; use parking_lot::Mutex; use relay_utils::{ - process_future_result, relay_loop::Client as RelayClient, retry_backoff, HeaderId, MaybeConnectionError, + metrics::MetricsParams, process_future_result, relay_loop::Client as RelayClient, retry_backoff, HeaderId, + MaybeConnectionError, }; use std::{ collections::{HashMap, HashSet}, @@ -493,16 +494,16 @@ fn run_sync_loop_test(params: SyncLoopTestParams) { target.data.lock().requires_extra = target_requires_extra; target.data.lock().requires_completion = target_requires_completion; - run( + let _ = async_std::task::block_on(run( source, test_tick(), target, test_tick(), (), crate::sync::tests::default_sync_params(), - None, + MetricsParams::disabled(), exit_receiver.into_future().map(|(_, _)| ()), - ); + )); } #[test] diff --git a/polkadot/bridges/relays/headers-relay/src/sync_types.rs b/polkadot/bridges/relays/headers/src/sync_types.rs similarity index 99% rename from polkadot/bridges/relays/headers-relay/src/sync_types.rs rename to polkadot/bridges/relays/headers/src/sync_types.rs index e1da623225..e6500ad5fa 100644 --- a/polkadot/bridges/relays/headers-relay/src/sync_types.rs +++ b/polkadot/bridges/relays/headers/src/sync_types.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/relays/messages-relay/Cargo.toml b/polkadot/bridges/relays/messages/Cargo.toml similarity index 85% rename from polkadot/bridges/relays/messages-relay/Cargo.toml rename to polkadot/bridges/relays/messages/Cargo.toml index 9c2daefdb4..e02f8ccc86 100644 --- a/polkadot/bridges/relays/messages-relay/Cargo.toml +++ b/polkadot/bridges/relays/messages/Cargo.toml @@ -15,5 +15,5 @@ parking_lot = "0.11.0" # Bridge Dependencies -bp-message-lane = { path = "../../primitives/message-lane" } +bp-messages = { path = "../../primitives/messages" } relay-utils = { path = "../utils" } diff --git a/polkadot/bridges/relays/messages-relay/src/lib.rs b/polkadot/bridges/relays/messages/src/lib.rs similarity index 89% rename from polkadot/bridges/relays/messages-relay/src/lib.rs rename to polkadot/bridges/relays/messages/src/lib.rs index 99222f0e02..cdd94bca95 100644 --- a/polkadot/bridges/relays/messages-relay/src/lib.rs +++ b/polkadot/bridges/relays/messages/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Relaying [`message-lane`](../pallet_message_lane/index.html) application specific +//! Relaying [`pallet-bridge-messages`](../pallet_bridge_messages/index.html) application specific //! data. Message lane allows sending arbitrary messages between bridged chains. This //! module provides entrypoint that starts reading messages from given message lane //! 
of source chain and submits proof-of-message-at-source-chain transactions to the diff --git a/polkadot/bridges/relays/messages-relay/src/message_lane.rs b/polkadot/bridges/relays/messages/src/message_lane.rs similarity index 97% rename from polkadot/bridges/relays/messages-relay/src/message_lane.rs rename to polkadot/bridges/relays/messages/src/message_lane.rs index 0eab02ae29..5090ef124e 100644 --- a/polkadot/bridges/relays/messages-relay/src/message_lane.rs +++ b/polkadot/bridges/relays/messages/src/message_lane.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify diff --git a/polkadot/bridges/relays/messages-relay/src/message_lane_loop.rs b/polkadot/bridges/relays/messages/src/message_lane_loop.rs similarity index 89% rename from polkadot/bridges/relays/messages-relay/src/message_lane_loop.rs rename to polkadot/bridges/relays/messages/src/message_lane_loop.rs index 28b55dba47..41eee606d8 100644 --- a/polkadot/bridges/relays/messages-relay/src/message_lane_loop.rs +++ b/polkadot/bridges/relays/messages/src/message_lane_loop.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Message delivery loop. Designed to work with message-lane pallet. +//! Message delivery loop. Designed to work with messages pallet. //! //! Single relay instance delivers messages of single lane in single direction. //! To serve two-way lane, you would need two instances of relay. 
@@ -30,11 +30,11 @@ use crate::message_race_receiving::run as run_message_receiving_race; use crate::metrics::MessageLaneLoopMetrics; use async_trait::async_trait; -use bp_message_lane::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; use futures::{channel::mpsc::unbounded, future::FutureExt, stream::StreamExt}; use relay_utils::{ interval, - metrics::{start as metrics_start, GlobalMetrics, MetricsParams}, + metrics::{GlobalMetrics, MetricsParams}, process_future_result, relay_loop::Client as RelayClient, retry_backoff, FailedClient, @@ -139,6 +139,9 @@ pub trait SourceClient: RelayClient { generated_at_block: TargetHeaderIdOf

, proof: P::MessagesReceivingProof, ) -> Result<(), Self::Error>; + + /// We need given finalized target header on source to continue synchronization. + async fn require_target_header_on_source(&self, id: TargetHeaderIdOf

); } /// Target client trait. @@ -177,6 +180,9 @@ pub trait TargetClient: RelayClient { nonces: RangeInclusive, proof: P::MessagesProof, ) -> Result, Self::Error>; + + /// We need given finalized source header on target to continue synchronization. + async fn require_source_header_on_target(&self, id: SourceHeaderIdOf

); } /// State of the client. @@ -205,53 +211,42 @@ pub struct ClientsState { pub target: Option>, } +/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs sync loop. +pub fn metrics_prefix(lane: &LaneId) -> String { + format!( + "{}_to_{}_MessageLane_{}", + P::SOURCE_NAME, + P::TARGET_NAME, + hex::encode(lane) + ) +} + /// Run message lane service loop. -pub fn run( +pub async fn run( params: Params, source_client: impl SourceClient

, target_client: impl TargetClient

, - metrics_params: Option, + metrics_params: MetricsParams, exit_signal: impl Future, -) { +) -> Result<(), String> { let exit_signal = exit_signal.shared(); - let metrics_global = GlobalMetrics::default(); - let metrics_msg = MessageLaneLoopMetrics::default(); - let metrics_enabled = metrics_params.is_some(); - metrics_start( - format!( - "{}_to_{}_MessageLane_{}", - P::SOURCE_NAME, - P::TARGET_NAME, - hex::encode(params.lane) - ), - metrics_params, - &metrics_global, - &metrics_msg, - ); - - relay_utils::relay_loop::run( - params.reconnect_delay, - source_client, - target_client, - |source_client, target_client| { + relay_utils::relay_loop(source_client, target_client) + .reconnect_delay(params.reconnect_delay) + .with_metrics(Some(metrics_prefix::

(¶ms.lane)), metrics_params) + .loop_metric(|registry, prefix| MessageLaneLoopMetrics::new(registry, prefix))? + .standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))? + .expose() + .await? + .run(|source_client, target_client, metrics| { run_until_connection_lost( params.clone(), source_client, target_client, - if metrics_enabled { - Some(metrics_global.clone()) - } else { - None - }, - if metrics_enabled { - Some(metrics_msg.clone()) - } else { - None - }, + metrics, exit_signal.clone(), ) - }, - ); + }) + .await } /// Run one-way message delivery loop until connection with target or source node is lost, or exit signal is received. @@ -259,7 +254,6 @@ async fn run_until_connection_lost, TC: Targ params: Params, source_client: SC, target_client: TC, - metrics_global: Option, metrics_msg: Option, exit_signal: impl Future, ) -> Result<(), FailedClient> { @@ -403,10 +397,6 @@ async fn run_until_connection_lost, TC: Targ } } - if let Some(ref metrics_global) = metrics_global { - metrics_global.update().await; - } - if source_client_is_online && source_state_required { log::debug!(target: "bridge", "Asking {} node about its state", P::SOURCE_NAME); source_state.set(source_client.state().fuse()); @@ -485,6 +475,10 @@ pub(crate) mod tests { target_latest_received_nonce: MessageNonce, target_latest_confirmed_received_nonce: MessageNonce, submitted_messages_proofs: Vec, + target_to_source_header_required: Option, + target_to_source_header_requirements: Vec, + source_to_target_header_required: Option, + source_to_target_header_requirements: Vec, } #[derive(Clone)] @@ -589,6 +583,13 @@ pub(crate) mod tests { data.source_latest_confirmed_received_nonce = proof; Ok(()) } + + async fn require_target_header_on_source(&self, id: TargetHeaderIdOf) { + let mut data = self.data.lock(); + data.target_to_source_header_required = Some(id); + data.target_to_source_header_requirements.push(id); + (self.tick)(&mut *data); + } } #[derive(Clone)] @@ -687,6 +688,13 @@ 
pub(crate) mod tests { data.submitted_messages_proofs.push(proof); Ok(nonces) } + + async fn require_source_header_on_target(&self, id: SourceHeaderIdOf) { + let mut data = self.data.lock(); + data.source_to_target_header_required = Some(id); + data.source_to_target_header_requirements.push(id); + (self.tick)(&mut *data); + } } fn run_loop_test( @@ -706,7 +714,7 @@ pub(crate) mod tests { data: data.clone(), tick: target_tick, }; - run( + let _ = run( Params { lane: [0, 0, 0, 0], source_tick: Duration::from_millis(100), @@ -723,9 +731,10 @@ pub(crate) mod tests { }, source_client, target_client, - None, + MetricsParams::disabled(), exit_signal, - ); + ) + .await; let result = data.lock().clone(); result }) @@ -799,8 +808,19 @@ pub(crate) mod tests { target_latest_received_nonce: 0, ..Default::default() }, - Arc::new(|_: &mut TestClientData| {}), + Arc::new(|data: &mut TestClientData| { + // headers relay must only be started when we need new target headers at source node + if data.target_to_source_header_required.is_some() { + assert!(data.source_state.best_finalized_peer_at_best_self.0 < data.target_state.best_self.0); + data.target_to_source_header_required = None; + } + }), Arc::new(move |data: &mut TestClientData| { + // headers relay must only be started when we need new source headers at target node + if data.source_to_target_header_required.is_some() { + assert!(data.target_state.best_finalized_peer_at_best_self.0 < data.source_state.best_self.0); + data.source_to_target_header_required = None; + } // syncing source headers -> target chain (all at once) if data.target_state.best_finalized_peer_at_best_self.0 < data.source_state.best_finalized_self.0 { data.target_state.best_finalized_peer_at_best_self = data.source_state.best_finalized_self; @@ -821,7 +841,7 @@ pub(crate) mod tests { HeaderId(data.source_state.best_self.0 + 1, data.source_state.best_self.0 + 1); data.source_state.best_finalized_self = data.source_state.best_self; } - // if source has received 
all messages receiving confirmations => increase source block so that confirmations may be sent + // if source has received all messages receiving confirmations => stop if data.source_latest_confirmed_received_nonce == 10 { exit_sender.unbounded_send(()).unwrap(); } @@ -837,5 +857,9 @@ pub(crate) mod tests { assert_eq!(result.submitted_messages_proofs[1].0, 5..=8); assert_eq!(result.submitted_messages_proofs[2].0, 9..=10); assert!(!result.submitted_messages_receiving_proofs.is_empty()); + + // check that we have at least once required new source->target or target->source headers + assert!(!result.target_to_source_header_requirements.is_empty()); + assert!(!result.source_to_target_header_requirements.is_empty()); } } diff --git a/polkadot/bridges/relays/messages-relay/src/message_race_delivery.rs b/polkadot/bridges/relays/messages/src/message_race_delivery.rs similarity index 98% rename from polkadot/bridges/relays/messages-relay/src/message_race_delivery.rs rename to polkadot/bridges/relays/messages/src/message_race_delivery.rs index c7e308fee1..225c59f23c 100644 --- a/polkadot/bridges/relays/messages-relay/src/message_race_delivery.rs +++ b/polkadot/bridges/relays/messages/src/message_race_delivery.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -26,7 +26,7 @@ use crate::message_race_strategy::BasicStrategy; use crate::metrics::MessageLaneLoopMetrics; use async_trait::async_trait; -use bp_message_lane::{MessageNonce, UnrewardedRelayersState, Weight}; +use bp_messages::{MessageNonce, UnrewardedRelayersState, Weight}; use futures::stream::FusedStream; use relay_utils::FailedClient; use std::{ @@ -166,6 +166,10 @@ where type Error = C::Error; type TargetNoncesData = DeliveryRaceTargetNoncesData; + async fn require_source_header(&self, id: SourceHeaderIdOf

) { + self.client.require_source_header_on_target(id).await + } + async fn nonces( &self, at_block: TargetHeaderIdOf

, @@ -287,6 +291,10 @@ impl RaceStrategy, TargetHeaderIdOf

, P::M self.strategy.is_empty() } + fn required_source_header_at_target(&self, current_best: &SourceHeaderIdOf

) -> Option> { + self.strategy.required_source_header_at_target(current_best) + } + fn best_at_source(&self) -> Option { self.strategy.best_at_source() } diff --git a/polkadot/bridges/relays/messages-relay/src/message_race_loop.rs b/polkadot/bridges/relays/messages/src/message_race_loop.rs similarity index 95% rename from polkadot/bridges/relays/messages-relay/src/message_race_loop.rs rename to polkadot/bridges/relays/messages/src/message_race_loop.rs index a11a1d7ff5..41f5ede103 100644 --- a/polkadot/bridges/relays/messages-relay/src/message_race_loop.rs +++ b/polkadot/bridges/relays/messages/src/message_race_loop.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -23,7 +23,7 @@ use crate::message_lane_loop::ClientState; use async_trait::async_trait; -use bp_message_lane::MessageNonce; +use bp_messages::MessageNonce; use futures::{ future::FutureExt, stream::{FusedStream, StreamExt}, @@ -123,6 +123,10 @@ pub trait TargetClient { /// Type of the additional data from the target client, used by the race. type TargetNoncesData: std::fmt::Debug; + /// Ask headers relay to relay finalized headers up to (and including) given header + /// from race source to race target. + async fn require_source_header(&self, id: P::SourceHeaderId); + /// Return nonces that are known to the target client. async fn nonces( &self, @@ -149,6 +153,8 @@ pub trait RaceStrategy: Debug { /// Should return true if nothing has to be synced. fn is_empty(&self) -> bool; + /// Return id of source header that is required to be on target to continue synchronization. + fn required_source_header_at_target(&self, current_best: &SourceHeaderId) -> Option; /// Return best nonce at source node. 
/// /// `Some` is returned only if we are sure that the value is greater or equal @@ -303,6 +309,15 @@ pub async fn run, TC: TargetClient

>( async_std::task::sleep, || format!("Error retrieving nonces from {}", P::source_name()), ).fail_if_connection_error(FailedClient::Source)?; + + // ask for more headers if we have nonces to deliver and required headers are missing + let required_source_header_id = race_state + .best_finalized_source_header_id_at_best_target + .as_ref() + .and_then(|best|strategy.required_source_header_at_target(best)); + if let Some(required_source_header_id) = required_source_header_id { + race_target.require_source_header(required_source_header_id).await; + } }, nonces = target_best_nonces => { target_best_nonces_required = false; diff --git a/polkadot/bridges/relays/messages-relay/src/message_race_receiving.rs b/polkadot/bridges/relays/messages/src/message_race_receiving.rs similarity index 97% rename from polkadot/bridges/relays/messages-relay/src/message_race_receiving.rs rename to polkadot/bridges/relays/messages/src/message_race_receiving.rs index cba6ee3858..4381b63591 100644 --- a/polkadot/bridges/relays/messages-relay/src/message_race_receiving.rs +++ b/polkadot/bridges/relays/messages/src/message_race_receiving.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -25,7 +25,7 @@ use crate::message_race_strategy::BasicStrategy; use crate::metrics::MessageLaneLoopMetrics; use async_trait::async_trait; -use bp_message_lane::MessageNonce; +use bp_messages::MessageNonce; use futures::stream::FusedStream; use relay_utils::FailedClient; use std::{marker::PhantomData, ops::RangeInclusive, time::Duration}; @@ -159,6 +159,10 @@ where type Error = C::Error; type TargetNoncesData = (); + async fn require_source_header(&self, id: TargetHeaderIdOf

) { + self.client.require_target_header_on_source(id).await + } + async fn nonces( &self, at_block: SourceHeaderIdOf

, diff --git a/polkadot/bridges/relays/messages-relay/src/message_race_strategy.rs b/polkadot/bridges/relays/messages/src/message_race_strategy.rs similarity index 97% rename from polkadot/bridges/relays/messages-relay/src/message_race_strategy.rs rename to polkadot/bridges/relays/messages/src/message_race_strategy.rs index dcbcbc4d37..7088f8d74b 100644 --- a/polkadot/bridges/relays/messages-relay/src/message_race_strategy.rs +++ b/polkadot/bridges/relays/messages/src/message_race_strategy.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -19,7 +19,7 @@ use crate::message_race_loop::{NoncesRange, RaceState, RaceStrategy, SourceClientNonces, TargetClientNonces}; -use bp_message_lane::MessageNonce; +use bp_messages::MessageNonce; use relay_utils::HeaderId; use std::{collections::VecDeque, fmt::Debug, marker::PhantomData, ops::RangeInclusive}; @@ -162,6 +162,15 @@ where self.source_queue.is_empty() } + fn required_source_header_at_target( + &self, + current_best: &HeaderId, + ) -> Option> { + self.source_queue + .back() + .and_then(|(h, _)| if h.0 > current_best.0 { Some(h.clone()) } else { None }) + } + fn best_at_source(&self) -> Option { let best_in_queue = self.source_queue.back().map(|(_, range)| range.end()); match (best_in_queue, self.best_target_nonce) { diff --git a/polkadot/bridges/relays/messages-relay/src/metrics.rs b/polkadot/bridges/relays/messages/src/metrics.rs similarity index 78% rename from polkadot/bridges/relays/messages-relay/src/metrics.rs rename to polkadot/bridges/relays/messages/src/metrics.rs index b001d32926..51a4118be8 100644 --- a/polkadot/bridges/relays/messages-relay/src/metrics.rs +++ b/polkadot/bridges/relays/messages/src/metrics.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. 
+// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -19,8 +19,8 @@ use crate::message_lane::MessageLane; use crate::message_lane_loop::{SourceClientState, TargetClientState}; -use bp_message_lane::MessageNonce; -use relay_utils::metrics::{register, GaugeVec, Metrics, Opts, Registry, U64}; +use bp_messages::MessageNonce; +use relay_utils::metrics::{metric_name, register, GaugeVec, Opts, PrometheusError, Registry, U64}; /// Message lane relay metrics. /// @@ -34,25 +34,28 @@ pub struct MessageLaneLoopMetrics { lane_state_nonces: GaugeVec, } -impl Metrics for MessageLaneLoopMetrics { - fn register(&self, registry: &Registry) -> Result<(), String> { - register(self.best_block_numbers.clone(), registry).map_err(|e| e.to_string())?; - register(self.lane_state_nonces.clone(), registry).map_err(|e| e.to_string())?; - Ok(()) - } -} - -impl Default for MessageLaneLoopMetrics { - fn default() -> Self { - MessageLaneLoopMetrics { - best_block_numbers: GaugeVec::new( - Opts::new("best_block_numbers", "Best finalized block numbers"), - &["type"], - ) - .expect("metric is static and thus valid; qed"), - lane_state_nonces: GaugeVec::new(Opts::new("lane_state_nonces", "Nonces of the lane state"), &["type"]) - .expect("metric is static and thus valid; qed"), - } +impl MessageLaneLoopMetrics { + /// Create and register messages loop metrics. 
+ pub fn new(registry: &Registry, prefix: Option<&str>) -> Result { + Ok(MessageLaneLoopMetrics { + best_block_numbers: register( + GaugeVec::new( + Opts::new( + metric_name(prefix, "best_block_numbers"), + "Best finalized block numbers", + ), + &["type"], + )?, + registry, + )?, + lane_state_nonces: register( + GaugeVec::new( + Opts::new(metric_name(prefix, "lane_state_nonces"), "Nonces of the lane state"), + &["type"], + )?, + registry, + )?, + }) } } diff --git a/polkadot/bridges/relays/substrate/Cargo.toml b/polkadot/bridges/relays/substrate/Cargo.toml deleted file mode 100644 index 120501e511..0000000000 --- a/polkadot/bridges/relays/substrate/Cargo.toml +++ /dev/null @@ -1,49 +0,0 @@ -[package] -name = "substrate-relay" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -async-std = "1.9.0" -async-trait = "0.1.42" -codec = { package = "parity-scale-codec", version = "2.0.0" } -futures = "0.3.12" -hex = "0.4" -log = "0.4.14" -num-traits = "0.2" -paste = "1.0" -structopt = "0.3" - -# Bridge dependencies - -bp-header-chain = { path = "../../primitives/header-chain" } -bp-kusama = { path = "../../primitives/kusama" } -bp-message-lane = { path = "../../primitives/message-lane" } -bp-millau = { path = "../../primitives/millau" } -bp-polkadot = { path = "../../primitives/polkadot" } -bp-runtime = { path = "../../primitives/runtime" } -bp-rialto = { path = "../../primitives/rialto" } -bridge-runtime-common = { path = "../../bin/runtime-common" } -headers-relay = { path = "../headers-relay" } -messages-relay = { path = "../messages-relay" } -millau-runtime = { path = "../../bin/millau/runtime" } -pallet-bridge-call-dispatch = { path = "../../modules/call-dispatch" } -pallet-message-lane = { path = "../../modules/message-lane" } -pallet-substrate-bridge = { path = "../../modules/substrate" } -relay-kusama-client = { path = "../kusama-client" } -relay-millau-client = { 
path = "../millau-client" } -relay-polkadot-client = { path = "../polkadot-client" } -relay-rialto-client = { path = "../rialto-client" } -relay-substrate-client = { path = "../substrate-client" } -relay-utils = { path = "../utils" } -rialto-runtime = { path = "../../bin/rialto/runtime" } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/substrate/src/cli.rs b/polkadot/bridges/relays/substrate/src/cli.rs deleted file mode 100644 index 8dc241f069..0000000000 --- a/polkadot/bridges/relays/substrate/src/cli.rs +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Deal with CLI args of substrate-to-substrate relay. 
- -use bp_message_lane::LaneId; -use frame_support::weights::Weight; -use sp_core::Bytes; -use sp_finality_grandpa::SetId as GrandpaAuthoritiesSetId; -use structopt::{clap::arg_enum, StructOpt}; - -/// Parse relay CLI args. -pub fn parse_args() -> Command { - Command::from_args() -} - -/// Substrate-to-Substrate bridge utilities. -#[derive(StructOpt)] -#[structopt(about = "Substrate-to-Substrate relay")] -pub enum Command { - /// Start headers relay between two chains. - /// - /// The on-chain bridge component should have been already initialized with - /// `init-bridge` sub-command. - RelayHeaders(RelayHeaders), - /// Start messages relay between two chains. - /// - /// Ties up to `MessageLane` pallets on both chains and starts relaying messages. - /// Requires the header relay to be already running. - RelayMessages(RelayMessages), - /// Initialize on-chain bridge pallet with current header data. - /// - /// Sends initialization transaction to bootstrap the bridge with current finalized block data. - InitBridge(InitBridge), - /// Send custom message over the bridge. - /// - /// Allows interacting with the bridge by sending messages over `MessageLane` component. - /// The message is being sent to the source chain, delivered to the target chain and dispatched - /// there. - SendMessage(SendMessage), -} - -#[derive(StructOpt)] -pub enum RelayHeaders { - /// Relay Millau headers to Rialto. - MillauToRialto { - #[structopt(flatten)] - millau: MillauConnectionParams, - #[structopt(flatten)] - rialto: RialtoConnectionParams, - #[structopt(flatten)] - rialto_sign: RialtoSigningParams, - #[structopt(flatten)] - prometheus_params: PrometheusParams, - }, - /// Relay Rialto headers to Millau. 
- RialtoToMillau { - #[structopt(flatten)] - rialto: RialtoConnectionParams, - #[structopt(flatten)] - millau: MillauConnectionParams, - #[structopt(flatten)] - millau_sign: MillauSigningParams, - #[structopt(flatten)] - prometheus_params: PrometheusParams, - }, -} - -#[derive(StructOpt)] -pub enum RelayMessages { - /// Serve given lane of Millau -> Rialto messages. - MillauToRialto { - #[structopt(flatten)] - millau: MillauConnectionParams, - #[structopt(flatten)] - millau_sign: MillauSigningParams, - #[structopt(flatten)] - rialto: RialtoConnectionParams, - #[structopt(flatten)] - rialto_sign: RialtoSigningParams, - #[structopt(flatten)] - prometheus_params: PrometheusParams, - /// Hex-encoded id of lane that should be served by relay. - #[structopt(long)] - lane: HexLaneId, - }, - /// Serve given lane of Rialto -> Millau messages. - RialtoToMillau { - #[structopt(flatten)] - rialto: RialtoConnectionParams, - #[structopt(flatten)] - rialto_sign: RialtoSigningParams, - #[structopt(flatten)] - millau: MillauConnectionParams, - #[structopt(flatten)] - millau_sign: MillauSigningParams, - #[structopt(flatten)] - prometheus_params: PrometheusParams, - /// Hex-encoded id of lane that should be served by relay. - #[structopt(long)] - lane: HexLaneId, - }, -} - -#[derive(StructOpt)] -pub enum InitBridge { - /// Initialize Millau headers bridge in Rialto. - MillauToRialto { - #[structopt(flatten)] - millau: MillauConnectionParams, - #[structopt(flatten)] - rialto: RialtoConnectionParams, - #[structopt(flatten)] - rialto_sign: RialtoSigningParams, - #[structopt(flatten)] - millau_bridge_params: MillauBridgeInitializationParams, - }, - /// Initialize Rialto headers bridge in Millau. 
- RialtoToMillau { - #[structopt(flatten)] - rialto: RialtoConnectionParams, - #[structopt(flatten)] - millau: MillauConnectionParams, - #[structopt(flatten)] - millau_sign: MillauSigningParams, - #[structopt(flatten)] - rialto_bridge_params: RialtoBridgeInitializationParams, - }, -} - -#[derive(StructOpt)] -pub enum SendMessage { - /// Submit message to given Millau -> Rialto lane. - MillauToRialto { - #[structopt(flatten)] - millau: MillauConnectionParams, - #[structopt(flatten)] - millau_sign: MillauSigningParams, - #[structopt(flatten)] - rialto_sign: RialtoSigningParams, - /// Hex-encoded lane id. - #[structopt(long)] - lane: HexLaneId, - /// Dispatch weight of the message. If not passed, determined automatically. - #[structopt(long)] - dispatch_weight: Option>, - /// Delivery and dispatch fee. If not passed, determined automatically. - #[structopt(long)] - fee: Option, - /// Message type. - #[structopt(subcommand)] - message: ToRialtoMessage, - /// The origin to use when dispatching the message on the target chain. - #[structopt(long, possible_values = &Origins::variants())] - origin: Origins, - }, - /// Submit message to given Rialto -> Millau lane. - RialtoToMillau { - #[structopt(flatten)] - rialto: RialtoConnectionParams, - #[structopt(flatten)] - rialto_sign: RialtoSigningParams, - #[structopt(flatten)] - millau_sign: MillauSigningParams, - /// Hex-encoded lane id. - #[structopt(long)] - lane: HexLaneId, - /// Dispatch weight of the message. If not passed, determined automatically. - #[structopt(long)] - dispatch_weight: Option>, - /// Delivery and dispatch fee. If not passed, determined automatically. - #[structopt(long)] - fee: Option, - /// Message type. - #[structopt(subcommand)] - message: ToMillauMessage, - /// The origin to use when dispatching the message on the target chain. - #[structopt(long, possible_values = &Origins::variants())] - origin: Origins, - }, -} - -/// All possible messages that may be delivered to the Rialto chain. 
-#[derive(StructOpt, Debug)] -pub enum ToRialtoMessage { - /// Make an on-chain remark (comment). - Remark { - /// Remark size. If not passed, small UTF8-encoded string is generated by relay as remark. - #[structopt(long)] - remark_size: Option>, - }, - /// Transfer the specified `amount` of native tokens to a particular `recipient`. - Transfer { - #[structopt(long)] - recipient: bp_rialto::AccountId, - #[structopt(long)] - amount: bp_rialto::Balance, - }, -} - -/// All possible messages that may be delivered to the Millau chain. -#[derive(StructOpt, Debug)] -pub enum ToMillauMessage { - /// Make an on-chain remark (comment). - Remark { - /// Size of the remark. If not passed, small UTF8-encoded string is generated by relay as remark. - #[structopt(long)] - remark_size: Option>, - }, - /// Transfer the specified `amount` of native tokens to a particular `recipient`. - Transfer { - #[structopt(long)] - recipient: bp_millau::AccountId, - #[structopt(long)] - amount: bp_millau::Balance, - }, -} - -arg_enum! { - #[derive(Debug)] - /// The origin to use when dispatching the message on the target chain. - /// - /// - `Target` uses account existing on the target chain (requires target private key). - /// - `Origin` uses account derived from the source-chain account. - pub enum Origins { - Target, - Source, - } -} - -/// Lane id. -#[derive(Debug)] -pub struct HexLaneId(LaneId); - -impl From for LaneId { - fn from(lane_id: HexLaneId) -> LaneId { - lane_id.0 - } -} - -impl std::str::FromStr for HexLaneId { - type Err = hex::FromHexError; - - fn from_str(s: &str) -> Result { - let mut lane_id = LaneId::default(); - hex::decode_to_slice(s, &mut lane_id)?; - Ok(HexLaneId(lane_id)) - } -} - -/// Prometheus metrics params. -#[derive(StructOpt)] -pub struct PrometheusParams { - /// Do not expose a Prometheus metric endpoint. - #[structopt(long)] - pub no_prometheus: bool, - /// Expose Prometheus endpoint at given interface. 
- #[structopt(long, default_value = "127.0.0.1")] - pub prometheus_host: String, - /// Expose Prometheus endpoint at given port. - #[structopt(long, default_value = "9616")] - pub prometheus_port: u16, -} - -impl From for Option { - fn from(cli_params: PrometheusParams) -> Option { - if !cli_params.no_prometheus { - Some(relay_utils::metrics::MetricsParams { - host: cli_params.prometheus_host, - port: cli_params.prometheus_port, - }) - } else { - None - } - } -} - -/// Either explicit or maximal allowed value. -#[derive(Debug)] -pub enum ExplicitOrMaximal { - /// User has explicitly specified argument value. - Explicit(V), - /// Maximal allowed value for this argument. - Maximal, -} - -impl std::str::FromStr for ExplicitOrMaximal -where - V::Err: std::fmt::Debug, -{ - type Err = String; - - fn from_str(s: &str) -> Result { - if s.to_lowercase() == "max" { - return Ok(ExplicitOrMaximal::Maximal); - } - - V::from_str(s) - .map(ExplicitOrMaximal::Explicit) - .map_err(|e| format!("Failed to parse '{:?}'. Expected 'max' or explicit value", e)) - } -} - -macro_rules! declare_chain_options { - ($chain:ident, $chain_prefix:ident) => { - paste::item! 
{ - #[doc = $chain " connection params."] - #[derive(StructOpt)] - pub struct [<$chain ConnectionParams>] { - #[doc = "Connect to " $chain " node at given host."] - #[structopt(long)] - pub [<$chain_prefix _host>]: String, - #[doc = "Connect to " $chain " node websocket server at given port."] - #[structopt(long)] - pub [<$chain_prefix _port>]: u16, - } - - #[doc = $chain " signing params."] - #[derive(StructOpt)] - pub struct [<$chain SigningParams>] { - #[doc = "The SURI of secret key to use when transactions are submitted to the " $chain " node."] - #[structopt(long)] - pub [<$chain_prefix _signer>]: String, - #[doc = "The password for the SURI of secret key to use when transactions are submitted to the " $chain " node."] - #[structopt(long)] - pub [<$chain_prefix _signer_password>]: Option, - } - - #[doc = $chain " headers bridge initialization params."] - #[derive(StructOpt)] - pub struct [<$chain BridgeInitializationParams>] { - #[doc = "Hex-encoded " $chain " header to initialize bridge with. If not specified, genesis header is used."] - #[structopt(long)] - pub [<$chain_prefix _initial_header>]: Option, - #[doc = "Hex-encoded " $chain " GRANDPA authorities set to initialize bridge with. If not specified, set from genesis block is used."] - #[structopt(long)] - pub [<$chain_prefix _initial_authorities>]: Option, - #[doc = "Id of the " $chain " GRANDPA authorities set to initialize bridge with. If not specified, zero is used."] - #[structopt(long)] - pub [<$chain_prefix _initial_authorities_set_id>]: Option, - } - } - }; -} - -declare_chain_options!(Rialto, rialto); -declare_chain_options!(Millau, millau); diff --git a/polkadot/bridges/relays/substrate/src/headers_initialize.rs b/polkadot/bridges/relays/substrate/src/headers_initialize.rs deleted file mode 100644 index 6b66a2e9bd..0000000000 --- a/polkadot/bridges/relays/substrate/src/headers_initialize.rs +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. 
-// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Initialize Substrate -> Substrate headers bridge. -//! -//! Initialization is a transaction that calls `initialize()` function of the -//! `pallet-substrate-bridge` pallet. This transaction brings initial header -//! and authorities set from source to target chain. The headers sync starts -//! with this header. - -use codec::Decode; -use pallet_substrate_bridge::InitializationData; -use relay_substrate_client::{Chain, Client}; -use sp_core::Bytes; -use sp_finality_grandpa::{AuthorityList as GrandpaAuthoritiesSet, SetId as GrandpaAuthoritiesSetId}; - -/// Submit headers-bridge initialization transaction. 
-pub async fn initialize( - source_client: Client, - target_client: Client, - raw_initial_header: Option, - raw_initial_authorities_set: Option, - initial_authorities_set_id: Option, - prepare_initialize_transaction: impl FnOnce(InitializationData) -> Result, -) { - let result = do_initialize( - source_client, - target_client, - raw_initial_header, - raw_initial_authorities_set, - initial_authorities_set_id, - prepare_initialize_transaction, - ) - .await; - - match result { - Ok(tx_hash) => log::info!( - target: "bridge", - "Successfully submitted {}-headers bridge initialization transaction to {}: {:?}", - SourceChain::NAME, - TargetChain::NAME, - tx_hash, - ), - Err(err) => log::error!( - target: "bridge", - "Failed to submit {}-headers bridge initialization transaction to {}: {:?}", - SourceChain::NAME, - TargetChain::NAME, - err, - ), - } -} - -/// Craft and submit initialization transaction, returning any error that may occur. -async fn do_initialize( - source_client: Client, - target_client: Client, - raw_initial_header: Option, - raw_initial_authorities_set: Option, - initial_authorities_set_id: Option, - prepare_initialize_transaction: impl FnOnce(InitializationData) -> Result, -) -> Result { - let initialization_data = prepare_initialization_data( - source_client, - raw_initial_header, - raw_initial_authorities_set, - initial_authorities_set_id, - ) - .await?; - let initialization_tx = prepare_initialize_transaction(initialization_data)?; - let initialization_tx_hash = target_client - .submit_extrinsic(initialization_tx) - .await - .map_err(|err| format!("Failed to submit {} transaction: {:?}", TargetChain::NAME, err))?; - Ok(initialization_tx_hash) -} - -/// Prepare initialization data for the headers-bridge pallet. 
-async fn prepare_initialization_data( - source_client: Client, - raw_initial_header: Option, - raw_initial_authorities_set: Option, - initial_authorities_set_id: Option, -) -> Result, String> { - let source_genesis_hash = *source_client.genesis_hash(); - - let initial_header = match raw_initial_header { - Some(raw_initial_header) => SourceChain::Header::decode(&mut &raw_initial_header.0[..]) - .map_err(|err| format!("Failed to decode {} initial header: {:?}", SourceChain::NAME, err))?, - None => source_client - .header_by_hash(source_genesis_hash) - .await - .map_err(|err| format!("Failed to retrive {} genesis header: {:?}", SourceChain::NAME, err))?, - }; - - let raw_initial_authorities_set = match raw_initial_authorities_set { - Some(raw_initial_authorities_set) => raw_initial_authorities_set.0, - None => source_client - .grandpa_authorities_set(source_genesis_hash) - .await - .map_err(|err| { - format!( - "Failed to retrive {} authorities set at genesis header: {:?}", - SourceChain::NAME, - err - ) - })?, - }; - let initial_authorities_set = - GrandpaAuthoritiesSet::decode(&mut &raw_initial_authorities_set[..]).map_err(|err| { - format!( - "Failed to decode {} initial authorities set: {:?}", - SourceChain::NAME, - err - ) - })?; - - Ok(InitializationData { - header: initial_header, - authority_list: initial_authorities_set, - set_id: initial_authorities_set_id.unwrap_or(0), - // There may be multiple scheduled changes, so on real chains we should select proper - // moment, when there's nothing scheduled. On ephemeral (temporary) chains, it is ok to - // start with genesis. - scheduled_change: None, - is_halted: false, - }) -} diff --git a/polkadot/bridges/relays/substrate/src/headers_maintain.rs b/polkadot/bridges/relays/substrate/src/headers_maintain.rs deleted file mode 100644 index 14432487ea..0000000000 --- a/polkadot/bridges/relays/substrate/src/headers_maintain.rs +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. 
-// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate-to-Substrate headers synchronization maintain procedure. -//! -//! Regular headers synchronization only depends on persistent justifications -//! that are generated when authorities set changes. This happens rarely on -//! real-word chains. So some other way to finalize headers is required. -//! -//! Full nodes are listening to GRANDPA messages, so they may have track authorities -//! votes on their own. They're returning both persistent and ephemeral justifications -//! (justifications that are not stored in the database and not broadcasted over network) -//! throught `grandpa_subscribeJustifications` RPC subscription. -//! -//! The idea of this maintain procedure is that when we see justification that 'improves' -//! best finalized header on the target chain, we submit this justification to the target -//! node. 
- -use crate::headers_pipeline::SubstrateHeadersSyncPipeline; - -use async_std::sync::{Arc, Mutex}; -use async_trait::async_trait; -use codec::{Decode, Encode}; -use futures::future::{poll_fn, FutureExt, TryFutureExt}; -use headers_relay::{ - sync::HeadersSync, - sync_loop::SyncMaintain, - sync_types::{HeaderIdOf, HeaderStatus}, -}; -use relay_substrate_client::{Chain, Client, Error as SubstrateError, JustificationsSubscription}; -use relay_utils::HeaderId; -use sp_core::Bytes; -use sp_runtime::{traits::Header as HeaderT, Justification}; -use std::{collections::VecDeque, marker::PhantomData, task::Poll}; - -/// Substrate-to-Substrate headers synchronization maintain procedure. -pub struct SubstrateHeadersToSubstrateMaintain { - pipeline: P, - target_client: Client, - justifications: Arc>>, - _marker: PhantomData, -} - -/// Future and already received justifications from the source chain. -struct Justifications { - /// Justifications stream. - stream: JustificationsSubscription, - /// Justifications that we have read from the stream but have not sent to the - /// target node, because their targets were still not synced. - queue: VecDeque<(HeaderIdOf

, Justification)>, -} - -impl - SubstrateHeadersToSubstrateMaintain -{ - /// Create new maintain procedure. - pub fn new(pipeline: P, target_client: Client, justifications: JustificationsSubscription) -> Self { - SubstrateHeadersToSubstrateMaintain { - pipeline, - target_client, - justifications: Arc::new(Mutex::new(Justifications { - stream: justifications, - queue: VecDeque::new(), - })), - _marker: Default::default(), - } - } -} - -#[async_trait] -impl Clone - for SubstrateHeadersToSubstrateMaintain -{ - fn clone(&self) -> Self { - SubstrateHeadersToSubstrateMaintain { - pipeline: self.pipeline.clone(), - target_client: self.target_client.clone(), - justifications: self.justifications.clone(), - _marker: Default::default(), - } - } -} - -#[async_trait] -impl SyncMaintain

for SubstrateHeadersToSubstrateMaintain -where - SourceChain: Chain, - ::Number: Into, - ::Hash: Into, - TargetChain: Chain, - P::Number: Decode, - P::Hash: Decode, - P: SubstrateHeadersSyncPipeline, -{ - async fn maintain(&self, sync: &mut HeadersSync

) { - // lock justifications before doing anything else - let mut justifications = match self.justifications.try_lock() { - Some(justifications) => justifications, - None => { - // this should never happen, as we use single-thread executor - log::warn!(target: "bridge", "Failed to acquire {} justifications lock", P::SOURCE_NAME); - return; - } - }; - - // we need to read best finalized header from the target node to be able to - // choose justification to submit - let best_finalized = match best_finalized_header_id::(&self.target_client).await { - Ok(best_finalized) => best_finalized, - Err(error) => { - log::warn!( - target: "bridge", - "Failed to read best finalized {} block from maintain: {:?}", - P::SOURCE_NAME, - error, - ); - return; - } - }; - - log::debug!( - target: "bridge", - "Read best finalized {} block from {}: {:?}", - P::SOURCE_NAME, - P::TARGET_NAME, - best_finalized, - ); - - // Select justification to submit to the target node. We're submitting at most one justification - // on every maintain call. So maintain rate directly affects finalization rate. - let justification_to_submit = poll_fn(|context| { - // read justifications from the stream and push to the queue - justifications.read_from_stream::(context); - - // remove all obsolete justifications from the queue - remove_obsolete::

(&mut justifications.queue, best_finalized); - - // select justification to submit - Poll::Ready(select_justification(&mut justifications.queue, sync)) - }) - .await; - - // finally - submit selected justification - if let Some((target, justification)) = justification_to_submit { - let submit_result = self - .pipeline - .make_complete_header_transaction(target, justification) - .and_then(|tx| self.target_client.submit_extrinsic(Bytes(tx.encode()))) - .await; - - match submit_result { - Ok(_) => log::debug!( - target: "bridge", - "Submitted justification received over {} subscription. Target: {:?}", - P::SOURCE_NAME, - target, - ), - Err(error) => log::warn!( - target: "bridge", - "Failed to submit justification received over {} subscription for {:?}: {:?}", - P::SOURCE_NAME, - target, - error, - ), - } - } - } -} - -impl

Justifications

-where - P::Number: Decode, - P::Hash: Decode, - P: SubstrateHeadersSyncPipeline, -{ - /// Read justifications from the subscription stream without blocking. - fn read_from_stream<'a, SourceHeader>(&mut self, context: &mut std::task::Context<'a>) - where - SourceHeader: HeaderT, - SourceHeader::Number: Into, - SourceHeader::Hash: Into, - { - loop { - let maybe_next_justification = self.stream.next(); - futures::pin_mut!(maybe_next_justification); - - let maybe_next_justification = maybe_next_justification.poll_unpin(context); - let justification = match maybe_next_justification { - Poll::Ready(justification) => justification, - Poll::Pending => return, - }; - - // decode justification target - let target = bp_header_chain::justification::decode_justification_target::(&justification); - let target = match target { - Ok((target_hash, target_number)) => HeaderId(target_number.into(), target_hash.into()), - Err(error) => { - log::warn!( - target: "bridge", - "Failed to decode justification from {} subscription: {:?}", - P::SOURCE_NAME, - error, - ); - continue; - } - }; - - log::debug!( - target: "bridge", - "Received {} justification over subscription. Target: {:?}", - P::SOURCE_NAME, - target, - ); - - self.queue.push_back((target, justification.0)); - } - } -} - -/// Clean queue of all justifications that are justifying already finalized blocks. -fn remove_obsolete( - queue: &mut VecDeque<(HeaderIdOf

, Justification)>, - best_finalized: HeaderIdOf

, -) { - while queue - .front() - .map(|(target, _)| target.0 <= best_finalized.0) - .unwrap_or(false) - { - queue.pop_front(); - } -} - -/// Select appropriate justification that would improve best finalized block on target node. -/// -/// It is assumed that the selected justification will be submitted to the target node. The -/// justification itself and all preceeding justifications are removed from the queue. -fn select_justification

( - queue: &mut VecDeque<(HeaderIdOf

, Justification)>, - sync: &mut HeadersSync

, -) -> Option<(HeaderIdOf

, Justification)> -where - P: SubstrateHeadersSyncPipeline, -{ - let mut selected_justification = None; - while let Some((target, justification)) = queue.pop_front() { - // if we're waiting for this justification, report it - if sync.headers().requires_completion_data(&target) { - sync.headers_mut().completion_response(&target, Some(justification)); - // we won't submit previous justifications as we going to submit justification for - // next header - selected_justification = None; - // we won't submit next justifications as we need to submit previous justifications - // first - break; - } - - // if we know that the header is already synced (it is known to the target node), let's - // select it for submission. We still may select better justification on the next iteration. - if sync.headers().status(&target) == HeaderStatus::Synced { - selected_justification = Some((target, justification)); - continue; - } - - // finally - return justification back to the queue - queue.push_back((target, justification)); - break; - } - - selected_justification -} - -/// Returns best finalized source header on the target chain. 
-async fn best_finalized_header_id(client: &Client) -> Result, SubstrateError> -where - P: SubstrateHeadersSyncPipeline, - P::Number: Decode, - P::Hash: Decode, - C: Chain, -{ - let call = P::FINALIZED_BLOCK_METHOD.into(); - let data = Bytes(Vec::new()); - - let encoded_response = client.state_call(call, data, None).await?; - let decoded_response: (P::Number, P::Hash) = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - - let best_header_id = HeaderId(decoded_response.0, decoded_response.1); - Ok(best_header_id) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::headers_pipeline::sync_params; - use crate::millau_headers_to_rialto::MillauHeadersToRialto; - - fn parent_hash(index: u8) -> bp_millau::Hash { - if index == 1 { - Default::default() - } else { - header(index - 1).hash() - } - } - - fn header_hash(index: u8) -> bp_millau::Hash { - header(index).hash() - } - - fn header(index: u8) -> bp_millau::Header { - bp_millau::Header::new( - index as _, - Default::default(), - Default::default(), - parent_hash(index), - Default::default(), - ) - } - - #[test] - fn obsolete_justifications_are_removed() { - let mut queue = vec![ - (HeaderId(1, header_hash(1)), vec![1]), - (HeaderId(2, header_hash(2)), vec![2]), - (HeaderId(3, header_hash(3)), vec![3]), - ] - .into_iter() - .collect(); - - remove_obsolete::(&mut queue, HeaderId(2, header_hash(2))); - - assert_eq!( - queue, - vec![(HeaderId(3, header_hash(3)), vec![3])] - .into_iter() - .collect::>(), - ); - } - - #[test] - fn latest_justification_is_selected() { - let mut queue = vec![ - (HeaderId(1, header_hash(1)), vec![1]), - (HeaderId(2, header_hash(2)), vec![2]), - (HeaderId(3, header_hash(3)), vec![3]), - ] - .into_iter() - .collect(); - let mut sync = HeadersSync::::new(sync_params()); - sync.headers_mut().header_response(header(1).into()); - sync.headers_mut().header_response(header(2).into()); - sync.headers_mut().header_response(header(3).into()); - 
sync.target_best_header_response(HeaderId(2, header_hash(2))); - - assert_eq!( - select_justification(&mut queue, &mut sync), - Some((HeaderId(2, header_hash(2)), vec![2])), - ); - } - - #[test] - fn required_justification_is_reported() { - let mut queue = vec![ - (HeaderId(1, header_hash(1)), vec![1]), - (HeaderId(2, header_hash(2)), vec![2]), - (HeaderId(3, header_hash(3)), vec![3]), - ] - .into_iter() - .collect(); - let mut sync = HeadersSync::::new(sync_params()); - sync.headers_mut().header_response(header(1).into()); - sync.headers_mut().header_response(header(2).into()); - sync.headers_mut().header_response(header(3).into()); - sync.headers_mut() - .incomplete_headers_response(vec![HeaderId(2, header_hash(2))].into_iter().collect()); - sync.target_best_header_response(HeaderId(2, header_hash(2))); - - assert_eq!(sync.headers_mut().header_to_complete(), None,); - - assert_eq!(select_justification(&mut queue, &mut sync), None,); - - assert_eq!( - sync.headers_mut().header_to_complete(), - Some((HeaderId(2, header_hash(2)), &vec![2])), - ); - } -} diff --git a/polkadot/bridges/relays/substrate/src/headers_pipeline.rs b/polkadot/bridges/relays/substrate/src/headers_pipeline.rs deleted file mode 100644 index 8ad6fc50b9..0000000000 --- a/polkadot/bridges/relays/substrate/src/headers_pipeline.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate-to-Substrate headers sync entrypoint. - -use crate::{headers_maintain::SubstrateHeadersToSubstrateMaintain, headers_target::SubstrateHeadersTarget}; - -use async_trait::async_trait; -use codec::Encode; -use headers_relay::{ - sync::{HeadersSyncParams, TargetTransactionMode}, - sync_types::{HeaderIdOf, HeadersSyncPipeline, QueuedHeader, SourceHeader}, -}; -use relay_substrate_client::{ - headers_source::HeadersSource, BlockNumberOf, Chain, Client, Error as SubstrateError, HashOf, -}; -use relay_utils::BlockNumberBase; -use sp_runtime::Justification; -use std::marker::PhantomData; - -/// Headers sync pipeline for Substrate <-> Substrate relays. -#[async_trait] -pub trait SubstrateHeadersSyncPipeline: HeadersSyncPipeline { - /// Name of the `best_block` runtime method. - const BEST_BLOCK_METHOD: &'static str; - /// Name of the `finalized_block` runtime method. - const FINALIZED_BLOCK_METHOD: &'static str; - /// Name of the `is_known_block` runtime method. - const IS_KNOWN_BLOCK_METHOD: &'static str; - /// Name of the `incomplete_headers` runtime method. - const INCOMPLETE_HEADERS_METHOD: &'static str; - - /// Signed transaction type. - type SignedTransaction: Send + Sync + Encode; - - /// Make submit header transaction. - async fn make_submit_header_transaction( - &self, - header: QueuedHeader, - ) -> Result; - - /// Make completion transaction for the header. - async fn make_complete_header_transaction( - &self, - id: HeaderIdOf, - completion: Justification, - ) -> Result; -} - -/// Substrate-to-Substrate headers pipeline. -#[derive(Debug, Clone)] -pub struct SubstrateHeadersToSubstrate { - /// Client for the target chain. - pub(crate) target_client: Client, - /// Data required to sign target chain transactions. - pub(crate) target_sign: TargetSign, - /// Unused generic arguments dump. 
- _marker: PhantomData<(SourceChain, SourceSyncHeader)>, -} - -impl - SubstrateHeadersToSubstrate -{ - /// Create new Substrate-to-Substrate headers pipeline. - pub fn new(target_client: Client, target_sign: TargetSign) -> Self { - SubstrateHeadersToSubstrate { - target_client, - target_sign, - _marker: Default::default(), - } - } -} - -impl HeadersSyncPipeline - for SubstrateHeadersToSubstrate -where - SourceChain: Clone + Chain, - BlockNumberOf: BlockNumberBase, - SourceSyncHeader: - SourceHeader, BlockNumberOf> + std::ops::Deref, - TargetChain: Clone + Chain, - TargetSign: Clone + Send + Sync, -{ - const SOURCE_NAME: &'static str = SourceChain::NAME; - const TARGET_NAME: &'static str = TargetChain::NAME; - - type Hash = HashOf; - type Number = BlockNumberOf; - type Header = SourceSyncHeader; - type Extra = (); - type Completion = Justification; - - fn estimate_size(source: &QueuedHeader) -> usize { - source.header().encode().len() - } -} - -/// Return sync parameters for Substrate-to-Substrate headers sync. -pub fn sync_params() -> HeadersSyncParams { - HeadersSyncParams { - max_future_headers_to_download: 32, - max_headers_in_submitted_status: 8, - max_headers_in_single_submit: 1, - max_headers_size_in_single_submit: 1024 * 1024, - prune_depth: 256, - target_tx_mode: TargetTransactionMode::Signed, - } -} - -/// Run Substrate-to-Substrate headers sync. 
-pub async fn run( - pipeline: P, - source_client: Client, - target_client: Client, - metrics_params: Option, -) where - P: SubstrateHeadersSyncPipeline< - Hash = HashOf, - Number = BlockNumberOf, - Completion = Justification, - Extra = (), - >, - P::Header: SourceHeader, BlockNumberOf>, - SourceChain: Clone + Chain, - SourceChain::Header: Into, - BlockNumberOf: BlockNumberBase, - TargetChain: Clone + Chain, -{ - let source_justifications = match source_client.clone().subscribe_justifications().await { - Ok(source_justifications) => source_justifications, - Err(error) => { - log::warn!( - target: "bridge", - "Failed to subscribe to {} justifications: {:?}", - SourceChain::NAME, - error, - ); - - return; - } - }; - - let sync_maintain = SubstrateHeadersToSubstrateMaintain::<_, SourceChain, _>::new( - pipeline.clone(), - target_client.clone(), - source_justifications, - ); - - log::info!( - target: "bridge", - "Starting {} -> {} headers relay", - SourceChain::NAME, - TargetChain::NAME, - ); - - headers_relay::sync_loop::run( - HeadersSource::new(source_client), - SourceChain::AVERAGE_BLOCK_INTERVAL, - SubstrateHeadersTarget::new(target_client, pipeline), - TargetChain::AVERAGE_BLOCK_INTERVAL, - sync_maintain, - sync_params(), - metrics_params, - futures::future::pending(), - ); -} diff --git a/polkadot/bridges/relays/substrate/src/headers_target.rs b/polkadot/bridges/relays/substrate/src/headers_target.rs deleted file mode 100644 index 2b5f63a7fe..0000000000 --- a/polkadot/bridges/relays/substrate/src/headers_target.rs +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate client as Substrate headers target. The chain we connect to should have -//! runtime that implements `HeaderApi` to allow bridging with -//! chain. - -use crate::headers_pipeline::SubstrateHeadersSyncPipeline; - -use async_trait::async_trait; -use codec::{Decode, Encode}; -use futures::TryFutureExt; -use headers_relay::{ - sync_loop::TargetClient, - sync_types::{HeaderIdOf, QueuedHeader, SubmittedHeaders}, -}; -use relay_substrate_client::{Chain, Client, Error as SubstrateError}; -use relay_utils::{relay_loop::Client as RelayClient, HeaderId}; -use sp_core::Bytes; -use sp_runtime::Justification; -use std::collections::HashSet; - -/// Substrate client as Substrate headers target. -pub struct SubstrateHeadersTarget { - client: Client, - pipeline: P, -} - -impl SubstrateHeadersTarget { - /// Create new Substrate headers target. - pub fn new(client: Client, pipeline: P) -> Self { - SubstrateHeadersTarget { client, pipeline } - } -} - -impl Clone for SubstrateHeadersTarget { - fn clone(&self) -> Self { - SubstrateHeadersTarget { - client: self.client.clone(), - pipeline: self.pipeline.clone(), - } - } -} - -#[async_trait] -impl RelayClient for SubstrateHeadersTarget { - type Error = SubstrateError; - - async fn reconnect(&mut self) -> Result<(), SubstrateError> { - self.client.reconnect().await - } -} - -#[async_trait] -impl TargetClient

for SubstrateHeadersTarget -where - C: Chain, - P::Number: Decode, - P::Hash: Decode + Encode, - P: SubstrateHeadersSyncPipeline, -{ - async fn best_header_id(&self) -> Result, SubstrateError> { - // we can't continue to relay headers if target node is out of sync, because - // it may have already received (some of) headers that we're going to relay - self.client.ensure_synced().await?; - - let call = P::BEST_BLOCK_METHOD.into(); - let data = Bytes(Vec::new()); - - let encoded_response = self.client.state_call(call, data, None).await?; - let decoded_response: Vec<(P::Number, P::Hash)> = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - - // If we parse an empty list of headers it means that bridge pallet has not been initalized - // yet. Otherwise we expect to always have at least one header. - decoded_response - .last() - .ok_or(SubstrateError::UninitializedBridgePallet) - .map(|(num, hash)| HeaderId(*num, *hash)) - } - - async fn is_known_header(&self, id: HeaderIdOf

) -> Result<(HeaderIdOf

, bool), SubstrateError> { - let call = P::IS_KNOWN_BLOCK_METHOD.into(); - let data = Bytes(id.1.encode()); - - let encoded_response = self.client.state_call(call, data, None).await?; - let is_known_block: bool = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - - Ok((id, is_known_block)) - } - - async fn submit_headers( - &self, - mut headers: Vec>, - ) -> SubmittedHeaders, SubstrateError> { - debug_assert_eq!( - headers.len(), - 1, - "Substrate pallet only supports single header / transaction" - ); - - let header = headers.remove(0); - let id = header.id(); - let submit_transaction_result = self - .pipeline - .make_submit_header_transaction(header) - .and_then(|tx| self.client.submit_extrinsic(Bytes(tx.encode()))) - .await; - - match submit_transaction_result { - Ok(_) => SubmittedHeaders { - submitted: vec![id], - incomplete: Vec::new(), - rejected: Vec::new(), - fatal_error: None, - }, - Err(error) => SubmittedHeaders { - submitted: Vec::new(), - incomplete: Vec::new(), - rejected: vec![id], - fatal_error: Some(error), - }, - } - } - - async fn incomplete_headers_ids(&self) -> Result>, SubstrateError> { - let call = P::INCOMPLETE_HEADERS_METHOD.into(); - let data = Bytes(Vec::new()); - - let encoded_response = self.client.state_call(call, data, None).await?; - let decoded_response: Vec<(P::Number, P::Hash)> = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - - let incomplete_headers = decoded_response - .into_iter() - .map(|(number, hash)| HeaderId(number, hash)) - .collect(); - Ok(incomplete_headers) - } - - async fn complete_header( - &self, - id: HeaderIdOf

, - completion: Justification, - ) -> Result, SubstrateError> { - let tx = self.pipeline.make_complete_header_transaction(id, completion).await?; - self.client.submit_extrinsic(Bytes(tx.encode())).await?; - Ok(id) - } - - async fn requires_extra(&self, header: QueuedHeader

) -> Result<(HeaderIdOf

, bool), SubstrateError> { - Ok((header.id(), false)) - } -} diff --git a/polkadot/bridges/relays/substrate/src/main.rs b/polkadot/bridges/relays/substrate/src/main.rs deleted file mode 100644 index c73533cdfc..0000000000 --- a/polkadot/bridges/relays/substrate/src/main.rs +++ /dev/null @@ -1,709 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate-to-substrate relay entrypoint. - -#![warn(missing_docs)] - -use codec::{Decode, Encode}; -use frame_support::weights::{GetDispatchInfo, Weight}; -use pallet_bridge_call_dispatch::{CallOrigin, MessagePayload}; -use relay_kusama_client::Kusama; -use relay_millau_client::{Millau, SigningParams as MillauSigningParams}; -use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{Chain, ConnectionParams, TransactionSignScheme}; -use relay_utils::initialize::initialize_relay; -use sp_core::{Bytes, Pair}; -use sp_runtime::traits::IdentifyAccount; - -/// Kusama node client. -pub type KusamaClient = relay_substrate_client::Client; -/// Millau node client. -pub type MillauClient = relay_substrate_client::Client; -/// Rialto node client. 
-pub type RialtoClient = relay_substrate_client::Client; - -mod cli; -mod headers_initialize; -mod headers_maintain; -mod headers_pipeline; -mod headers_target; -mod messages_lane; -mod messages_source; -mod messages_target; -mod millau_headers_to_rialto; -mod millau_messages_to_rialto; -mod rialto_headers_to_millau; -mod rialto_messages_to_millau; - -fn main() { - initialize_relay(); - - let result = async_std::task::block_on(run_command(cli::parse_args())); - if let Err(error) = result { - log::error!(target: "bridge", "Failed to start relay: {}", error); - } -} - -async fn run_command(command: cli::Command) -> Result<(), String> { - match command { - cli::Command::InitBridge(arg) => run_init_bridge(arg).await, - cli::Command::RelayHeaders(arg) => run_relay_headers(arg).await, - cli::Command::RelayMessages(arg) => run_relay_messages(arg).await, - cli::Command::SendMessage(arg) => run_send_message(arg).await, - } -} - -async fn run_init_bridge(command: cli::InitBridge) -> Result<(), String> { - match command { - cli::InitBridge::MillauToRialto { - millau, - rialto, - rialto_sign, - millau_bridge_params, - } => { - let millau_client = millau.into_client().await?; - let rialto_client = rialto.into_client().await?; - let rialto_sign = rialto_sign.parse()?; - - let rialto_signer_next_index = rialto_client - .next_account_index(rialto_sign.signer.public().into()) - .await?; - - headers_initialize::initialize( - millau_client, - rialto_client.clone(), - millau_bridge_params.millau_initial_header, - millau_bridge_params.millau_initial_authorities, - millau_bridge_params.millau_initial_authorities_set_id, - move |initialization_data| { - Ok(Bytes( - Rialto::sign_transaction( - &rialto_client, - &rialto_sign.signer, - rialto_signer_next_index, - rialto_runtime::SudoCall::sudo(Box::new( - rialto_runtime::BridgeMillauCall::initialize(initialization_data).into(), - )) - .into(), - ) - .encode(), - )) - }, - ) - .await; - } - cli::InitBridge::RialtoToMillau { - rialto, - 
millau, - millau_sign, - rialto_bridge_params, - } => { - let rialto_client = rialto.into_client().await?; - let millau_client = millau.into_client().await?; - let millau_sign = millau_sign.parse()?; - let millau_signer_next_index = millau_client - .next_account_index(millau_sign.signer.public().into()) - .await?; - - headers_initialize::initialize( - rialto_client, - millau_client.clone(), - rialto_bridge_params.rialto_initial_header, - rialto_bridge_params.rialto_initial_authorities, - rialto_bridge_params.rialto_initial_authorities_set_id, - move |initialization_data| { - Ok(Bytes( - Millau::sign_transaction( - &millau_client, - &millau_sign.signer, - millau_signer_next_index, - millau_runtime::SudoCall::sudo(Box::new( - millau_runtime::BridgeRialtoCall::initialize(initialization_data).into(), - )) - .into(), - ) - .encode(), - )) - }, - ) - .await; - } - } - Ok(()) -} - -async fn run_relay_headers(command: cli::RelayHeaders) -> Result<(), String> { - match command { - cli::RelayHeaders::MillauToRialto { - millau, - rialto, - rialto_sign, - prometheus_params, - } => { - let millau_client = millau.into_client().await?; - let rialto_client = rialto.into_client().await?; - let rialto_sign = rialto_sign.parse()?; - millau_headers_to_rialto::run(millau_client, rialto_client, rialto_sign, prometheus_params.into()).await; - } - cli::RelayHeaders::RialtoToMillau { - rialto, - millau, - millau_sign, - prometheus_params, - } => { - let rialto_client = rialto.into_client().await?; - let millau_client = millau.into_client().await?; - let millau_sign = millau_sign.parse()?; - rialto_headers_to_millau::run(rialto_client, millau_client, millau_sign, prometheus_params.into()).await; - } - } - Ok(()) -} - -async fn run_relay_messages(command: cli::RelayMessages) -> Result<(), String> { - match command { - cli::RelayMessages::MillauToRialto { - millau, - millau_sign, - rialto, - rialto_sign, - prometheus_params, - lane, - } => { - let millau_client = millau.into_client().await?; 
- let millau_sign = millau_sign.parse()?; - let rialto_client = rialto.into_client().await?; - let rialto_sign = rialto_sign.parse()?; - - millau_messages_to_rialto::run( - millau_client, - millau_sign, - rialto_client, - rialto_sign, - lane.into(), - prometheus_params.into(), - ); - } - cli::RelayMessages::RialtoToMillau { - rialto, - rialto_sign, - millau, - millau_sign, - prometheus_params, - lane, - } => { - let rialto_client = rialto.into_client().await?; - let rialto_sign = rialto_sign.parse()?; - let millau_client = millau.into_client().await?; - let millau_sign = millau_sign.parse()?; - - rialto_messages_to_millau::run( - rialto_client, - rialto_sign, - millau_client, - millau_sign, - lane.into(), - prometheus_params.into(), - ); - } - } - Ok(()) -} - -async fn run_send_message(command: cli::SendMessage) -> Result<(), String> { - match command { - cli::SendMessage::MillauToRialto { - millau, - millau_sign, - rialto_sign, - lane, - message, - dispatch_weight, - fee, - origin, - .. - } => { - let millau_client = millau.into_client().await?; - let millau_sign = millau_sign.parse()?; - let rialto_sign = rialto_sign.parse()?; - let rialto_call = message.into_call(); - - let payload = - millau_to_rialto_message_payload(&millau_sign, &rialto_sign, &rialto_call, origin, dispatch_weight); - let dispatch_weight = payload.weight; - - let lane = lane.into(); - let fee = get_fee(fee, || { - estimate_message_delivery_and_dispatch_fee( - &millau_client, - bp_rialto::TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD, - lane, - payload.clone(), - ) - }) - .await?; - - let millau_call = millau_runtime::Call::BridgeRialtoMessageLane( - millau_runtime::MessageLaneCall::send_message(lane, payload, fee), - ); - - let signed_millau_call = Millau::sign_transaction( - &millau_client, - &millau_sign.signer, - millau_client - .next_account_index(millau_sign.signer.public().clone().into()) - .await?, - millau_call, - ) - .encode(); - - log::info!( - target: "bridge", - "Sending message to Rialto. 
Size: {}. Dispatch weight: {}. Fee: {}", - signed_millau_call.len(), - dispatch_weight, - fee, - ); - - millau_client.submit_extrinsic(Bytes(signed_millau_call)).await?; - } - cli::SendMessage::RialtoToMillau { - rialto, - rialto_sign, - millau_sign, - lane, - message, - dispatch_weight, - fee, - origin, - .. - } => { - let rialto_client = rialto.into_client().await?; - let rialto_sign = rialto_sign.parse()?; - let millau_sign = millau_sign.parse()?; - let millau_call = message.into_call(); - - let payload = - rialto_to_millau_message_payload(&rialto_sign, &millau_sign, &millau_call, origin, dispatch_weight); - let dispatch_weight = payload.weight; - - let lane = lane.into(); - let fee = get_fee(fee, || { - estimate_message_delivery_and_dispatch_fee( - &rialto_client, - bp_millau::TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD, - lane, - payload.clone(), - ) - }) - .await?; - - let rialto_call = rialto_runtime::Call::BridgeMillauMessageLane( - rialto_runtime::MessageLaneCall::send_message(lane, payload, fee), - ); - - let signed_rialto_call = Rialto::sign_transaction( - &rialto_client, - &rialto_sign.signer, - rialto_client - .next_account_index(rialto_sign.signer.public().clone().into()) - .await?, - rialto_call, - ) - .encode(); - - log::info!( - target: "bridge", - "Sending message to Millau. Size: {}. Dispatch weight: {}. 
Fee: {}", - signed_rialto_call.len(), - dispatch_weight, - fee, - ); - - rialto_client.submit_extrinsic(Bytes(signed_rialto_call)).await?; - } - } - Ok(()) -} - -async fn estimate_message_delivery_and_dispatch_fee( - client: &relay_substrate_client::Client, - estimate_fee_method: &str, - lane: bp_message_lane::LaneId, - payload: P, -) -> Result, relay_substrate_client::Error> { - let encoded_response = client - .state_call(estimate_fee_method.into(), (lane, payload).encode().into(), None) - .await?; - let decoded_response: Option = - Decode::decode(&mut &encoded_response.0[..]).map_err(relay_substrate_client::Error::ResponseParseFailed)?; - Ok(decoded_response) -} - -fn remark_payload(remark_size: Option>, maximal_allowed_size: u32) -> Vec { - match remark_size { - Some(cli::ExplicitOrMaximal::Explicit(remark_size)) => vec![0; remark_size], - Some(cli::ExplicitOrMaximal::Maximal) => vec![0; maximal_allowed_size as _], - None => format!( - "Unix time: {}", - std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH) - .unwrap_or_default() - .as_secs(), - ) - .as_bytes() - .to_vec(), - } -} - -fn rialto_to_millau_message_payload( - rialto_sign: &RialtoSigningParams, - millau_sign: &MillauSigningParams, - millau_call: &millau_runtime::Call, - origin: cli::Origins, - user_specified_dispatch_weight: Option>, -) -> rialto_runtime::millau_messages::ToMillauMessagePayload { - let millau_call_weight = prepare_call_dispatch_weight( - user_specified_dispatch_weight, - cli::ExplicitOrMaximal::Explicit(millau_call.get_dispatch_info().weight), - compute_maximal_message_dispatch_weight(bp_millau::max_extrinsic_weight()), - ); - let rialto_sender_public: bp_rialto::AccountSigner = rialto_sign.signer.public().clone().into(); - let rialto_account_id: bp_rialto::AccountId = rialto_sender_public.into_account(); - let millau_origin_public = millau_sign.signer.public(); - - MessagePayload { - spec_version: millau_runtime::VERSION.spec_version, - weight: 
millau_call_weight, - origin: match origin { - cli::Origins::Source => CallOrigin::SourceAccount(rialto_account_id), - cli::Origins::Target => { - let digest = rialto_runtime::millau_account_ownership_digest( - &millau_call, - rialto_account_id.clone(), - millau_runtime::VERSION.spec_version, - ); - - let digest_signature = millau_sign.signer.sign(&digest); - - CallOrigin::TargetAccount(rialto_account_id, millau_origin_public.into(), digest_signature.into()) - } - }, - call: millau_call.encode(), - } -} - -fn millau_to_rialto_message_payload( - millau_sign: &MillauSigningParams, - rialto_sign: &RialtoSigningParams, - rialto_call: &rialto_runtime::Call, - origin: cli::Origins, - user_specified_dispatch_weight: Option>, -) -> millau_runtime::rialto_messages::ToRialtoMessagePayload { - let rialto_call_weight = prepare_call_dispatch_weight( - user_specified_dispatch_weight, - cli::ExplicitOrMaximal::Explicit(rialto_call.get_dispatch_info().weight), - compute_maximal_message_dispatch_weight(bp_rialto::max_extrinsic_weight()), - ); - let millau_sender_public: bp_millau::AccountSigner = millau_sign.signer.public().clone().into(); - let millau_account_id: bp_millau::AccountId = millau_sender_public.into_account(); - let rialto_origin_public = rialto_sign.signer.public(); - - MessagePayload { - spec_version: rialto_runtime::VERSION.spec_version, - weight: rialto_call_weight, - origin: match origin { - cli::Origins::Source => CallOrigin::SourceAccount(millau_account_id), - cli::Origins::Target => { - let digest = millau_runtime::rialto_account_ownership_digest( - &rialto_call, - millau_account_id.clone(), - rialto_runtime::VERSION.spec_version, - ); - - let digest_signature = rialto_sign.signer.sign(&digest); - - CallOrigin::TargetAccount(millau_account_id, rialto_origin_public.into(), digest_signature.into()) - } - }, - call: rialto_call.encode(), - } -} - -fn prepare_call_dispatch_weight( - user_specified_dispatch_weight: Option>, - weight_from_pre_dispatch_call: 
cli::ExplicitOrMaximal, - maximal_allowed_weight: Weight, -) -> Weight { - match user_specified_dispatch_weight.unwrap_or(weight_from_pre_dispatch_call) { - cli::ExplicitOrMaximal::Explicit(weight) => weight, - cli::ExplicitOrMaximal::Maximal => maximal_allowed_weight, - } -} - -async fn get_fee(fee: Option, f: F) -> Result -where - Fee: Decode, - F: FnOnce() -> R, - R: std::future::Future, E>>, - E: std::fmt::Debug, -{ - match fee { - Some(fee) => Ok(fee), - None => match f().await { - Ok(Some(fee)) => Ok(fee), - Ok(None) => Err("Failed to estimate message fee. Message is too heavy?".into()), - Err(error) => Err(format!("Failed to estimate message fee: {:?}", error)), - }, - } -} - -fn compute_maximal_message_dispatch_weight(maximal_extrinsic_weight: Weight) -> Weight { - bridge_runtime_common::messages::target::maximal_incoming_message_dispatch_weight(maximal_extrinsic_weight) -} - -fn compute_maximal_message_arguments_size( - maximal_source_extrinsic_size: u32, - maximal_target_extrinsic_size: u32, -) -> u32 { - // assume that both signed extensions and other arguments fit 1KB - let service_tx_bytes_on_source_chain = 1024; - let maximal_source_extrinsic_size = maximal_source_extrinsic_size - service_tx_bytes_on_source_chain; - let maximal_call_size = - bridge_runtime_common::messages::target::maximal_incoming_message_size(maximal_target_extrinsic_size); - let maximal_call_size = if maximal_call_size > maximal_source_extrinsic_size { - maximal_source_extrinsic_size - } else { - maximal_call_size - }; - - // bytes in Call encoding that are used to encode everything except arguments - let service_bytes = 1 + 1 + 4; - maximal_call_size - service_bytes -} - -impl crate::cli::RialtoSigningParams { - /// Parse CLI parameters into typed signing params. 
- pub fn parse(self) -> Result { - RialtoSigningParams::from_suri(&self.rialto_signer, self.rialto_signer_password.as_deref()) - .map_err(|e| format!("Failed to parse rialto-signer: {:?}", e)) - } -} - -impl crate::cli::MillauSigningParams { - /// Parse CLI parameters into typed signing params. - pub fn parse(self) -> Result { - MillauSigningParams::from_suri(&self.millau_signer, self.millau_signer_password.as_deref()) - .map_err(|e| format!("Failed to parse millau-signer: {:?}", e)) - } -} - -impl crate::cli::MillauConnectionParams { - /// Convert CLI connection parameters into Millau RPC Client. - pub async fn into_client(self) -> relay_substrate_client::Result { - MillauClient::new(ConnectionParams { - host: self.millau_host, - port: self.millau_port, - }) - .await - } -} -impl crate::cli::RialtoConnectionParams { - /// Convert CLI connection parameters into Rialto RPC Client. - pub async fn into_client(self) -> relay_substrate_client::Result { - RialtoClient::new(ConnectionParams { - host: self.rialto_host, - port: self.rialto_port, - }) - .await - } -} - -impl crate::cli::ToRialtoMessage { - /// Convert CLI call request into runtime `Call` instance. - pub fn into_call(self) -> rialto_runtime::Call { - match self { - cli::ToRialtoMessage::Remark { remark_size } => { - rialto_runtime::Call::System(rialto_runtime::SystemCall::remark(remark_payload( - remark_size, - compute_maximal_message_arguments_size( - bp_millau::max_extrinsic_size(), - bp_rialto::max_extrinsic_size(), - ), - ))) - } - cli::ToRialtoMessage::Transfer { recipient, amount } => { - rialto_runtime::Call::Balances(rialto_runtime::BalancesCall::transfer(recipient, amount)) - } - } - } -} - -impl crate::cli::ToMillauMessage { - /// Convert CLI call request into runtime `Call` instance. 
- pub fn into_call(self) -> millau_runtime::Call { - match self { - cli::ToMillauMessage::Remark { remark_size } => { - millau_runtime::Call::System(millau_runtime::SystemCall::remark(remark_payload( - remark_size, - compute_maximal_message_arguments_size( - bp_rialto::max_extrinsic_size(), - bp_millau::max_extrinsic_size(), - ), - ))) - } - cli::ToMillauMessage::Transfer { recipient, amount } => { - millau_runtime::Call::Balances(millau_runtime::BalancesCall::transfer(recipient, amount)) - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use bp_message_lane::source_chain::TargetHeaderChain; - use sp_core::Pair; - use sp_runtime::traits::{IdentifyAccount, Verify}; - - #[test] - fn millau_signature_is_valid_on_rialto() { - let millau_sign = relay_millau_client::SigningParams::from_suri("//Dave", None).unwrap(); - - let call = rialto_runtime::Call::System(rialto_runtime::SystemCall::remark(vec![])); - - let millau_public: bp_millau::AccountSigner = millau_sign.signer.public().clone().into(); - let millau_account_id: bp_millau::AccountId = millau_public.into_account(); - - let digest = millau_runtime::rialto_account_ownership_digest( - &call, - millau_account_id, - rialto_runtime::VERSION.spec_version, - ); - - let rialto_signer = relay_rialto_client::SigningParams::from_suri("//Dave", None).unwrap(); - let signature = rialto_signer.signer.sign(&digest); - - assert!(signature.verify(&digest[..], &rialto_signer.signer.public())); - } - - #[test] - fn rialto_signature_is_valid_on_millau() { - let rialto_sign = relay_rialto_client::SigningParams::from_suri("//Dave", None).unwrap(); - - let call = millau_runtime::Call::System(millau_runtime::SystemCall::remark(vec![])); - - let rialto_public: bp_rialto::AccountSigner = rialto_sign.signer.public().clone().into(); - let rialto_account_id: bp_rialto::AccountId = rialto_public.into_account(); - - let digest = rialto_runtime::millau_account_ownership_digest( - &call, - rialto_account_id, - 
millau_runtime::VERSION.spec_version, - ); - - let millau_signer = relay_millau_client::SigningParams::from_suri("//Dave", None).unwrap(); - let signature = millau_signer.signer.sign(&digest); - - assert!(signature.verify(&digest[..], &millau_signer.signer.public())); - } - - #[test] - fn maximal_rialto_to_millau_message_arguments_size_is_computed_correctly() { - use rialto_runtime::millau_messages::Millau; - - let maximal_remark_size = - compute_maximal_message_arguments_size(bp_rialto::max_extrinsic_size(), bp_millau::max_extrinsic_size()); - - let call: millau_runtime::Call = millau_runtime::SystemCall::remark(vec![42; maximal_remark_size as _]).into(); - let payload = pallet_bridge_call_dispatch::MessagePayload { - spec_version: Default::default(), - weight: call.get_dispatch_info().weight, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: call.encode(), - }; - assert_eq!(Millau::verify_message(&payload), Ok(())); - - let call: millau_runtime::Call = - millau_runtime::SystemCall::remark(vec![42; (maximal_remark_size + 1) as _]).into(); - let payload = pallet_bridge_call_dispatch::MessagePayload { - spec_version: Default::default(), - weight: call.get_dispatch_info().weight, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: call.encode(), - }; - assert!(Millau::verify_message(&payload).is_err()); - } - - #[test] - fn maximal_size_remark_to_rialto_is_generated_correctly() { - assert!( - bridge_runtime_common::messages::target::maximal_incoming_message_size( - bp_rialto::max_extrinsic_size() - ) > bp_millau::max_extrinsic_size(), - "We can't actually send maximal messages to Rialto from Millau, because Millau extrinsics can't be that large", - ) - } - - #[test] - fn maximal_rialto_to_millau_message_dispatch_weight_is_computed_correctly() { - use rialto_runtime::millau_messages::Millau; - - let maximal_dispatch_weight = compute_maximal_message_dispatch_weight(bp_millau::max_extrinsic_weight()); - let call: 
millau_runtime::Call = rialto_runtime::SystemCall::remark(vec![]).into(); - - let payload = pallet_bridge_call_dispatch::MessagePayload { - spec_version: Default::default(), - weight: maximal_dispatch_weight, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: call.encode(), - }; - assert_eq!(Millau::verify_message(&payload), Ok(())); - - let payload = pallet_bridge_call_dispatch::MessagePayload { - spec_version: Default::default(), - weight: maximal_dispatch_weight + 1, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: call.encode(), - }; - assert!(Millau::verify_message(&payload).is_err()); - } - - #[test] - fn maximal_weight_fill_block_to_rialto_is_generated_correctly() { - use millau_runtime::rialto_messages::Rialto; - - let maximal_dispatch_weight = compute_maximal_message_dispatch_weight(bp_rialto::max_extrinsic_weight()); - let call: rialto_runtime::Call = millau_runtime::SystemCall::remark(vec![]).into(); - - let payload = pallet_bridge_call_dispatch::MessagePayload { - spec_version: Default::default(), - weight: maximal_dispatch_weight, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: call.encode(), - }; - assert_eq!(Rialto::verify_message(&payload), Ok(())); - - let payload = pallet_bridge_call_dispatch::MessagePayload { - spec_version: Default::default(), - weight: maximal_dispatch_weight + 1, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: call.encode(), - }; - assert!(Rialto::verify_message(&payload).is_err()); - } -} diff --git a/polkadot/bridges/relays/substrate/src/millau_headers_to_rialto.rs b/polkadot/bridges/relays/substrate/src/millau_headers_to_rialto.rs deleted file mode 100644 index 8b77e71657..0000000000 --- a/polkadot/bridges/relays/substrate/src/millau_headers_to_rialto.rs +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Millau-to-Rialto headers sync entrypoint. - -use crate::{ - headers_pipeline::{SubstrateHeadersSyncPipeline, SubstrateHeadersToSubstrate}, - MillauClient, RialtoClient, -}; - -use async_trait::async_trait; -use bp_millau::{ - BEST_MILLAU_BLOCKS_METHOD, FINALIZED_MILLAU_BLOCK_METHOD, INCOMPLETE_MILLAU_HEADERS_METHOD, - IS_KNOWN_MILLAU_BLOCK_METHOD, -}; -use headers_relay::sync_types::QueuedHeader; -use relay_millau_client::{HeaderId as MillauHeaderId, Millau, SyncHeader as MillauSyncHeader}; -use relay_rialto_client::{BridgeMillauCall, Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{Error as SubstrateError, TransactionSignScheme}; -use sp_core::Pair; -use sp_runtime::Justification; - -/// Millau-to-Rialto headers sync pipeline. -pub(crate) type MillauHeadersToRialto = - SubstrateHeadersToSubstrate; -/// Millau header in-the-queue. 
-type QueuedMillauHeader = QueuedHeader; - -#[async_trait] -impl SubstrateHeadersSyncPipeline for MillauHeadersToRialto { - const BEST_BLOCK_METHOD: &'static str = BEST_MILLAU_BLOCKS_METHOD; - const FINALIZED_BLOCK_METHOD: &'static str = FINALIZED_MILLAU_BLOCK_METHOD; - const IS_KNOWN_BLOCK_METHOD: &'static str = IS_KNOWN_MILLAU_BLOCK_METHOD; - const INCOMPLETE_HEADERS_METHOD: &'static str = INCOMPLETE_MILLAU_HEADERS_METHOD; - - type SignedTransaction = ::SignedTransaction; - - async fn make_submit_header_transaction( - &self, - header: QueuedMillauHeader, - ) -> Result { - let account_id = self.target_sign.signer.public().as_array_ref().clone().into(); - let nonce = self.target_client.next_account_index(account_id).await?; - let call = BridgeMillauCall::import_signed_header(header.header().clone().into_inner()).into(); - let transaction = Rialto::sign_transaction(&self.target_client, &self.target_sign.signer, nonce, call); - Ok(transaction) - } - - async fn make_complete_header_transaction( - &self, - id: MillauHeaderId, - completion: Justification, - ) -> Result { - let account_id = self.target_sign.signer.public().as_array_ref().clone().into(); - let nonce = self.target_client.next_account_index(account_id).await?; - let call = BridgeMillauCall::finalize_header(id.1, completion).into(); - let transaction = Rialto::sign_transaction(&self.target_client, &self.target_sign.signer, nonce, call); - Ok(transaction) - } -} - -/// Run Millau-to-Rialto headers sync. 
-pub async fn run( - millau_client: MillauClient, - rialto_client: RialtoClient, - rialto_sign: RialtoSigningParams, - metrics_params: Option, -) { - crate::headers_pipeline::run( - MillauHeadersToRialto::new(rialto_client.clone(), rialto_sign), - millau_client, - rialto_client, - metrics_params, - ) - .await; -} diff --git a/polkadot/bridges/relays/substrate/src/rialto_headers_to_millau.rs b/polkadot/bridges/relays/substrate/src/rialto_headers_to_millau.rs deleted file mode 100644 index 3a13c6e148..0000000000 --- a/polkadot/bridges/relays/substrate/src/rialto_headers_to_millau.rs +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rialto-to-Millau headers sync entrypoint. 
- -use crate::{ - headers_pipeline::{SubstrateHeadersSyncPipeline, SubstrateHeadersToSubstrate}, - MillauClient, RialtoClient, -}; - -use async_trait::async_trait; -use bp_rialto::{ - BEST_RIALTO_BLOCKS_METHOD, FINALIZED_RIALTO_BLOCK_METHOD, INCOMPLETE_RIALTO_HEADERS_METHOD, - IS_KNOWN_RIALTO_BLOCK_METHOD, -}; -use headers_relay::sync_types::QueuedHeader; -use relay_millau_client::{BridgeRialtoCall, Millau, SigningParams as MillauSigningParams}; -use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SyncHeader as RialtoSyncHeader}; -use relay_substrate_client::{Error as SubstrateError, TransactionSignScheme}; -use sp_core::Pair; -use sp_runtime::Justification; - -/// Rialto-to-Millau headers sync pipeline. -type RialtoHeadersToMillau = SubstrateHeadersToSubstrate; -/// Rialto header in-the-queue. -type QueuedRialtoHeader = QueuedHeader; - -#[async_trait] -impl SubstrateHeadersSyncPipeline for RialtoHeadersToMillau { - const BEST_BLOCK_METHOD: &'static str = BEST_RIALTO_BLOCKS_METHOD; - const FINALIZED_BLOCK_METHOD: &'static str = FINALIZED_RIALTO_BLOCK_METHOD; - const IS_KNOWN_BLOCK_METHOD: &'static str = IS_KNOWN_RIALTO_BLOCK_METHOD; - const INCOMPLETE_HEADERS_METHOD: &'static str = INCOMPLETE_RIALTO_HEADERS_METHOD; - - type SignedTransaction = ::SignedTransaction; - - async fn make_submit_header_transaction( - &self, - header: QueuedRialtoHeader, - ) -> Result { - let account_id = self.target_sign.signer.public().as_array_ref().clone().into(); - let nonce = self.target_client.next_account_index(account_id).await?; - let call = BridgeRialtoCall::import_signed_header(header.header().clone().into_inner()).into(); - let transaction = Millau::sign_transaction(&self.target_client, &self.target_sign.signer, nonce, call); - Ok(transaction) - } - - async fn make_complete_header_transaction( - &self, - id: RialtoHeaderId, - completion: Justification, - ) -> Result { - let account_id = self.target_sign.signer.public().as_array_ref().clone().into(); - let nonce = 
self.target_client.next_account_index(account_id).await?; - let call = BridgeRialtoCall::finalize_header(id.1, completion).into(); - let transaction = Millau::sign_transaction(&self.target_client, &self.target_sign.signer, nonce, call); - Ok(transaction) - } -} - -/// Run Rialto-to-Millau headers sync. -pub async fn run( - rialto_client: RialtoClient, - millau_client: MillauClient, - millau_sign: MillauSigningParams, - metrics_params: Option, -) { - crate::headers_pipeline::run( - RialtoHeadersToMillau::new(millau_client.clone(), millau_sign), - rialto_client, - millau_client, - metrics_params, - ) - .await; -} diff --git a/polkadot/bridges/relays/utils/Cargo.toml b/polkadot/bridges/relays/utils/Cargo.toml index ce6a20bbc4..ff80cab533 100644 --- a/polkadot/bridges/relays/utils/Cargo.toml +++ b/polkadot/bridges/relays/utils/Cargo.toml @@ -10,10 +10,13 @@ ansi_term = "0.12" async-std = "1.6.5" async-trait = "0.1.40" backoff = "0.2" +isahc = "1.2" env_logger = "0.8.2" futures = "0.3.5" +jsonpath_lib = "0.2" log = "0.4.11" num-traits = "0.2" +serde_json = "1.0" sysinfo = "0.15" time = "0.2" diff --git a/polkadot/bridges/relays/utils/src/initialize.rs b/polkadot/bridges/relays/utils/src/initialize.rs index 6b0efb49e4..7d5f66a538 100644 --- a/polkadot/bridges/relays/utils/src/initialize.rs +++ b/polkadot/bridges/relays/utils/src/initialize.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -16,44 +16,80 @@ //! Relayer initialization functions. -use std::io::Write; +use std::{fmt::Display, io::Write}; /// Initialize relay environment. pub fn initialize_relay() { + initialize_logger(true); +} + +/// Initialize Relay logger instance. 
+pub fn initialize_logger(with_timestamp: bool) { let mut builder = env_logger::Builder::new(); - - let filters = match std::env::var("RUST_LOG") { - Ok(env_filters) => format!("bridge=info,{}", env_filters), - Err(_) => "bridge=info".into(), - }; - - builder.parse_filters(&filters); - builder.format(move |buf, record| { - writeln!(buf, "{}", { + builder.filter_level(log::LevelFilter::Warn); + builder.filter_module("bridge", log::LevelFilter::Info); + builder.parse_default_env(); + if with_timestamp { + builder.format(move |buf, record| { let timestamp = time::OffsetDateTime::try_now_local() .unwrap_or_else(|_| time::OffsetDateTime::now_utc()) .format("%Y-%m-%d %H:%M:%S %z"); - if cfg!(windows) { - format!("{} {} {} {}", timestamp, record.level(), record.target(), record.args()) + + let log_level = color_level(record.level()); + let log_target = color_target(record.target()); + let timestamp = if cfg!(windows) { + Either::Left(timestamp) } else { - use ansi_term::Colour as Color; - let log_level = match record.level() { - log::Level::Error => Color::Fixed(9).bold().paint(record.level().to_string()), - log::Level::Warn => Color::Fixed(11).bold().paint(record.level().to_string()), - log::Level::Info => Color::Fixed(10).paint(record.level().to_string()), - log::Level::Debug => Color::Fixed(14).paint(record.level().to_string()), - log::Level::Trace => Color::Fixed(12).paint(record.level().to_string()), - }; - format!( - "{} {} {} {}", - Color::Fixed(8).bold().paint(timestamp), - log_level, - Color::Fixed(8).paint(record.target()), - record.args() - ) - } - }) - }); + Either::Right(ansi_term::Colour::Fixed(8).bold().paint(timestamp)) + }; + + writeln!(buf, "{} {} {} {}", timestamp, log_level, log_target, record.args(),) + }); + } else { + builder.format(move |buf, record| { + let log_level = color_level(record.level()); + let log_target = color_target(record.target()); + + writeln!(buf, "{} {} {}", log_level, log_target, record.args(),) + }); + } builder.init(); } + 
+enum Either { + Left(A), + Right(B), +} +impl Display for Either { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Self::Left(a) => write!(fmt, "{}", a), + Self::Right(b) => write!(fmt, "{}", b), + } + } +} + +fn color_target(target: &str) -> impl Display + '_ { + if cfg!(windows) { + Either::Left(target) + } else { + Either::Right(ansi_term::Colour::Fixed(8).paint(target)) + } +} + +fn color_level(level: log::Level) -> impl Display { + if cfg!(windows) { + Either::Left(level) + } else { + let s = level.to_string(); + use ansi_term::Colour as Color; + Either::Right(match level { + log::Level::Error => Color::Fixed(9).bold().paint(s), + log::Level::Warn => Color::Fixed(11).bold().paint(s), + log::Level::Info => Color::Fixed(10).paint(s), + log::Level::Debug => Color::Fixed(14).paint(s), + log::Level::Trace => Color::Fixed(12).paint(s), + }) + } +} diff --git a/polkadot/bridges/relays/utils/src/lib.rs b/polkadot/bridges/relays/utils/src/lib.rs index f787e8763a..446e00cd23 100644 --- a/polkadot/bridges/relays/utils/src/lib.rs +++ b/polkadot/bridges/relays/utils/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -16,6 +16,8 @@ //! Utilities used by different relays. +pub use relay_loop::{relay_loop, relay_metrics}; + use backoff::{backoff::Backoff, ExponentialBackoff}; use futures::future::FutureExt; use std::time::Duration; diff --git a/polkadot/bridges/relays/utils/src/metrics.rs b/polkadot/bridges/relays/utils/src/metrics.rs index f38d1bda3a..c0eaeae337 100644 --- a/polkadot/bridges/relays/utils/src/metrics.rs +++ b/polkadot/bridges/relays/utils/src/metrics.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
// This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,155 +14,149 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -pub use substrate_prometheus_endpoint::{register, Counter, CounterVec, Gauge, GaugeVec, Opts, Registry, F64, U64}; +pub use float_json_value::FloatJsonValueMetric; +pub use global::GlobalMetrics; +pub use substrate_prometheus_endpoint::{ + prometheus::core::{Atomic, Collector}, + register, Counter, CounterVec, Gauge, GaugeVec, Opts, PrometheusError, Registry, F64, U64, +}; -use async_std::sync::{Arc, Mutex}; -use std::net::SocketAddr; -use substrate_prometheus_endpoint::init_prometheus; -use sysinfo::{ProcessExt, RefreshKind, System, SystemExt}; +use async_trait::async_trait; +use std::{fmt::Debug, time::Duration}; -/// Prometheus endpoint MetricsParams. +mod float_json_value; +mod global; + +/// Unparsed address that needs to be used to expose Prometheus metrics. #[derive(Debug, Clone)] -pub struct MetricsParams { +pub struct MetricsAddress { /// Serve HTTP requests at given host. pub host: String, /// Serve HTTP requests at given port. pub port: u16, } -/// Metrics API. -pub trait Metrics { - /// Register metrics in the registry. - fn register(&self, registry: &Registry) -> Result<(), String>; -} - -/// Global Prometheus metrics. +/// Prometheus endpoint MetricsParams. #[derive(Debug, Clone)] -pub struct GlobalMetrics { - system: Arc>, - system_average_load: GaugeVec, - process_cpu_usage_percentage: Gauge, - process_memory_usage_bytes: Gauge, +pub struct MetricsParams { + /// Interface and TCP port to be used when exposing Prometheus metrics. + pub address: Option, + /// Metrics registry. May be `Some(_)` if several components share the same endpoint. + pub registry: Option, + /// Prefix that must be used in metric names. 
+ pub metrics_prefix: Option, } -/// Start Prometheus endpoint with given metrics registry. -pub fn start( - prefix: String, - params: Option, - global_metrics: &GlobalMetrics, - extra_metrics: &impl Metrics, -) { - let params = match params { - Some(params) => params, - None => return, - }; +/// Metrics API. +pub trait Metrics: Clone + Send + Sync + 'static {} - assert!(!prefix.is_empty(), "Metrics prefix can not be empty"); +impl Metrics for T {} - let do_start = move || { - let prometheus_socket_addr = SocketAddr::new( - params - .host - .parse() - .map_err(|err| format!("Invalid Prometheus host {}: {}", params.host, err))?, - params.port, - ); - let metrics_registry = - Registry::new_custom(Some(prefix), None).expect("only fails if prefix is empty; prefix is not empty; qed"); - global_metrics.register(&metrics_registry)?; - extra_metrics.register(&metrics_registry)?; +/// Standalone metrics API. +/// +/// Metrics of this kind know how to update themselves, so we may just spawn and forget the +/// asynchronous self-update task. +#[async_trait] +pub trait StandaloneMetrics: Metrics { + /// Update metric values. + async fn update(&self); + + /// Metrics update interval. + fn update_interval(&self) -> Duration; + + /// Spawn the self update task that will keep update metric value at given intervals. 
+ fn spawn(self) { async_std::task::spawn(async move { - init_prometheus(prometheus_socket_addr, metrics_registry) - .await - .map_err(|err| format!("Error starting Prometheus endpoint: {}", err)) + let update_interval = self.update_interval(); + loop { + self.update().await; + async_std::task::sleep(update_interval).await; + } }); - - Ok(()) - }; - - let result: Result<(), String> = do_start(); - if let Err(err) = result { - log::warn!( - target: "bridge", - "Failed to expose metrics: {}", - err, - ); } } -impl Default for MetricsParams { +impl Default for MetricsAddress { fn default() -> Self { - MetricsParams { + MetricsAddress { host: "127.0.0.1".into(), port: 9616, } } } -impl Metrics for GlobalMetrics { - fn register(&self, registry: &Registry) -> Result<(), String> { - register(self.system_average_load.clone(), registry).map_err(|e| e.to_string())?; - register(self.process_cpu_usage_percentage.clone(), registry).map_err(|e| e.to_string())?; - register(self.process_memory_usage_bytes.clone(), registry).map_err(|e| e.to_string())?; - Ok(()) +impl MetricsParams { + /// Creates metrics params so that metrics are not exposed. + pub fn disabled() -> Self { + MetricsParams { + address: None, + registry: None, + metrics_prefix: None, + } + } + + /// Do not expose metrics. + pub fn disable(mut self) -> Self { + self.address = None; + self + } + + /// Set prefix to use in metric names. 
+ pub fn metrics_prefix(mut self, prefix: String) -> Self { + self.metrics_prefix = Some(prefix); + self } } -impl Default for GlobalMetrics { - fn default() -> Self { - GlobalMetrics { - system: Arc::new(Mutex::new(System::new_with_specifics(RefreshKind::everything()))), - system_average_load: GaugeVec::new(Opts::new("system_average_load", "System load average"), &["over"]) - .expect("metric is static and thus valid; qed"), - process_cpu_usage_percentage: Gauge::new("process_cpu_usage_percentage", "Process CPU usage") - .expect("metric is static and thus valid; qed"), - process_memory_usage_bytes: Gauge::new( - "process_memory_usage_bytes", - "Process memory (resident set size) usage", - ) - .expect("metric is static and thus valid; qed"), +impl From> for MetricsParams { + fn from(address: Option) -> Self { + MetricsParams { + address, + registry: None, + metrics_prefix: None, } } } -impl GlobalMetrics { - /// Update metrics. - pub async fn update(&self) { - // update system-wide metrics - let mut system = self.system.lock().await; - let load = system.get_load_average(); - self.system_average_load.with_label_values(&["1min"]).set(load.one); - self.system_average_load.with_label_values(&["5min"]).set(load.five); - self.system_average_load.with_label_values(&["15min"]).set(load.fifteen); - - // update process-related metrics - let pid = sysinfo::get_current_pid().expect( - "only fails where pid is unavailable (os=unknown || arch=wasm32);\ - relay is not supposed to run in such MetricsParamss;\ - qed", - ); - let is_process_refreshed = system.refresh_process(pid); - match (is_process_refreshed, system.get_process(pid)) { - (true, Some(process_info)) => { - let cpu_usage = process_info.cpu_usage() as f64; - let memory_usage = process_info.memory() * 1024; - log::trace!( - target: "bridge-metrics", - "Refreshed process metrics: CPU={}, memory={}", - cpu_usage, - memory_usage, - ); - - self.process_cpu_usage_percentage - .set(if cpu_usage.is_finite() { cpu_usage } else 
{ 0f64 }); - self.process_memory_usage_bytes.set(memory_usage); - } - _ => { - log::warn!( - target: "bridge", - "Failed to refresh process information. Metrics may show obsolete values", - ); - } - } +/// Returns metric name optionally prefixed with given prefix. +pub fn metric_name(prefix: Option<&str>, name: &str) -> String { + if let Some(prefix) = prefix { + format!("{}_{}", prefix, name) + } else { + name.into() } } + +/// Set value of gauge metric. +/// +/// If value is `Ok(None)` or `Err(_)`, metric would have default value. +pub fn set_gauge_value, E: Debug>(gauge: &Gauge, value: Result, E>) { + gauge.set(match value { + Ok(Some(value)) => { + log::trace!( + target: "bridge-metrics", + "Updated value of metric '{:?}': {:?}", + gauge.desc().first().map(|d| &d.fq_name), + value, + ); + value + } + Ok(None) => { + log::warn!( + target: "bridge-metrics", + "Failed to update metric '{:?}': value is empty", + gauge.desc().first().map(|d| &d.fq_name), + ); + Default::default() + } + Err(error) => { + log::warn!( + target: "bridge-metrics", + "Failed to update metric '{:?}': {:?}", + gauge.desc().first().map(|d| &d.fq_name), + error, + ); + Default::default() + } + }) +} diff --git a/polkadot/bridges/relays/utils/src/metrics/float_json_value.rs b/polkadot/bridges/relays/utils/src/metrics/float_json_value.rs new file mode 100644 index 0000000000..d61f9cac7c --- /dev/null +++ b/polkadot/bridges/relays/utils/src/metrics/float_json_value.rs @@ -0,0 +1,121 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::metrics::{metric_name, register, Gauge, PrometheusError, Registry, StandaloneMetrics, F64}; + +use async_trait::async_trait; +use std::time::Duration; + +/// Value update interval. +const UPDATE_INTERVAL: Duration = Duration::from_secs(60); + +/// Metric that represents float value received from HTTP service as float gauge. +#[derive(Debug, Clone)] +pub struct FloatJsonValueMetric { + url: String, + json_path: String, + metric: Gauge, +} + +impl FloatJsonValueMetric { + /// Create new metric instance with given name and help. + pub fn new( + registry: &Registry, + prefix: Option<&str>, + url: String, + json_path: String, + name: String, + help: String, + ) -> Result { + Ok(FloatJsonValueMetric { + url, + json_path, + metric: register(Gauge::new(metric_name(prefix, &name), help)?, registry)?, + }) + } + + /// Read value from HTTP service. + async fn read_value(&self) -> Result { + use isahc::{AsyncReadResponseExt, HttpClient, Request}; + + fn map_isahc_err(err: impl std::fmt::Display) -> String { + format!("Failed to fetch token price from remote server: {}", err) + } + + let request = Request::get(&self.url) + .header("Accept", "application/json") + .body(()) + .map_err(map_isahc_err)?; + let raw_response = HttpClient::new() + .map_err(map_isahc_err)? + .send_async(request) + .await + .map_err(map_isahc_err)? 
+ .text() + .await + .map_err(map_isahc_err)?; + + parse_service_response(&self.json_path, &raw_response) + } +} + +#[async_trait] +impl StandaloneMetrics for FloatJsonValueMetric { + fn update_interval(&self) -> Duration { + UPDATE_INTERVAL + } + + async fn update(&self) { + crate::metrics::set_gauge_value(&self.metric, self.read_value().await.map(Some)); + } +} + +/// Parse HTTP service response. +fn parse_service_response(json_path: &str, response: &str) -> Result { + let json = serde_json::from_str(response).map_err(|err| { + format!( + "Failed to parse HTTP service response: {:?}. Response: {:?}", + err, response, + ) + })?; + + let mut selector = jsonpath_lib::selector(&json); + let maybe_selected_value = selector(json_path).map_err(|err| { + format!( + "Failed to select value from response: {:?}. Response: {:?}", + err, response, + ) + })?; + let selected_value = maybe_selected_value + .first() + .and_then(|v| v.as_f64()) + .ok_or_else(|| format!("Missing required value from response: {:?}", response,))?; + + Ok(selected_value) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_service_response_works() { + assert_eq!( + parse_service_response("$.kusama.usd", r#"{"kusama":{"usd":433.05}}"#).map_err(drop), + Ok(433.05), + ); + } +} diff --git a/polkadot/bridges/relays/utils/src/metrics/global.rs b/polkadot/bridges/relays/utils/src/metrics/global.rs new file mode 100644 index 0000000000..d212480510 --- /dev/null +++ b/polkadot/bridges/relays/utils/src/metrics/global.rs @@ -0,0 +1,111 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Global system-wide Prometheus metrics exposed by relays. + +use crate::metrics::{ + metric_name, register, Gauge, GaugeVec, Opts, PrometheusError, Registry, StandaloneMetrics, F64, U64, +}; + +use async_std::sync::{Arc, Mutex}; +use async_trait::async_trait; +use std::time::Duration; +use sysinfo::{ProcessExt, RefreshKind, System, SystemExt}; + +/// Global metrics update interval. +const UPDATE_INTERVAL: Duration = Duration::from_secs(10); + +/// Global Prometheus metrics. +#[derive(Debug, Clone)] +pub struct GlobalMetrics { + system: Arc>, + system_average_load: GaugeVec, + process_cpu_usage_percentage: Gauge, + process_memory_usage_bytes: Gauge, +} + +impl GlobalMetrics { + /// Create and register global metrics. 
+ pub fn new(registry: &Registry, prefix: Option<&str>) -> Result { + Ok(GlobalMetrics { + system: Arc::new(Mutex::new(System::new_with_specifics(RefreshKind::everything()))), + system_average_load: register( + GaugeVec::new( + Opts::new(metric_name(prefix, "system_average_load"), "System load average"), + &["over"], + )?, + registry, + )?, + process_cpu_usage_percentage: register( + Gauge::new(metric_name(prefix, "process_cpu_usage_percentage"), "Process CPU usage")?, + registry, + )?, + process_memory_usage_bytes: register( + Gauge::new( + metric_name(prefix, "process_memory_usage_bytes"), + "Process memory (resident set size) usage", + )?, + registry, + )?, + }) + } +} + +#[async_trait] +impl StandaloneMetrics for GlobalMetrics { + async fn update(&self) { + // update system-wide metrics + let mut system = self.system.lock().await; + let load = system.get_load_average(); + self.system_average_load.with_label_values(&["1min"]).set(load.one); + self.system_average_load.with_label_values(&["5min"]).set(load.five); + self.system_average_load.with_label_values(&["15min"]).set(load.fifteen); + + // update process-related metrics + let pid = sysinfo::get_current_pid().expect( + "only fails where pid is unavailable (os=unknown || arch=wasm32);\ + relay is not supposed to run in such MetricsParamss;\ + qed", + ); + let is_process_refreshed = system.refresh_process(pid); + match (is_process_refreshed, system.get_process(pid)) { + (true, Some(process_info)) => { + let cpu_usage = process_info.cpu_usage() as f64; + let memory_usage = process_info.memory() * 1024; + log::trace!( + target: "bridge-metrics", + "Refreshed process metrics: CPU={}, memory={}", + cpu_usage, + memory_usage, + ); + + self.process_cpu_usage_percentage + .set(if cpu_usage.is_finite() { cpu_usage } else { 0f64 }); + self.process_memory_usage_bytes.set(memory_usage); + } + _ => { + log::warn!( + target: "bridge-metrics", + "Failed to refresh process information. 
Metrics may show obsolete values", + ); + } + } + } + + fn update_interval(&self) -> Duration { + UPDATE_INTERVAL + } +} diff --git a/polkadot/bridges/relays/utils/src/relay_loop.rs b/polkadot/bridges/relays/utils/src/relay_loop.rs index d750358eda..8790b0913e 100644 --- a/polkadot/bridges/relays/utils/src/relay_loop.rs +++ b/polkadot/bridges/relays/utils/src/relay_loop.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. // Parity Bridges Common is free software: you can redistribute it and/or modify @@ -14,10 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . +use crate::metrics::{Metrics, MetricsAddress, MetricsParams, PrometheusError, StandaloneMetrics}; use crate::{FailedClient, MaybeConnectionError}; use async_trait::async_trait; -use std::{fmt::Debug, future::Future, time::Duration}; +use std::{fmt::Debug, future::Future, net::SocketAddr, time::Duration}; +use substrate_prometheus_endpoint::{init_prometheus, Registry}; /// Default pause between reconnect attempts. pub const RECONNECT_DELAY: Duration = Duration::from_secs(10); @@ -32,38 +34,105 @@ pub trait Client: Clone + Send + Sync { async fn reconnect(&mut self) -> Result<(), Self::Error>; } -/// Run relay loop. -/// -/// This function represents an outer loop, which in turn calls provided `loop_run` function to do -/// actual job. When `loop_run` returns, this outer loop reconnects to failed client (source, -/// target or both) and calls `loop_run` again. -pub fn run( - reconnect_delay: Duration, - mut source_client: SC, - mut target_client: TC, - loop_run: R, -) where - R: Fn(SC, TC) -> F, - F: Future>, -{ - let mut local_pool = futures::executor::LocalPool::new(); +/// Returns generic loop that may be customized and started. 
+pub fn relay_loop(source_client: SC, target_client: TC) -> Loop { + Loop { + reconnect_delay: RECONNECT_DELAY, + source_client, + target_client, + loop_metric: None, + } +} - local_pool.run_until(async move { +/// Returns generic relay loop metrics that may be customized and used in one or several relay loops. +pub fn relay_metrics(prefix: Option, params: MetricsParams) -> LoopMetrics<(), (), ()> { + LoopMetrics { + relay_loop: Loop { + reconnect_delay: RECONNECT_DELAY, + source_client: (), + target_client: (), + loop_metric: None, + }, + address: params.address, + registry: params.registry.unwrap_or_else(|| create_metrics_registry(prefix)), + metrics_prefix: params.metrics_prefix, + loop_metric: None, + } +} + +/// Generic relay loop. +pub struct Loop { + reconnect_delay: Duration, + source_client: SC, + target_client: TC, + loop_metric: Option, +} + +/// Relay loop metrics builder. +pub struct LoopMetrics { + relay_loop: Loop, + address: Option, + registry: Registry, + metrics_prefix: Option, + loop_metric: Option, +} + +impl Loop { + /// Customize delay between reconnect attempts. + pub fn reconnect_delay(mut self, reconnect_delay: Duration) -> Self { + self.reconnect_delay = reconnect_delay; + self + } + + /// Start building loop metrics using given prefix. + pub fn with_metrics(self, prefix: Option, params: MetricsParams) -> LoopMetrics { + LoopMetrics { + relay_loop: Loop { + reconnect_delay: self.reconnect_delay, + source_client: self.source_client, + target_client: self.target_client, + loop_metric: None, + }, + address: params.address, + registry: params.registry.unwrap_or_else(|| create_metrics_registry(prefix)), + metrics_prefix: params.metrics_prefix, + loop_metric: None, + } + } + + /// Run relay loop. + /// + /// This function represents an outer loop, which in turn calls provided `run_loop` function to do + /// actual job. When `run_loop` returns, this outer loop reconnects to failed client (source, + /// target or both) and calls `run_loop` again. 
+ pub async fn run(mut self, run_loop: R) -> Result<(), String> + where + R: Fn(SC, TC, Option) -> F, + F: Future>, + SC: Client, + TC: Client, + LM: Clone, + { loop { - let result = loop_run(source_client.clone(), target_client.clone()).await; + let result = run_loop( + self.source_client.clone(), + self.target_client.clone(), + self.loop_metric.clone(), + ) + .await; match result { Ok(()) => break, Err(failed_client) => loop { - async_std::task::sleep(reconnect_delay).await; + async_std::task::sleep(self.reconnect_delay).await; if failed_client == FailedClient::Both || failed_client == FailedClient::Source { - match source_client.reconnect().await { + match self.source_client.reconnect().await { Ok(()) => (), Err(error) => { log::warn!( target: "bridge", "Failed to reconnect to source client. Going to retry in {}s: {:?}", - reconnect_delay.as_secs(), + self.reconnect_delay.as_secs(), error, ); continue; @@ -71,13 +140,13 @@ pub fn run( } } if failed_client == FailedClient::Both || failed_client == FailedClient::Target { - match target_client.reconnect().await { + match self.target_client.reconnect().await { Ok(()) => (), Err(error) => { log::warn!( target: "bridge", "Failed to reconnect to target client. Going to retry in {}s: {:?}", - reconnect_delay.as_secs(), + self.reconnect_delay.as_secs(), error, ); continue; @@ -91,5 +160,97 @@ pub fn run( log::debug!(target: "bridge", "Restarting relay loop"); } - }); + + Ok(()) + } +} + +impl LoopMetrics { + /// Add relay loop metrics. + /// + /// Loop metrics will be passed to the loop callback. 
+ pub fn loop_metric( + self, + create_metric: impl FnOnce(&Registry, Option<&str>) -> Result, + ) -> Result, String> { + let loop_metric = create_metric(&self.registry, self.metrics_prefix.as_deref()).map_err(|e| e.to_string())?; + + Ok(LoopMetrics { + relay_loop: self.relay_loop, + address: self.address, + registry: self.registry, + metrics_prefix: self.metrics_prefix, + loop_metric: Some(loop_metric), + }) + } + + /// Add standalone metrics. + pub fn standalone_metric( + self, + create_metric: impl FnOnce(&Registry, Option<&str>) -> Result, + ) -> Result { + // since standalone metrics are updating themselves, we may just ignore the fact that the same + // standalone metric is exposed by several loops && only spawn single metric + match create_metric(&self.registry, self.metrics_prefix.as_deref()) { + Ok(standalone_metrics) => standalone_metrics.spawn(), + Err(PrometheusError::AlreadyReg) => (), + Err(e) => return Err(e.to_string()), + } + + Ok(self) + } + + /// Convert into `MetricsParams` structure so that metrics registry may be extended later. + pub fn into_params(self) -> MetricsParams { + MetricsParams { + address: self.address, + registry: Some(self.registry), + metrics_prefix: self.metrics_prefix, + } + } + + /// Expose metrics using address passed at creation. + /// + /// If passed `address` is `None`, metrics are not exposed. 
+ pub async fn expose(self) -> Result, String> { + if let Some(address) = self.address { + let socket_addr = SocketAddr::new( + address.host.parse().map_err(|err| { + format!( + "Invalid host {} is used to expose Prometheus metrics: {}", + address.host, err, + ) + })?, + address.port, + ); + + let registry = self.registry; + async_std::task::spawn(async move { + let result = init_prometheus(socket_addr, registry).await; + log::trace!( + target: "bridge-metrics", + "Prometheus endpoint has exited with result: {:?}", + result, + ); + }); + } + + Ok(Loop { + reconnect_delay: self.relay_loop.reconnect_delay, + source_client: self.relay_loop.source_client, + target_client: self.relay_loop.target_client, + loop_metric: self.loop_metric, + }) + } +} + +/// Create new registry with global metrics. +fn create_metrics_registry(prefix: Option) -> Registry { + match prefix { + Some(prefix) => { + assert!(!prefix.is_empty(), "Metrics prefix can not be empty"); + Registry::new_custom(Some(prefix), None).expect("only fails if prefix is empty; prefix is not empty; qed") + } + None => Registry::new(), + } } diff --git a/polkadot/bridges/scripts/license_header b/polkadot/bridges/scripts/license_header index b989aaa1cf..f9b301209b 100644 --- a/polkadot/bridges/scripts/license_header +++ b/polkadot/bridges/scripts/license_header @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2019-2021 Parity Technologies (UK) Ltd. // This file is part of Parity Bridges Common. 
// Parity Bridges Common is free software: you can redistribute it and/or modify
diff --git a/polkadot/bridges/scripts/send-message.sh b/polkadot/bridges/scripts/send-message-from-millau-rialto.sh
similarity index 66%
rename from polkadot/bridges/scripts/send-message.sh
rename to polkadot/bridges/scripts/send-message-from-millau-rialto.sh
index f7ceac13c0..10fe24087f 100755
--- a/polkadot/bridges/scripts/send-message.sh
+++ b/polkadot/bridges/scripts/send-message-from-millau-rialto.sh
@@ -6,25 +6,27 @@
 # we have (to make sure the message relays are running), but remove the message
 # generator service. From there you may submit messages manually using this script.
+MILLAU_PORT="${MILLAU_PORT:-9945}"
+
 case "$1" in
 remark)
 RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \
- ./target/debug/substrate-relay send-message millau-to-rialto \
- --millau-host localhost \
- --millau-port 20044 \
- --millau-signer //Dave \
- --rialto-signer //Dave \
+ ./target/debug/substrate-relay send-message MillauToRialto \
+ --source-host localhost \
+ --source-port $MILLAU_PORT \
+ --source-signer //Alice \
+ --target-signer //Bob \
 --lane 00000000 \
 --origin Target \
 remark \
 ;;
 transfer)
 RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \
- ./target/debug/substrate-relay send-message millau-to-rialto \
- --millau-host localhost \
- --millau-port 20044 \
- --millau-signer //Dave \
- --rialto-signer //Dave \
+ ./target/debug/substrate-relay send-message MillauToRialto \
+ --source-host localhost \
+ --source-port $MILLAU_PORT \
+ --source-signer //Alice \
+ --target-signer //Bob \
 --lane 00000000 \
 --origin Target \
 transfer \
diff --git a/polkadot/bridges/scripts/send-message-from-rialto-millau.sh b/polkadot/bridges/scripts/send-message-from-rialto-millau.sh
new file mode 100755
index 0000000000..52d19e3af8
--- /dev/null
+++ b/polkadot/bridges/scripts/send-message-from-rialto-millau.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+# Used for manually sending a message to a running 
 network.
+#
+# You could for example spin up a full network using the Docker Compose files
+# we have (to make sure the message relays are running), but remove the message
+# generator service. From there you may submit messages manually using this script.
+
+RIALTO_PORT="${RIALTO_PORT:-9944}"
+
+case "$1" in
+ remark)
+ RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \
+ ./target/debug/substrate-relay send-message RialtoToMillau \
+ --source-host localhost \
+ --source-port $RIALTO_PORT \
+ --target-signer //Alice \
+ --source-signer //Bob \
+ --lane 00000000 \
+ --origin Target \
+ remark \
+ ;;
+ transfer)
+ RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \
+ ./target/debug/substrate-relay send-message RialtoToMillau \
+ --source-host localhost \
+ --source-port $RIALTO_PORT \
+ --target-signer //Alice \
+ --source-signer //Bob \
+ --lane 00000000 \
+ --origin Target \
+ transfer \
+ --amount 100000000000000 \
+ --recipient 5DZvVvd1udr61vL7Xks17TFQ4fi9NiagYLaBobnbPCP14ewA \
+ ;;
+ *) echo "A message type is required. Supported messages: remark, transfer."; exit 1;;
+esac
diff --git a/polkadot/bridges/scripts/update-weights.sh b/polkadot/bridges/scripts/update-weights.sh
index cbf20730b4..0ac773e8d7 100755
--- a/polkadot/bridges/scripts/update-weights.sh
+++ b/polkadot/bridges/scripts/update-weights.sh
@@ -1,15 +1,31 @@
 #!/bin/sh
+#
+# Runtime benchmarks for the `pallet-bridge-messages` and `pallet-bridge-grandpa` pallets.
+#
+# Run this script from root of the repo. 
-# Run this script from root of the repo +set -eux -cargo run --manifest-path=bin/rialto/node/Cargo.toml --release --features=runtime-benchmarks -- benchmark \ - --chain=local \ +time cargo run --release -p rialto-bridge-node --features=runtime-benchmarks -- benchmark \ + --chain=dev \ --steps=50 \ --repeat=20 \ - --pallet=pallet_message_lane \ + --pallet=pallet_bridge_messages \ --extrinsic=* \ --execution=wasm \ --wasm-execution=Compiled \ --heap-pages=4096 \ - --output=./modules/message-lane/src/weights.rs \ + --output=./modules/messages/src/weights.rs \ + --template=./.maintain/rialto-weight-template.hbs + +time cargo run --release -p rialto-bridge-node --features=runtime-benchmarks -- benchmark \ + --chain=dev \ + --steps=50 \ + --repeat=20 \ + --pallet=pallet_bridge_grandpa \ + --extrinsic=* \ + --execution=wasm \ + --wasm-execution=Compiled \ + --heap-pages=4096 \ + --output=./modules/grandpa/src/weights.rs \ --template=./.maintain/rialto-weight-template.hbs