Mirror of https://github.com/pezkuwichain/revive-differential-tests.git, synced 2026-04-22 21:57:58 +00:00
Compare commits
21 Commits
| SHA1 |
|---|
| cec992f80a |
| b53550e43e |
| 9491263857 |
| b41c0e61c0 |
| 9d1c71756f |
| 8b0a0c3518 |
| 94b04c0189 |
| 2d3602aaed |
| d38e6d419d |
| 62478ee2f9 |
| dda369c8b5 |
| 08c1572870 |
| cd6b7969ac |
| 78ac7ee381 |
| 3edaebdcae |
| 66feb36b4e |
| cc753a1a2c |
| 31dfd67569 |
| a6e4932a08 |
| 06c2e023a9 |
| 347dcb4488 |
@@ -0,0 +1,141 @@
name: "Run Revive Differential Tests"
description: "Builds and runs revive-differential-tests (retester) from this repo against the caller's Polkadot SDK."

inputs:
  # Setup arguments & environment
  polkadot-sdk-path:
    description: "The path of the polkadot-sdk that should be compiled for the tests to run against."
    required: false
    default: "."
    type: string
  cargo-command:
    description: "The cargo command to use for compiling and running the tests (e.g., forklift cargo)."
    required: false
    default: "cargo"
    type: string
  revive-differential-tests-ref:
    description: "The branch, tag, or SHA to check out for the revive-differential-tests."
    required: false
    default: "main"
    type: string
  resolc-version:
    description: "The version of resolc to install and use in tests."
    required: false
    default: "0.5.0"
    type: string
  use-compilation-caches:
    description: "Controls whether the compilation caches are used for the test run."
    required: false
    default: true
    type: boolean
  # Test Execution Arguments
  platform:
    description: "The identifier of the platform to run the tests on (e.g., geth-evm-solc, revive-dev-node-revm-solc)."
    required: true
    type: string
  polkadot-omnichain-node-chain-spec-path:
    description: "The path of the chain-spec of the chain we're spawning. This is only required if the polkadot-omni-node is one of the selected platforms."
    required: false
    type: string
  polkadot-omnichain-node-parachain-id:
    description: "The id of the parachain to spawn with the polkadot-omni-node. This is only required if the polkadot-omni-node is one of the selected platforms."
    type: number
    required: false
  expectations-file-path:
    description: "Path to the expectations file to compare against."
    type: string
    required: false

runs:
  using: "composite"
  steps:
    - name: Checkout the Differential Tests Repository
      uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
      with:
        repository: paritytech/revive-differential-tests
        ref: ${{ inputs['revive-differential-tests-ref'] }}
        path: revive-differential-tests
        submodules: recursive
    - name: Installing Resolc
      shell: bash
      if: ${{ runner.os == 'Linux' && runner.arch == 'X64' }}
      run: |
        VERSION="${{ inputs['resolc-version'] }}"
        ASSET_URL="https://github.com/paritytech/revive/releases/download/v$VERSION/resolc-x86_64-unknown-linux-musl"
        echo "Downloading resolc v$VERSION from $ASSET_URL"
        curl -Lsf --show-error -o resolc "$ASSET_URL"
        chmod +x resolc
        ./resolc --version
    - name: Installing Retester
      shell: bash
      run: ${{ inputs['cargo-command'] }} install --locked --path revive-differential-tests/crates/core
    - name: Creating a workdir for retester
      shell: bash
      run: mkdir workdir
    - name: Downloading & Initializing the compilation caches
      shell: bash
      if: ${{ inputs['use-compilation-caches'] == 'true' }}
      run: |
        curl -fL --retry 3 --retry-all-errors --connect-timeout 10 -o cache.tar.gz "https://github.com/paritytech/revive-differential-tests/releases/download/compilation-caches-v1.1/cache.tar.gz"
        tar -zxf cache.tar.gz -C ./workdir > /dev/null 2>&1
    - name: Building the dependencies from the Polkadot SDK
      shell: bash
      run: |
        ${{ inputs['cargo-command'] }} build --locked --profile release -p pallet-revive-eth-rpc -p revive-dev-node --manifest-path ${{ inputs['polkadot-sdk-path'] }}/Cargo.toml
        ${{ inputs['cargo-command'] }} build --locked --profile release --bin polkadot-omni-node --manifest-path ${{ inputs['polkadot-sdk-path'] }}/Cargo.toml
    - name: Installing retester
      shell: bash
      run: ${{ inputs['cargo-command'] }} install --path ./revive-differential-tests/crates/core
    - name: Installing report-processor
      shell: bash
      run: ${{ inputs['cargo-command'] }} install --path ./revive-differential-tests/crates/report-processor
    - name: Running the Differential Tests
      shell: bash
      run: |
        OMNI_ARGS=()
        if [[ -n "${{ inputs['polkadot-omnichain-node-parachain-id'] }}" ]]; then
          OMNI_ARGS+=(
            --polkadot-omni-node.parachain-id
            "${{ inputs['polkadot-omnichain-node-parachain-id'] }}"
          )
        fi
        if [[ -n "${{ inputs['polkadot-omnichain-node-chain-spec-path'] }}" ]]; then
          OMNI_ARGS+=(
            --polkadot-omni-node.chain-spec-path
            "${{ inputs['polkadot-omnichain-node-chain-spec-path'] }}"
          )
        fi

        retester test \
          --test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/simple \
          --test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/complex \
          --test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/translated_semantic_tests \
          --platform ${{ inputs['platform'] }} \
          --report.file-name report.json \
          --concurrency.number-of-nodes 10 \
          --concurrency.number-of-threads 10 \
          --concurrency.number-of-concurrent-tasks 100 \
          --working-directory ./workdir \
          --revive-dev-node.consensus manual-seal-200 \
          --revive-dev-node.path ${{ inputs['polkadot-sdk-path'] }}/target/release/revive-dev-node \
          --eth-rpc.path ${{ inputs['polkadot-sdk-path'] }}/target/release/eth-rpc \
          --polkadot-omni-node.path ${{ inputs['polkadot-sdk-path'] }}/target/release/polkadot-omni-node \
          --resolc.path ./resolc \
          "${OMNI_ARGS[@]}" || true
    - name: Generate the expectation file
      shell: bash
      run: report-processor generate-expectations-file --report-path ./workdir/report.json --output-path ./workdir/expectations.json --remove-prefix ./revive-differential-tests/resolc-compiler-tests
    - name: Upload the Report to the CI
      uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
      with:
        name: ${{ inputs['platform'] }}-report.json
        path: ./workdir/report.json
    - name: Upload the Expectations File to the CI
      uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
      with:
        name: ${{ inputs['platform'] }}.json
        path: ./workdir/expectations.json
    - name: Check Expectations
      shell: bash
      if: ${{ inputs['expectations-file-path'] != '' }}
      run: report-processor compare-expectation-files --base-expectation-path ${{ inputs['expectations-file-path'] }} --other-expectation-path ./workdir/expectations.json
+138
-170
@@ -18,136 +18,95 @@ env:
  POLKADOT_VERSION: polkadot-stable2506-2

jobs:
  cache-polkadot:
    name: Build and cache Polkadot binaries on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-24.04, macos-14]

  machete:
    name: Check for Unneeded Dependencies
    runs-on: ubuntu-24.04
    env:
      SCCACHE_GHA_ENABLED: "true"
      RUSTC_WRAPPER: "sccache"
    steps:
      - name: Checkout repo and submodules
      - name: Checkout This Repository
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Install dependencies (Linux)
        if: matrix.os == 'ubuntu-24.04'
        run: |
          sudo apt-get update
          sudo apt-get install -y protobuf-compiler clang libclang-dev
          rustup target add wasm32-unknown-unknown
          rustup component add rust-src

      - name: Install dependencies (macOS)
        if: matrix.os == 'macos-14'
        run: |
          brew install protobuf
          rustup target add wasm32-unknown-unknown
          rustup component add rust-src

      - name: Cache binaries
        id: cache
        uses: actions/cache@v3
      - name: Run Sccache
        uses: mozilla-actions/sccache-action@v0.0.9
      - name: Install the Rust Toolchain
        uses: actions-rust-lang/setup-rust-toolchain@v1
      - name: Install the Cargo Make Binary
        uses: davidB/rust-cargo-make@v1
      - name: Run Cargo Machete
        run: cargo make machete

  check-fmt:
    name: Check Formatting
    runs-on: ubuntu-24.04
    env:
      SCCACHE_GHA_ENABLED: "true"
      RUSTC_WRAPPER: "sccache"
    steps:
      - name: Checkout This Repository
        uses: actions/checkout@v4
        with:
          path: |
            ~/.cargo/bin/revive-dev-node
            ~/.cargo/bin/eth-rpc
          key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}-with-dev-node

      - name: Build revive-dev-node
        if: steps.cache.outputs.cache-hit != 'true'
        run: |
          cd polkadot-sdk
          cargo install --locked --force --profile=production --path substrate/frame/revive/dev-node/node --bin revive-dev-node

      - name: Build eth-rpc
        if: steps.cache.outputs.cache-hit != 'true'
        run: |
          cd polkadot-sdk
          cargo install --path substrate/frame/revive/rpc --bin eth-rpc

      - name: Cache downloaded Polkadot binaries
        id: cache-polkadot
        uses: actions/cache@v3
          submodules: recursive
      - name: Run Sccache
        uses: mozilla-actions/sccache-action@v0.0.9
      - name: Install the Rust Toolchain
        uses: actions-rust-lang/setup-rust-toolchain@v1
      - name: Install the Cargo Make Binary
        uses: davidB/rust-cargo-make@v1
      - name: Run Cargo Formatter
        run: cargo make fmt-check

  check-clippy:
    name: Check Clippy Lints
    runs-on: ubuntu-24.04
    env:
      SCCACHE_GHA_ENABLED: "true"
      RUSTC_WRAPPER: "sccache"
    steps:
      - name: Checkout This Repository
        uses: actions/checkout@v4
        with:
          path: |
            ~/polkadot-cache/polkadot
            ~/polkadot-cache/polkadot-execute-worker
            ~/polkadot-cache/polkadot-prepare-worker
            ~/polkadot-cache/polkadot-parachain
          key: polkadot-downloaded-${{ matrix.os }}-${{ env.POLKADOT_VERSION }}

      - name: Download Polkadot binaries on macOS
        if: matrix.os == 'macos-14' && steps.cache-polkadot.outputs.cache-hit != 'true'
        run: |
          mkdir -p ~/polkadot-cache
          curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-aarch64-apple-darwin -o ~/polkadot-cache/polkadot
          curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-execute-worker-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-execute-worker
          curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-prepare-worker-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-prepare-worker
          curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-parachain-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-parachain
          chmod +x ~/polkadot-cache/*

      - name: Download Polkadot binaries on Ubuntu
        if: matrix.os == 'ubuntu-24.04' && steps.cache-polkadot.outputs.cache-hit != 'true'
        run: |
          mkdir -p ~/polkadot-cache
          curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot -o ~/polkadot-cache/polkadot
          curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-execute-worker -o ~/polkadot-cache/polkadot-execute-worker
          curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-prepare-worker -o ~/polkadot-cache/polkadot-prepare-worker
          curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-parachain -o ~/polkadot-cache/polkadot-parachain
          chmod +x ~/polkadot-cache/*

  ci:
    name: CI on ${{ matrix.os }}
          submodules: recursive
      - name: Run Sccache
        uses: mozilla-actions/sccache-action@v0.0.9
      - name: Install the Rust Toolchain
        uses: actions-rust-lang/setup-rust-toolchain@v1
      - name: Install the Cargo Make Binary
        uses: davidB/rust-cargo-make@v1
      - name: Run Cargo Clippy
        run: cargo make clippy

  test:
    name: Unit Tests
    runs-on: ${{ matrix.os }}
    needs: cache-polkadot
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-24.04, macos-14]

    env:
      SCCACHE_GHA_ENABLED: "true"
      RUSTC_WRAPPER: "sccache"
      POLKADOT_SDK_COMMIT_HASH: "30cda2aad8612a10ff729d494acd9d5353294d63"
    steps:
      - name: Checkout repo
      - name: Checkout This Repository
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Restore binaries from cache
        uses: actions/cache@v3
        with:
          path: |
            ~/.cargo/bin/revive-dev-node
            ~/.cargo/bin/eth-rpc
          key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}-with-dev-node

      - name: Restore downloaded Polkadot binaries from cache
        uses: actions/cache@v3
        with:
          path: |
            ~/polkadot-cache/polkadot
            ~/polkadot-cache/polkadot-execute-worker
            ~/polkadot-cache/polkadot-prepare-worker
            ~/polkadot-cache/polkadot-parachain
          key: polkadot-downloaded-${{ matrix.os }}-${{ env.POLKADOT_VERSION }}

      - name: Install Polkadot binaries
        run: |
          sudo cp ~/polkadot-cache/polkadot /usr/local/bin/
          sudo cp ~/polkadot-cache/polkadot-execute-worker /usr/local/bin/
          sudo cp ~/polkadot-cache/polkadot-prepare-worker /usr/local/bin/
          sudo cp ~/polkadot-cache/polkadot-parachain /usr/local/bin/
          sudo chmod +x /usr/local/bin/polkadot*

      - name: Setup Rust toolchain
      - name: Run Sccache
        uses: mozilla-actions/sccache-action@v0.0.9
      - name: Install the Rust Toolchain
        uses: actions-rust-lang/setup-rust-toolchain@v1
        with:
          rustflags: ""

      - name: Add wasm32 target and formatting
        run: |
          rustup target add wasm32-unknown-unknown
          rustup component add rust-src rustfmt clippy

          target: "wasm32-unknown-unknown"
          components: "rust-src,rust-std"
      - name: Install the Cargo Make Binary
        uses: davidB/rust-cargo-make@v1
      - name: Caching Step
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/eth-rpc
            ~/.cargo/bin/revive-dev-node
          key: polkadot-binaries-${{ env.POLKADOT_SDK_COMMIT_HASH }}-${{ matrix.os }}
      - name: Install Geth on Ubuntu
        if: matrix.os == 'ubuntu-24.04'
        run: |
@@ -161,7 +120,7 @@
          # Ubuntu. Eventually, we found out that the last version of geth that worked in our CI was
          # version 1.15.11. Thus, this is the version that we want to use in CI. The PPA sadly does
          # not have historic versions of Geth and therefore we need to resort to downloading pre-
          # built binaries for Geth and the surrounding tools, which is what the following parts of
          # the script do.

          sudo apt-get install -y wget ca-certificates tar
@@ -180,7 +139,6 @@
          curl -sL https://github.com/paritytech/revive/releases/download/v0.3.0/resolc-x86_64-unknown-linux-musl -o resolc
          chmod +x resolc
          sudo mv resolc /usr/local/bin

      - name: Install Geth on macOS
        if: matrix.os == 'macos-14'
        run: |
@@ -192,69 +150,79 @@
          curl -sL https://github.com/paritytech/revive/releases/download/v0.3.0/resolc-universal-apple-darwin -o resolc
          chmod +x resolc
          sudo mv resolc /usr/local/bin

      - name: Install Kurtosis on macOS
        if: matrix.os == 'macos-14'
        run: brew install kurtosis-tech/tap/kurtosis-cli

      - name: Install Kurtosis on Ubuntu
        if: matrix.os == 'ubuntu-24.04'
        run: |
          echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
          sudo apt update
          sudo apt install kurtosis-cli

      - name: Install cargo-machete
        uses: clechasseur/rs-cargo@v2
      - name: Run Tests
        run: cargo make test

  cache-polkadot:
    name: Build and Cache Polkadot Binaries on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-24.04, macos-14]
    env:
      SCCACHE_GHA_ENABLED: "true"
      RUSTC_WRAPPER: "sccache"
      RUSTFLAGS: "-Awarnings"
      POLKADOT_SDK_COMMIT_HASH: "30cda2aad8612a10ff729d494acd9d5353294d63"
    steps:
      - name: Caching Step
        id: cache-step
        uses: actions/cache@v4
        with:
          command: install
          args: cargo-machete@0.7.0
      - name: Machete
        run: cargo machete crates
          path: |
            ~/.cargo/bin/eth-rpc
            ~/.cargo/bin/revive-dev-node
          key: polkadot-binaries-${{ env.POLKADOT_SDK_COMMIT_HASH }}-${{ matrix.os }}
      - name: Checkout the Polkadot SDK Repository
        uses: actions/checkout@v4
        if: steps.cache-step.outputs.cache-hit != 'true'
        with:
          repository: paritytech/polkadot-sdk
          ref: ${{ env.POLKADOT_SDK_COMMIT_HASH }}
          submodules: recursive
      - name: Run Sccache
        uses: mozilla-actions/sccache-action@v0.0.9
        if: steps.cache-step.outputs.cache-hit != 'true'
      - name: Install the Rust Toolchain
        uses: actions-rust-lang/setup-rust-toolchain@v1
        if: steps.cache-step.outputs.cache-hit != 'true'
        with:
          target: "wasm32-unknown-unknown"
          components: "rust-src"
          toolchain: "1.90.0"

      - name: Format
        run: make format

      - name: Clippy
        run: make clippy

      - name: Check revive-dev-node version
        run: revive-dev-node --version

      - name: Check eth-rpc version
        run: eth-rpc --version

      - name: Check resolc version
        run: resolc --version

      - name: Check polkadot version
        run: polkadot --version

      - name: Check polkadot-parachain version
        run: polkadot-parachain --version

      - name: Check polkadot-execute-worker version
        run: polkadot-execute-worker --version

      - name: Check polkadot-prepare-worker version
        run: polkadot-prepare-worker --version

      - name: Test Formatting
        run: make format

      - name: Test Clippy
        run: make clippy

      - name: Test Machete
        run: make machete

      - name: Unit Tests
        if: matrix.os == 'ubuntu-24.04'
        run: cargo test --workspace -- --nocapture

      # We can't install docker in the macOS image used in CI and therefore we need to skip the
      # Kurtosis and lighthouse related tests when running the CI on macOS.
      - name: Unit Tests
        if: matrix.os == 'macos-14'
      - name: Install dependencies (Linux)
        if: matrix.os == 'ubuntu-24.04' && steps.cache-step.outputs.cache-hit != 'true'
        run: |
          cargo test --workspace -- --nocapture --skip lighthouse_geth::tests::
          sudo apt-get update
          sudo apt-get install -y protobuf-compiler clang libclang-dev
      - name: Install dependencies (macOS)
        if: matrix.os == 'macos-14' && steps.cache-step.outputs.cache-hit != 'true'
        run: |
          brew install protobuf llvm
          LLVM_PREFIX="$(brew --prefix llvm)"
          echo "LDFLAGS=-L${LLVM_PREFIX}/lib" >> "$GITHUB_ENV"
          echo "CPPFLAGS=-I${LLVM_PREFIX}/include" >> "$GITHUB_ENV"
          echo "CMAKE_PREFIX_PATH=${LLVM_PREFIX}" >> "$GITHUB_ENV"
          echo "LIBCLANG_PATH=${LLVM_PREFIX}/lib" >> "$GITHUB_ENV"
          echo "DYLD_FALLBACK_LIBRARY_PATH=${LLVM_PREFIX}/lib" >> "$GITHUB_ENV"
          echo "${LLVM_PREFIX}/bin" >> "$GITHUB_PATH"
      - name: Build Polkadot Dependencies
        if: steps.cache-step.outputs.cache-hit != 'true'
        run: |
          cargo build \
            --locked \
            --profile production \
            --package revive-dev-node \
            --package pallet-revive-eth-rpc;
          mv ./target/production/revive-dev-node ~/.cargo/bin
          mv ./target/production/eth-rpc ~/.cargo/bin
          chmod +x ~/.cargo/bin/*
+3
-5
@@ -3,17 +3,15 @@
.DS_Store
node_modules
/*.json
*.sh

# We do not want to commit any log files that we produce from running the code locally so this is
# added to the .gitignore file.
*.log

profile.json.gz
workdir
workdir*

!/schema.json
!/dev-genesis.json

# Ignore all shell scripts except for the `run_tests.sh` script
*.sh
!run_tests.sh
!/scripts/*
@@ -1,6 +1,3 @@
[submodule "polkadot-sdk"]
    path = polkadot-sdk
    url = https://github.com/paritytech/polkadot-sdk.git
[submodule "resolc-compiler-tests"]
    path = resolc-compiler-tests
    url = https://github.com/paritytech/resolc-compiler-tests
Generated
+1268
-1097
File diff suppressed because it is too large
+11
-25
@@ -21,7 +21,9 @@ revive-dt-node-interaction = { version = "0.1.0", path = "crates/node-interaction" }
revive-dt-node-pool = { version = "0.1.0", path = "crates/node-pool" }
revive-dt-report = { version = "0.1.0", path = "crates/report" }
revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }
revive-dt-report-processor = { version = "0.1.0", path = "crates/report-processor" }

alloy = { version = "1.4.1", features = ["full", "genesis", "json-rpc"] }
ansi_term = "0.12.1"
anyhow = "1.0"
bson = { version = "2.15.0" }
@@ -72,36 +74,20 @@ indexmap = { version = "2.10.0", default-features = false }
itertools = { version = "0.14.0" }

# revive compiler
revive-solc-json-interface = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
revive-common = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
revive-differential = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
revive-solc-json-interface = { version = "0.5.0" }
revive-common = { version = "0.3.0" }
revive-differential = { version = "0.3.0" }

zombienet-sdk = { git = "https://github.com/paritytech/zombienet-sdk.git", rev = "891f6554354ce466abd496366dbf8b4f82141241" }

[workspace.dependencies.alloy]
version = "1.0.37"
default-features = false
features = [
    "json-abi",
    "providers",
    "provider-ws",
    "provider-ipc",
    "provider-http",
    "provider-debug-api",
    "reqwest",
    "rpc-types",
    "signer-local",
    "std",
    "network",
    "serde",
    "rpc-types-eth",
    "genesis",
    "sol-types",
]

[profile.bench]
inherits = "release"
lto = true
codegen-units = 1
lto = true

[profile.production]
inherits = "release"
codegen-units = 1
lto = true

[workspace.lints.clippy]
@@ -1,15 +0,0 @@
.PHONY: format clippy test machete

format:
	cargo fmt --all -- --check

clippy:
	cargo clippy --all-features --workspace -- --deny warnings

machete:
	cargo install cargo-machete
	cargo machete crates

test: format clippy machete
	cargo test --workspace -- --nocapture
@@ -0,0 +1,21 @@
[config]
default_to_workspace = false

[tasks.machete]
command = "cargo"
args = ["machete", "crates"]
install_crate = "cargo-machete"

[tasks.fmt-check]
command = "cargo"
args = ["fmt", "--all", "--", "--check"]
install_crate = "rustfmt"

[tasks.clippy]
command = "cargo"
args = ["clippy", "--all-features", "--workspace", "--", "--deny", "warnings"]
install_crate = "clippy"

[tasks.test]
command = "cargo"
args = ["test", "--workspace", "--", "--nocapture"]
Binary file not shown.
@@ -19,7 +19,6 @@ semver = { workspace = true }
serde = { workspace = true }
schemars = { workspace = true }
strum = { workspace = true }
tokio = { workspace = true, default-features = false, features = ["time"] }

[lints]
workspace = true
@@ -1,3 +0,0 @@
mod poll;

pub use poll::*;
@@ -1,72 +0,0 @@
use std::ops::ControlFlow;
use std::time::Duration;

use anyhow::{Context as _, Result, anyhow};

const EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION: Duration = Duration::from_secs(60);

/// A function that polls a fallible future for some period of time and errors if it fails to
/// get a result after polling.
///
/// Given a future that returns a [`Result<ControlFlow<O, ()>>`], this function calls the future
/// repeatedly (with some wait period) until the future returns a [`ControlFlow::Break`] or until it
/// returns an [`Err`], in which case the function stops polling and returns the error.
///
/// If the future keeps returning [`ControlFlow::Continue`] and fails to return a [`Break`] within
/// the permitted polling duration, then this function returns an [`Err`].
///
/// [`Break`]: ControlFlow::Break
/// [`Continue`]: ControlFlow::Continue
pub async fn poll<F, O>(
    polling_duration: Duration,
    polling_wait_behavior: PollingWaitBehavior,
    mut future: impl FnMut() -> F,
) -> Result<O>
where
    F: Future<Output = Result<ControlFlow<O, ()>>>,
{
    let mut retries = 0;
    let mut total_wait_duration = Duration::ZERO;
    let max_allowed_wait_duration = polling_duration;

    loop {
        if total_wait_duration >= max_allowed_wait_duration {
            break Err(anyhow!(
                "Polling failed after {} retries and a total of {:?} of wait time",
                retries,
                total_wait_duration
            ));
        }

        match future()
            .await
            .context("Polled future returned an error during polling loop")?
        {
            ControlFlow::Continue(()) => {
                let next_wait_duration = match polling_wait_behavior {
                    PollingWaitBehavior::Constant(duration) => duration,
                    PollingWaitBehavior::ExponentialBackoff => {
                        Duration::from_secs(2u64.pow(retries))
                            .min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION)
                    }
                };
                let next_wait_duration =
                    next_wait_duration.min(max_allowed_wait_duration - total_wait_duration);
                total_wait_duration += next_wait_duration;
                retries += 1;

                tokio::time::sleep(next_wait_duration).await;
            }
            ControlFlow::Break(output) => {
                break Ok(output);
            }
        }
    }
}

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub enum PollingWaitBehavior {
    Constant(Duration),
    #[default]
    ExponentialBackoff,
}
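The removed helper above was the crate's generic polling loop. A minimal usage sketch of how such a helper is invoked, assuming a hypothetical `is_ready` health check (not part of the original code):

```rust
use std::ops::ControlFlow;
use std::time::Duration;

// Hedged sketch: `is_ready` is a hypothetical check, not an API from this
// repository. It illustrates the ControlFlow-based contract of `poll`: keep
// returning Continue(()) to retry, Break(output) to finish, or Err to abort.
async fn wait_until_ready(is_ready: impl Fn() -> anyhow::Result<bool>) -> anyhow::Result<()> {
    poll(
        Duration::from_secs(120),                // give up after two minutes
        PollingWaitBehavior::ExponentialBackoff, // waits 1s, 2s, 4s, ... capped at 60s
        || async {
            if is_ready()? {
                Ok(ControlFlow::Break(()))       // done polling
            } else {
                Ok(ControlFlow::Continue(()))    // retry after the next backoff step
            }
        },
    )
    .await
}
```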
@@ -3,7 +3,6 @@
pub mod cached_fs;
pub mod fs;
pub mod futures;
pub mod iterators;
pub mod macros;
pub mod types;
@@ -39,6 +39,12 @@ pub enum PlatformIdentifier {
    ZombienetPolkavmResolc,
    /// A zombienet based Substrate/Polkadot node with the REVM backend with the solc compiler.
    ZombienetRevmSolc,
    /// A polkadot-omni-chain based node with a custom runtime with the PolkaVM backend and the
    /// resolc compiler.
    PolkadotOmniNodePolkavmResolc,
    /// A polkadot-omni-chain based node with a custom runtime with the REVM backend and the solc
    /// compiler.
    PolkadotOmniNodeRevmSolc,
}

/// An enum of the platform identifiers of all of the platforms supported by this framework.

@@ -95,6 +101,8 @@ pub enum NodeIdentifier {
    ReviveDevNode,
    /// A zombienet-spawned node.
    Zombienet,
    /// The polkadot-omni-node.
    PolkadotOmniNode,
}

/// An enum representing the identifiers of the supported VMs.
@@ -23,6 +23,18 @@ pub struct Mode {
    pub version: Option<semver::VersionReq>,
}

impl Ord for Mode {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.to_string().cmp(&other.to_string())
    }
}

impl PartialOrd for Mode {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Display for Mode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.pipeline.fmt(f)?;
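The new string-backed ordering gives `Mode` a total order matching its `Display` output. A sketch of the kind of use this enables (an assumed motivation, not stated in the diff): keeping modes in ordered collections so downstream output iterates them deterministically:

```rust
use std::collections::BTreeSet;

// Because Ord delegates to Display, a BTreeSet<Mode> iterates modes in their
// human-readable order, keeping derived output (e.g. reports) stable.
fn deduplicate_and_sort(modes: Vec<Mode>) -> BTreeSet<Mode> {
    modes.into_iter().collect()
}
```

One design constraint worth noting: a `to_string`-based `cmp` is only a coherent `Ord` if `Display` is injective over `Mode`, i.e. two modes that render to the same string must also be equal under `Eq`.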
@@ -1,10 +1,15 @@
use std::{fmt::Display, path::PathBuf, str::FromStr};
use std::{
    fmt::Display,
    path::{Path, PathBuf},
    str::FromStr,
};

use anyhow::{Context as _, bail};
use serde::{Deserialize, Serialize};

use crate::types::Mode;

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum ParsedTestSpecifier {
    /// All of the test cases in the file should be run across all of the specified modes.
    FileOrDirectory {
@@ -34,6 +39,22 @@ pub enum ParsedTestSpecifier {
    },
}

impl ParsedTestSpecifier {
    pub fn metadata_path(&self) -> &Path {
        match self {
            ParsedTestSpecifier::FileOrDirectory {
                metadata_or_directory_file_path: metadata_file_path,
            }
            | ParsedTestSpecifier::Case {
                metadata_file_path, ..
            }
            | ParsedTestSpecifier::CaseWithMode {
                metadata_file_path, ..
            } => metadata_file_path,
        }
    }
}

impl Display for ParsedTestSpecifier {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
@@ -131,3 +152,22 @@ impl TryFrom<&str> for ParsedTestSpecifier {
        value.parse()
    }
}

impl Serialize for ParsedTestSpecifier {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.to_string().serialize(serializer)
    }
}

impl<'de> Deserialize<'de> for ParsedTestSpecifier {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let string = String::deserialize(deserializer)?;
        string.parse().map_err(serde::de::Error::custom)
    }
}
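The manual `Serialize`/`Deserialize` impls above route everything through `Display` and `FromStr`, so a specifier survives a JSON round trip as a single string. A small sketch of that property (the example comment is illustrative only):

```rust
// Round-trips a specifier through JSON: serialization emits the Display
// string, and deserialization re-parses it with FromStr.
fn round_trip(spec: &ParsedTestSpecifier) -> anyhow::Result<ParsedTestSpecifier> {
    let json = serde_json::to_string(spec)?; // a plain JSON string, e.g. "\"tests/foo.json\""
    Ok(serde_json::from_str(&json)?)
}
```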
@@ -12,9 +12,13 @@ use dashmap::DashMap;
use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
use revive_solc_json_interface::{
    SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
    SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
    SolcStandardJsonOutput,
    PolkaVMDefaultHeapMemorySize, PolkaVMDefaultStackMemorySize, SolcStandardJsonInput,
    SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
    SolcStandardJsonInputSettingsLibraries, SolcStandardJsonInputSettingsMetadata,
    SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsPolkaVM,
    SolcStandardJsonInputSettingsPolkaVMMemory, SolcStandardJsonInputSettingsSelection,
    SolcStandardJsonOutput, standard_json::input::settings::optimizer::Optimizer,
    standard_json::input::settings::optimizer::details::Details,
};
use tracing::{Span, field::display};

@@ -25,6 +29,7 @@ use crate::{
use alloy::json_abi::JsonAbi;
use anyhow::{Context as _, Result};
use semver::Version;
use std::collections::BTreeSet;
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};

/// A wrapper around the `resolc` binary, emitting PVM-compatible bytecode.

@@ -37,6 +42,10 @@ struct ResolcInner {
    solc: Solc,
    /// Path to the `resolc` executable
    resolc_path: PathBuf,
    /// The PVM heap size in bytes.
    pvm_heap_size: u32,
    /// The PVM stack size in bytes.
    pvm_stack_size: u32,
}

impl Resolc {

@@ -63,10 +72,35 @@ impl Resolc {
        Self(Arc::new(ResolcInner {
            solc,
            resolc_path: resolc_configuration.path.clone(),
            pvm_heap_size: resolc_configuration
                .heap_size
                .unwrap_or(PolkaVMDefaultHeapMemorySize),
            pvm_stack_size: resolc_configuration
                .stack_size
                .unwrap_or(PolkaVMDefaultStackMemorySize),
        }))
        })
        .clone())
    }

    fn polkavm_settings(&self) -> SolcStandardJsonInputSettingsPolkaVM {
        SolcStandardJsonInputSettingsPolkaVM::new(
            Some(SolcStandardJsonInputSettingsPolkaVMMemory::new(
                Some(self.0.pvm_heap_size),
                Some(self.0.pvm_stack_size),
            )),
            false,
        )
    }

    fn inject_polkavm_settings(&self, input: &SolcStandardJsonInput) -> Result<serde_json::Value> {
        let mut input_value = serde_json::to_value(input)
            .context("Failed to serialize Standard JSON input for resolc")?;
        if let Some(settings) = input_value.get_mut("settings") {
            settings["polkavm"] = serde_json::to_value(self.polkavm_settings()).unwrap();
        }
        Ok(input_value)
    }
}

impl SolidityCompiler for Resolc {

@@ -121,8 +155,8 @@ impl SolidityCompiler for Resolc {
                .collect(),
            settings: SolcStandardJsonInputSettings {
                evm_version,
                libraries: Some(
                    libraries
                libraries: SolcStandardJsonInputSettingsLibraries {
                    inner: libraries
                        .into_iter()
                        .map(|(source_code, libraries_map)| {
                            (

@@ -136,23 +170,29 @@ impl SolidityCompiler for Resolc {
                        })
                        .collect(),
                ),
                remappings: None,
                output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()),
                },
                remappings: BTreeSet::<String>::new(),
                output_selection: SolcStandardJsonInputSettingsSelection::new_required(),
                via_ir: Some(true),
                optimizer: SolcStandardJsonInputSettingsOptimizer::new(
                    optimization
                        .unwrap_or(ModeOptimizerSetting::M0)
                        .optimizations_enabled(),
                    None,
                    &Version::new(0, 0, 0),
                    false,
                    Optimizer::default_mode(),
                    Details::disabled(&Version::new(0, 0, 0)),
                ),
                metadata: None,
                polkavm: None,
                polkavm: self.polkavm_settings(),
                metadata: SolcStandardJsonInputSettingsMetadata::default(),
                detect_missing_libraries: false,
            },
        };
        Span::current().record("json_in", display(serde_json::to_string(&input).unwrap()));
        // Manually inject polkavm settings since it's marked skip_serializing in the upstream crate.
        let std_input_json = self.inject_polkavm_settings(&input)?;

        Span::current().record(
            "json_in",
            display(serde_json::to_string(&std_input_json).unwrap()),
        );

        let path = &self.0.resolc_path;
        let mut command = AsyncCommand::new(path);

@@ -181,8 +221,9 @@ impl SolidityCompiler for Resolc {
            .with_context(|| format!("Failed to spawn resolc at {}", path.display()))?;

        let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
        let serialized_input = serde_json::to_vec(&input)
        let serialized_input = serde_json::to_vec(&std_input_json)
            .context("Failed to serialize Standard JSON input for resolc")?;

        stdin_pipe
            .write_all(&serialized_input)
            .await

@@ -208,14 +249,18 @@ impl SolidityCompiler for Resolc {
            anyhow::bail!("Compilation failed with an error: {message}");
        }

        let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout)
            .map_err(|e| {
                anyhow::anyhow!(
                    "failed to parse resolc JSON output: {e}\nstderr: {}",
                    String::from_utf8_lossy(&stderr)
                )
            })
            .context("Failed to parse resolc standard JSON output")?;
        let parsed: SolcStandardJsonOutput = {
            let mut deserializer = serde_json::Deserializer::from_slice(&stdout);
            deserializer.disable_recursion_limit();
            serde::de::Deserialize::deserialize(&mut deserializer)
                .map_err(|e| {
                    anyhow::anyhow!(
                        "failed to parse resolc JSON output: {e}\nstderr: {}",
                        String::from_utf8_lossy(&stderr)
                    )
                })
                .context("Failed to parse resolc standard JSON output")?
        };

        tracing::debug!(
            output = %serde_json::to_string(&parsed).unwrap(),

@@ -224,7 +269,7 @@ impl SolidityCompiler for Resolc {
        // Detecting if the compiler output contained errors and reporting them through logs and
        // errors instead of returning the compiler output that might contain errors.
        for error in parsed.errors.iter().flatten() {
        for error in parsed.errors.iter() {
            if error.severity == "error" {
                tracing::error!(
                    ?error,

@@ -236,12 +281,12 @@ impl SolidityCompiler for Resolc {
            }
        }

        let Some(contracts) = parsed.contracts else {
        if parsed.contracts.is_empty() {
            anyhow::bail!("Unexpected error - resolc output doesn't have a contracts section");
        };
        }

        let mut compiler_output = CompilerOutput::default();
        for (source_path, contracts) in contracts.into_iter() {
        for (source_path, contracts) in parsed.contracts.into_iter() {
            let src_for_msg = source_path.clone();
            let source_path = PathBuf::from(source_path)
                .canonicalize()

@@ -249,15 +294,22 @@ impl SolidityCompiler for Resolc {
            let map = compiler_output.contracts.entry(source_path).or_default();
            for (contract_name, contract_information) in contracts.into_iter() {
                let bytecode = contract_information
                let Some(bytecode) = contract_information
                    .evm
                    .and_then(|evm| evm.bytecode.clone())
                    .context("Unexpected - Contract compiled with resolc has no bytecode")?;
                else {
                    tracing::debug!(
                        "Skipping abstract or interface contract {} - no bytecode",
                        contract_name
                    );
                    continue;
                };
                let abi = {
                    let metadata = contract_information
                        .metadata
                        .as_ref()
                        .context("No metadata found for the contract")?;
                    let metadata = &contract_information.metadata;
                    if metadata.is_null() {
                        anyhow::bail!("No metadata found for the contract");
                    }

                    let solc_metadata_str = match metadata {
                        serde_json::Value::String(solc_metadata_str) => {
                            solc_metadata_str.as_str()
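The `inject_polkavm_settings` change above works around a field that the upstream crate marks `skip_serializing`: the input is first serialized to a `serde_json::Value` and the `polkavm` object is spliced in by hand before the JSON goes to the compiler. A standalone sketch of that pattern, independent of the revive types:

```rust
use serde_json::Value;

// Serialize a struct whose `skip_serializing` field would otherwise be lost,
// then patch the missing object into the resulting JSON tree by hand.
// `key` and `extra` stand in for "polkavm" and the settings value.
fn inject_field(mut input: Value, key: &str, extra: Value) -> Value {
    if let Some(settings) = input.get_mut("settings") {
        settings[key] = extra; // inserts or overwrites `settings.<key>`
    }
    input
}
```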
@@ -143,6 +143,17 @@ impl AsRef<ReviveDevNodeConfiguration> for Context {
    }
}

impl AsRef<PolkadotOmnichainNodeConfiguration> for Context {
    fn as_ref(&self) -> &PolkadotOmnichainNodeConfiguration {
        match self {
            Self::Test(context) => context.as_ref().as_ref(),
            Self::Benchmark(context) => context.as_ref().as_ref(),
            Self::ExportGenesis(context) => context.as_ref().as_ref(),
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}

impl AsRef<EthRpcConfiguration> for Context {
    fn as_ref(&self) -> &EthRpcConfiguration {
        match self {

@@ -228,6 +239,7 @@ pub struct TestExecutionContext {
    #[arg(
        short = 'p',
        long = "platform",
        id = "platforms",
        default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"]
    )]
    pub platforms: Vec<PlatformIdentifier>,

@@ -277,6 +289,10 @@ pub struct TestExecutionContext {
    #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
    pub revive_dev_node_configuration: ReviveDevNodeConfiguration,

    /// Configuration parameters for the Polkadot Omnichain Node.
    #[clap(flatten, next_help_heading = "Polkadot Omnichain Node Configuration")]
    pub polkadot_omnichain_node_configuration: PolkadotOmnichainNodeConfiguration,

    /// Configuration parameters for the Eth RPC.
    #[clap(flatten, next_help_heading = "Eth RPC Configuration")]
    pub eth_rpc_configuration: EthRpcConfiguration,

@@ -375,6 +391,23 @@ pub struct BenchmarkingContext {
    #[arg(short = 'r', long = "default-repetition-count", default_value_t = 1000)]
    pub default_repetition_count: usize,

    /// This argument controls whether the benchmarking driver should wait for transactions to
    /// be included in a block before moving on to the next transaction in the sequence.
    ///
    /// This behavior is useful in certain cases and not so useful in others. For example, in some
    /// repetition block, if there's some kind of relationship between txs n and n+1 (for example a
    /// mint then a transfer) then you would want to wait for the minting to happen and then move on
    /// to the transfers. On the other hand, if there's no relationship between the transactions n
    /// and n+1 (e.g., a mint and another mint of a different token) then awaiting the first mint's
    /// inclusion in a block might not be necessary.
    ///
    /// By default, this behavior is set to false to allow the benchmarking framework to saturate
    /// the node's mempool as quickly as possible. However, as explained above, there are cases
    /// where it's needed, and certain workloads where failure to provide this argument would lead
    /// to inaccurate results.
    #[arg(long)]
    pub await_transaction_inclusion: bool,

    /// Configuration parameters for the corpus files to use.
    #[clap(flatten, next_help_heading = "Corpus Configuration")]
    pub corpus_configuration: CorpusConfiguration,

@@ -403,6 +436,10 @@ pub struct BenchmarkingContext {
    #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
    pub revive_dev_node_configuration: ReviveDevNodeConfiguration,

    /// Configuration parameters for the Polkadot Omnichain Node.
    #[clap(flatten, next_help_heading = "Polkadot Omnichain Node Configuration")]
    pub polkadot_omnichain_node_configuration: PolkadotOmnichainNodeConfiguration,

    /// Configuration parameters for the Eth RPC.
    #[clap(flatten, next_help_heading = "Eth RPC Configuration")]
    pub eth_rpc_configuration: EthRpcConfiguration,

@@ -481,6 +518,10 @@ pub struct ExportGenesisContext {
    #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
    pub revive_dev_node_configuration: ReviveDevNodeConfiguration,

    /// Configuration parameters for the Polkadot Omnichain Node.
    #[clap(flatten, next_help_heading = "Polkadot Omnichain Node Configuration")]
    pub polkadot_omnichain_node_configuration: PolkadotOmnichainNodeConfiguration,

    /// Configuration parameters for the wallet.
    #[clap(flatten, next_help_heading = "Wallet Configuration")]
    pub wallet_configuration: WalletConfiguration,

@@ -540,6 +581,12 @@ impl AsRef<ReviveDevNodeConfiguration> for TestExecutionContext {
    }
}

impl AsRef<PolkadotOmnichainNodeConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &PolkadotOmnichainNodeConfiguration {
        &self.polkadot_omnichain_node_configuration
    }
}

impl AsRef<EthRpcConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &EthRpcConfiguration {
        &self.eth_rpc_configuration

@@ -636,6 +683,12 @@ impl AsRef<ReviveDevNodeConfiguration> for BenchmarkingContext {
    }
}

impl AsRef<PolkadotOmnichainNodeConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &PolkadotOmnichainNodeConfiguration {
        &self.polkadot_omnichain_node_configuration
    }
}

impl AsRef<EthRpcConfiguration> for BenchmarkingContext {
    fn as_ref(&self) -> &EthRpcConfiguration {
        &self.eth_rpc_configuration

@@ -696,6 +749,12 @@ impl AsRef<ReviveDevNodeConfiguration> for ExportGenesisContext {
    }
}

impl AsRef<PolkadotOmnichainNodeConfiguration> for ExportGenesisContext {
    fn as_ref(&self) -> &PolkadotOmnichainNodeConfiguration {
        &self.polkadot_omnichain_node_configuration
    }
}

impl AsRef<WalletConfiguration> for ExportGenesisContext {
    fn as_ref(&self) -> &WalletConfiguration {
        &self.wallet_configuration

@@ -741,6 +800,17 @@ pub struct ResolcConfiguration {
    /// provided in the user's $PATH.
    #[clap(id = "resolc.path", long = "resolc.path", default_value = "resolc")]
    pub path: PathBuf,

    /// Specifies the PVM heap size in bytes.
    ///
    /// If unspecified, the revive compiler default is used.
    #[clap(id = "resolc.heap-size", long = "resolc.heap-size")]
    pub heap_size: Option<u32>,

    /// Specifies the PVM stack size in bytes.
    ///
    /// If unspecified, the revive compiler default is used.
    #[clap(id = "resolc.stack-size", long = "resolc.stack-size")]
    pub stack_size: Option<u32>,
}

/// A set of configuration parameters for Polkadot Parachain.

@@ -852,6 +922,54 @@ pub struct ReviveDevNodeConfiguration {
    pub existing_rpc_url: Vec<String>,
}

/// A set of configuration parameters for the polkadot-omni-node.
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct PolkadotOmnichainNodeConfiguration {
    /// Specifies the path of the polkadot-omni-node to be used by the tool.
    ///
    /// If this is not specified, then the tool assumes that it should use the polkadot-omni-node
    /// binary that's provided in the user's $PATH.
    #[clap(
        id = "polkadot-omni-node.path",
        long = "polkadot-omni-node.path",
        default_value = "polkadot-omni-node"
    )]
    pub path: PathBuf,

    /// The amount of time to wait upon startup before considering that the node timed out.
    #[clap(
        id = "polkadot-omni-node.start-timeout-ms",
        long = "polkadot-omni-node.start-timeout-ms",
        default_value = "90000",
        value_parser = parse_duration
    )]
    pub start_timeout_ms: Duration,

    /// Defines how often blocks will be sealed by the node, in milliseconds.
    #[clap(
        id = "polkadot-omni-node.block-time-ms",
        long = "polkadot-omni-node.block-time-ms",
        default_value = "200",
        value_parser = parse_duration
    )]
    pub block_time: Duration,

    /// The path of the chain-spec of the chain that we're spawning.
    #[clap(
        id = "polkadot-omni-node.chain-spec-path",
        long = "polkadot-omni-node.chain-spec-path"
    )]
    pub chain_spec_path: Option<PathBuf>,

    /// The ID of the parachain that the polkadot-omni-node will spawn. This argument is required if
    /// the polkadot-omni-node is one of the selected platforms for running the tests or benchmarks.
    #[clap(
        id = "polkadot-omni-node.parachain-id",
        long = "polkadot-omni-node.parachain-id"
    )]
    pub parachain_id: Option<usize>,
}

/// A set of configuration parameters for the ETH RPC.
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct EthRpcConfiguration {

@@ -1006,6 +1124,10 @@ pub struct ReportConfiguration {
    /// Controls if the compiler output is included in the final report.
    #[clap(long = "report.include-compiler-output")]
    pub include_compiler_output: bool,

    /// The filename to use for the report.
    #[clap(long = "report.file-name")]
    pub file_name: Option<String>,
}

#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
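All three contexts gain the new omni-node options through the same mechanism: the flags are declared once in `PolkadotOmnichainNodeConfiguration` and `flatten`ed into each context struct. A minimal sketch of the pattern with a hypothetical context (the real structs carry many more groups):

```rust
use clap::Parser;

// `ExampleContext` is hypothetical; `PolkadotOmnichainNodeConfiguration` is
// the real group from this file. Flattening merges its flags, under the given
// help heading, into this parser's --help output.
#[derive(Debug, Parser)]
struct ExampleContext {
    /// Configuration parameters for the Polkadot Omnichain Node.
    #[clap(flatten, next_help_heading = "Polkadot Omnichain Node Configuration")]
    polkadot_omnichain_node_configuration: PolkadotOmnichainNodeConfiguration,
}
```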
@@ -37,6 +37,7 @@ schemars = { workspace = true }
semver = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
subxt = { workspace = true }

[lints]
workspace = true
@@ -1,6 +1,5 @@
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
ops::ControlFlow,
|
||||
sync::{
|
||||
Arc,
|
||||
atomic::{AtomicUsize, Ordering},
|
||||
@@ -13,6 +12,7 @@ use alloy::{
|
||||
json_abi::JsonAbi,
|
||||
network::{Ethereum, TransactionBuilder},
|
||||
primitives::{Address, TxHash, U256},
|
||||
providers::Provider,
|
||||
rpc::types::{
|
||||
TransactionReceipt, TransactionRequest,
|
||||
trace::geth::{
|
||||
@@ -22,12 +22,9 @@ use alloy::{
|
||||
},
|
||||
};
|
||||
use anyhow::{Context as _, Result, bail};
|
||||
use futures::TryFutureExt;
|
||||
use futures::{FutureExt as _, TryFutureExt};
|
||||
use indexmap::IndexMap;
|
||||
use revive_dt_common::{
|
||||
futures::{PollingWaitBehavior, poll},
|
||||
types::PrivateKeyAllocator,
|
||||
};
|
||||
use revive_dt_common::types::PrivateKeyAllocator;
|
||||
use revive_dt_format::{
|
||||
metadata::{ContractInstance, ContractPathAndIdent},
|
||||
steps::{
|
||||
@@ -37,7 +34,7 @@ use revive_dt_format::{
|
||||
traits::{ResolutionContext, ResolverApi},
|
||||
};
|
||||
use tokio::sync::{Mutex, OnceCell, mpsc::UnboundedSender};
|
||||
use tracing::{Instrument, Span, debug, error, field::display, info, info_span, instrument};
|
||||
use tracing::{Span, debug, error, field::display, info, instrument};
|
||||
|
||||
use crate::{
|
||||
differential_benchmarks::{ExecutionState, WatcherEvent},
|
||||
@@ -73,6 +70,10 @@ pub struct Driver<'a, I> {
|
||||
/// The number of steps that were executed on the driver.
|
||||
steps_executed: usize,
|
||||
|
||||
/// This function controls if the driver should wait for transactions to be included in a block
|
||||
/// or not before proceeding forward.
|
||||
await_transaction_inclusion: bool,
|
||||
|
||||
/// This is the queue of steps that are to be executed by the driver for this test case. Each
|
||||
/// time `execute_step` is called one of the steps is executed.
|
||||
steps_iterator: I,
|
||||
@@ -89,6 +90,7 @@ where
|
||||
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
|
||||
cached_compiler: &CachedCompiler<'a>,
|
||||
watcher_tx: UnboundedSender<WatcherEvent>,
|
||||
await_transaction_inclusion: bool,
|
||||
steps: I,
|
||||
) -> Result<Self> {
|
||||
let mut this = Driver {
|
||||
@@ -104,6 +106,7 @@ where
|
||||
execution_state: ExecutionState::empty(),
|
||||
steps_executed: 0,
|
||||
steps_iterator: steps,
|
||||
await_transaction_inclusion,
|
||||
watcher_tx,
|
||||
};
|
||||
this.init_execution_state(cached_compiler)
|
||||
@@ -127,6 +130,8 @@ where
|
||||
.inspect_err(|err| error!(?err, "Pre-linking compilation failed"))
|
||||
.context("Failed to produce the pre-linking compiled contracts")?;
|
||||
|
||||
let deployer_address = self.test_definition.case.deployer_address();
|
||||
|
||||
let mut deployed_libraries = None::<HashMap<_, _>>;
|
||||
let mut contract_sources = self
|
||||
.test_definition
|
||||
@@ -159,29 +164,12 @@ where
|
||||
|
||||
let code = alloy::hex::decode(code)?;
|
||||
|
||||
// Getting the deployer address from the cases themselves. This is to ensure
|
||||
// that we're doing the deployments from different accounts and therefore we're
|
||||
// not slowed down by the nonce.
|
||||
let deployer_address = self
|
||||
.test_definition
|
||||
.case
|
||||
.steps
|
||||
.iter()
|
||||
.filter_map(|step| match step {
|
||||
Step::FunctionCall(input) => input.caller.as_address().copied(),
|
||||
Step::BalanceAssertion(..) => None,
|
||||
Step::StorageEmptyAssertion(..) => None,
Step::Repeat(..) => None,
Step::AllocateAccount(..) => None,
})
.next()
.unwrap_or(FunctionCallStep::default_caller_address());
let tx = TransactionBuilder::<Ethereum>::with_deploy_code(
TransactionRequest::default().from(deployer_address),
code,
);
let receipt = self
.execute_transaction(tx, None)
.execute_transaction(tx, None, Duration::from_secs(5 * 60))
.and_then(|(_, receipt_fut)| receipt_fut)
.await
.inspect_err(|err| {
@@ -380,7 +368,30 @@ where
let tx = step
.as_transaction(self.resolver.as_ref(), self.default_resolution_context())
.await?;
Ok(self.execute_transaction(tx, Some(step_path)).await?.0)

let (tx_hash, receipt_future) = self
.execute_transaction(tx.clone(), Some(step_path), Duration::from_secs(30 * 60))
.await?;
if self.await_transaction_inclusion {
let receipt = receipt_future
.await
.context("Failed while waiting for transaction inclusion in block")?;

if !receipt.status() {
error!(
?tx,
tx.hash = %receipt.transaction_hash,
?receipt,
"Encountered a failing benchmark transaction"
);
bail!(
"Encountered a failing transaction in benchmarks: {}",
receipt.transaction_hash
)
}
}

Ok(tx_hash)
}
}
}
@@ -481,6 +492,7 @@ where
.collect::<Vec<_>>();
steps.into_iter()
},
await_transaction_inclusion: self.await_transaction_inclusion,
watcher_tx: self.watcher_tx.clone(),
})
.map(|driver| driver.execute_all());
@@ -647,7 +659,7 @@ where
};

let receipt = match self
.execute_transaction(tx, step_path)
.execute_transaction(tx, step_path, Duration::from_secs(5 * 60))
.and_then(|(_, receipt_fut)| receipt_fut)
.await
{
@@ -692,18 +704,33 @@ where
#[instrument(
level = "info",
skip_all,
fields(driver_id = self.driver_id, transaction_hash = tracing::field::Empty)
fields(
driver_id = self.driver_id,
transaction = ?transaction,
transaction_hash = tracing::field::Empty
),
err(Debug)
)]
async fn execute_transaction(
&self,
transaction: TransactionRequest,
step_path: Option<&StepPath>,
receipt_wait_duration: Duration,
) -> anyhow::Result<(TxHash, impl Future<Output = Result<TransactionReceipt>>)> {
let node = self.platform_information.node;
let transaction_hash = node
.submit_transaction(transaction)
let provider = node.provider().await.context("Creating provider failed")?;

let pending_transaction_builder = provider
.send_transaction(transaction)
.await
.context("Failed to submit transaction")?;

let transaction_hash = *pending_transaction_builder.tx_hash();
let receipt_future = pending_transaction_builder
.with_timeout(Some(receipt_wait_duration))
.with_required_confirmations(2)
.get_receipt()
.map(|res| res.context("Failed to get the receipt of the transaction"));
Span::current().record("transaction_hash", display(transaction_hash));

info!("Submitted transaction");
@@ -716,28 +743,7 @@ where
.context("Failed to send the transaction hash to the watcher")?;
};

Ok((transaction_hash, async move {
info!("Starting to poll for transaction receipt");
poll(
Duration::from_secs(30 * 60),
PollingWaitBehavior::Constant(Duration::from_secs(1)),
|| {
async move {
match node.get_receipt(transaction_hash).await {
Ok(receipt) => {
info!("Polling succeeded, receipt found");
Ok(ControlFlow::Break(receipt))
}
Err(_) => Ok(ControlFlow::Continue(())),
}
}
.instrument(info_span!("Polling for receipt"))
},
)
.instrument(info_span!("Polling for receipt", %transaction_hash))
.await
.inspect(|_| info!("Found the transaction receipt"))
}))
Ok((transaction_hash, receipt_future))
}
// endregion:Transaction Execution
}
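The reworked `execute_transaction` above splits submission from receipt retrieval: it returns the transaction hash together with a future that resolves to the receipt. A minimal sketch of a caller of this two-phase API; the `Driver` type and the timeout value here are illustrative assumptions, not part of the diff:

// Hypothetical caller of the two-phase API shown above; `Driver` is assumed.
async fn submit_and_maybe_wait(driver: &Driver, tx: TransactionRequest) -> anyhow::Result<TxHash> {
    let (tx_hash, receipt_future) = driver
        .execute_transaction(tx, None, Duration::from_secs(30 * 60))
        .await?;
    // Await inclusion only when required; otherwise the hash alone suffices
    // and the receipt future can be dropped or polled later.
    let receipt = receipt_future.await?;
    anyhow::ensure!(receipt.status(), "transaction reverted");
    Ok(tx_hash)
}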
@@ -160,6 +160,7 @@ pub async fn handle_differential_benchmarks(
private_key_allocator,
cached_compiler.as_ref(),
watcher_tx.clone(),
context.await_transaction_inclusion,
test_definition
.case
.steps_iterator_for_benchmarks(context.default_repetition_count)

@@ -139,23 +139,18 @@ impl Watcher {
break;
}

info!(
block_number = block.ethereum_block_information.block_number,
block_tx_count = block.ethereum_block_information.transaction_hashes.len(),
remaining_transactions = watch_for_transaction_hashes.read().await.len(),
"Observed a block"
);

// Remove all of the transaction hashes observed in this block from the txs we
// are currently watching for.
let mut watch_for_transaction_hashes =
watch_for_transaction_hashes.write().await;
let mut relevant_transactions_observed = 0;
for tx_hash in block.ethereum_block_information.transaction_hashes.iter() {
let Some((step_path, submission_time)) =
watch_for_transaction_hashes.remove(tx_hash)
else {
continue;
};
relevant_transactions_observed += 1;
let transaction_information = TransactionInformation {
transaction_hash: *tx_hash,
submission_timestamp: submission_time
@@ -172,6 +167,14 @@ impl Watcher {
)
.expect("Can't fail")
}

info!(
block_number = block.ethereum_block_information.block_number,
block_tx_count = block.ethereum_block_information.transaction_hashes.len(),
relevant_transactions_observed,
remaining_transactions = watch_for_transaction_hashes.len(),
"Observed a block"
);
}

info!("Watcher's Block Watching Task Finished");
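The watcher hunk above keeps a shared map from pending transaction hash to its step path and submission time, and drains entries as their hashes appear in observed blocks. A condensed, self-contained sketch of that bookkeeping, with simplified types standing in for the real ones:

use std::collections::HashMap;

// Simplified stand-ins for the real StepPath and timestamp types.
type StepPath = String;
type SubmissionTime = std::time::Instant;

fn drain_observed(
    watching: &mut HashMap<[u8; 32], (StepPath, SubmissionTime)>,
    block_tx_hashes: &[[u8; 32]],
) -> usize {
    let mut observed = 0;
    for tx_hash in block_tx_hashes {
        // Only transactions we submitted ourselves are of interest; anything
        // else in the block is skipped.
        if watching.remove(tx_hash).is_some() {
            observed += 1;
        }
    }
    observed
}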
@@ -8,7 +8,7 @@ use alloy::{
hex,
json_abi::JsonAbi,
network::{Ethereum, TransactionBuilder},
primitives::{Address, TxHash, U256},
primitives::{Address, TxHash, U256, address},
rpc::types::{
TransactionReceipt, TransactionRequest,
trace::geth::{
@@ -18,9 +18,9 @@ use alloy::{
},
};
use anyhow::{Context as _, Result, bail};
use futures::TryStreamExt;
use futures::{TryStreamExt, future::try_join_all};
use indexmap::IndexMap;
use revive_dt_common::types::{PlatformIdentifier, PrivateKeyAllocator};
use revive_dt_common::types::{PlatformIdentifier, PrivateKeyAllocator, VmIdentifier};
use revive_dt_format::{
metadata::{ContractInstance, ContractPathAndIdent},
steps::{
@@ -30,6 +30,7 @@ use revive_dt_format::{
},
traits::ResolutionContext,
};
use subxt::{ext::codec::Decode, metadata::Metadata, tx::Payload};
use tokio::sync::Mutex;
use tracing::{error, info, instrument};

@@ -198,6 +199,8 @@ where
})
.context("Failed to produce the pre-linking compiled contracts")?;

let deployer_address = test_definition.case.deployer_address();

let mut deployed_libraries = None::<HashMap<_, _>>;
let mut contract_sources = test_definition
.metadata
@@ -232,22 +235,6 @@ where

let code = alloy::hex::decode(code)?;

// Getting the deployer address from the cases themselves. This is to ensure
// that we're doing the deployments from different accounts and therefore we're
// not slowed down by the nonce.
let deployer_address = test_definition
.case
.steps
.iter()
.filter_map(|step| match step {
Step::FunctionCall(input) => input.caller.as_address().copied(),
Step::BalanceAssertion(..) => None,
Step::StorageEmptyAssertion(..) => None,
Step::Repeat(..) => None,
Step::AllocateAccount(..) => None,
})
.next()
.unwrap_or(FunctionCallStep::default_caller_address());
let tx = TransactionBuilder::<Ethereum>::with_deploy_code(
TransactionRequest::default().from(deployer_address),
code,
@@ -295,6 +282,51 @@ where
})
.context("Failed to compile the post-link contracts")?;

// Factory contracts on the PVM refer to the code that they're instantiating by hash rather
// than including the actual bytecode. This creates a problem where a factory contract could
// be deployed but the code it's supposed to create is not on chain. Therefore, we upload
// all the code to the chain prior to running any transactions on the driver.
if platform_information.platform.vm_identifier() == VmIdentifier::PolkaVM {
#[subxt::subxt(runtime_metadata_path = "../../assets/revive_metadata.scale")]
pub mod revive {}

let metadata_bytes = include_bytes!("../../../../assets/revive_metadata.scale");
let metadata = Metadata::decode(&mut &metadata_bytes[..])
.context("Failed to decode the revive metadata")?;

const RUNTIME_PALLET_ADDRESS: Address =
address!("0x6d6f646c70792f70616464720000000000000000");
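// Note on the constant above (reading of the convention, not stated in the
// diff itself): decoding the hex as ASCII gives "modl" followed by "py/paddr"
// and zero padding, i.e. the standard Substrate PalletId-derived account
// ("modl" + pallet id). Transactions sent `to` this address with SCALE-encoded
// call data in the input are dispatched as runtime calls.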
let code_upload_tasks = compiler_output
.contracts
.values()
.flat_map(|item| item.values())
.map(|(code_string, _)| {
let metadata = metadata.clone();
async move {
let code = alloy::hex::decode(code_string)
.context("Failed to hex-decode the post-link code. This is a bug")?;
let payload = revive::tx().revive().upload_code(code, u128::MAX);
let encoded_payload = payload
.encode_call_data(&metadata)
.context("Failed to encode the upload code payload")?;

let tx_request = TransactionRequest::default()
.from(deployer_address)
.to(RUNTIME_PALLET_ADDRESS)
.input(encoded_payload.into());
platform_information
.node
.execute_transaction(tx_request)
.await
.context("Failed to execute transaction")
}
});
try_join_all(code_upload_tasks)
.await
.context("Code upload failed")?;
}

Ok(ExecutionState::new(
compiler_output.contracts,
deployed_libraries.unwrap_or_default(),
@@ -377,7 +409,6 @@ where
.handle_function_call_execution(step, deployment_receipts)
.await
.context("Failed to handle the function call execution")?;
tracing::Span::current().record("block_number", execution_receipt.block_number);
let tracing_result = self
.handle_function_call_call_frame_tracing(execution_receipt.transaction_hash)
.await

@@ -14,9 +14,12 @@ use revive_dt_common::types::*;
use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::*;
use revive_dt_node::{
Node, node_implementations::geth::GethNode,
node_implementations::lighthouse_geth::LighthouseGethNode,
node_implementations::substrate::SubstrateNode, node_implementations::zombienet::ZombienetNode,
Node,
node_implementations::{
geth::GethNode, lighthouse_geth::LighthouseGethNode,
polkadot_omni_node::PolkadotOmnichainNode, substrate::SubstrateNode,
zombienet::ZombienetNode,
},
};
use revive_dt_node_interaction::EthereumNode;
use tracing::info;
@@ -91,7 +94,8 @@ impl Platform for GethEvmSolcPlatform {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = GethNode::new(context);
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = GethNode::new(context, use_fallback_gas_filler);
let node = spawn_node::<GethNode>(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
@@ -145,7 +149,8 @@ impl Platform for LighthouseGethEvmSolcPlatform {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = LighthouseGethNode::new(context);
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = LighthouseGethNode::new(context, use_fallback_gas_filler);
let node = spawn_node::<LighthouseGethNode>(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
@@ -206,12 +211,14 @@ impl Platform for ReviveDevNodePolkavmResolcPlatform {

let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = SubstrateNode::new(
revive_dev_node_path,
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
Some(revive_dev_node_consensus),
context,
&eth_rpc_connection_strings,
use_fallback_gas_filler,
);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
@@ -274,12 +281,14 @@ impl Platform for ReviveDevNodeRevmSolcPlatform {

let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = SubstrateNode::new(
revive_dev_node_path,
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
Some(revive_dev_node_consensus),
context,
&eth_rpc_connection_strings,
use_fallback_gas_filler,
);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
@@ -338,7 +347,9 @@ impl Platform for ZombienetPolkavmResolcPlatform {
.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = ZombienetNode::new(polkadot_parachain_path, context);
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node =
ZombienetNode::new(polkadot_parachain_path, context, use_fallback_gas_filler);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
@@ -395,7 +406,9 @@ impl Platform for ZombienetRevmSolcPlatform {
.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = ZombienetNode::new(polkadot_parachain_path, context);
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node =
ZombienetNode::new(polkadot_parachain_path, context, use_fallback_gas_filler);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
@@ -422,6 +435,126 @@ impl Platform for ZombienetRevmSolcPlatform {
}
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct PolkadotOmniNodePolkavmResolcPlatform;

impl Platform for PolkadotOmniNodePolkavmResolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::PolkadotOmniNodePolkavmResolc
}

fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::PolkadotOmniNode
}

fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::PolkaVM
}

fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Resolc
}

fn new_node(
&self,
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = PolkadotOmnichainNode::new(context, use_fallback_gas_filler);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
}

fn new_compiler(
&self,
context: Context,
version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move {
let compiler = Resolc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}

fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
let polkadot_omnichain_node_configuration =
AsRef::<PolkadotOmnichainNodeConfiguration>::as_ref(&context);
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();

PolkadotOmnichainNode::node_genesis(
&wallet,
polkadot_omnichain_node_configuration
.chain_spec_path
.as_ref()
.context("No WASM runtime path found in the polkadot-omni-node configuration")?,
)
}
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct PolkadotOmniNodeRevmSolcPlatform;

impl Platform for PolkadotOmniNodeRevmSolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::PolkadotOmniNodeRevmSolc
}

fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::PolkadotOmniNode
}

fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::Evm
}

fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Solc
}

fn new_node(
&self,
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let use_fallback_gas_filler = matches!(context, Context::Test(..));
let node = PolkadotOmnichainNode::new(context, use_fallback_gas_filler);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
}

fn new_compiler(
&self,
context: Context,
version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move {
let compiler = Solc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}

fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
let polkadot_omnichain_node_configuration =
AsRef::<PolkadotOmnichainNodeConfiguration>::as_ref(&context);
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();

PolkadotOmnichainNode::node_genesis(
&wallet,
polkadot_omnichain_node_configuration
.chain_spec_path
.as_ref()
.context("No WASM runtime path found in the polkadot-omni-node configuration")?,
)
}
}

impl From<PlatformIdentifier> for Box<dyn Platform> {
fn from(value: PlatformIdentifier) -> Self {
match value {
@@ -439,6 +572,12 @@ impl From<PlatformIdentifier> for Box<dyn Platform> {
Box::new(ZombienetPolkavmResolcPlatform) as Box<_>
}
PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>,
PlatformIdentifier::PolkadotOmniNodePolkavmResolc => {
Box::new(PolkadotOmniNodePolkavmResolcPlatform) as Box<_>
}
PlatformIdentifier::PolkadotOmniNodeRevmSolc => {
Box::new(PolkadotOmniNodeRevmSolcPlatform) as Box<_>
}
}
}
}
@@ -460,6 +599,12 @@ impl From<PlatformIdentifier> for &dyn Platform {
&ZombienetPolkavmResolcPlatform as &dyn Platform
}
PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform,
PlatformIdentifier::PolkadotOmniNodePolkavmResolc => {
&PolkadotOmniNodePolkavmResolcPlatform as &dyn Platform
}
PlatformIdentifier::PolkadotOmniNodeRevmSolc => {
&PolkadotOmniNodeRevmSolcPlatform as &dyn Platform
}
}
}
}

@@ -2,9 +2,9 @@ mod differential_benchmarks;
mod differential_tests;
mod helpers;

use anyhow::Context as _;
use anyhow::{Context as _, bail};
use clap::Parser;
use revive_dt_report::ReportAggregator;
use revive_dt_report::{ReportAggregator, TestCaseStatus};
use schemars::schema_for;
use tracing::{info, level_filters::LevelFilter};
use tracing_subscriber::{EnvFilter, FmtSubscriber};
@@ -57,8 +57,22 @@ fn main() -> anyhow::Result<()> {
let differential_tests_handling_task =
handle_differential_tests(*context, reporter);

futures::future::try_join(differential_tests_handling_task, report_aggregator_task)
.await?;
let (_, report) = futures::future::try_join(
differential_tests_handling_task,
report_aggregator_task,
)
.await?;

let contains_failure = report
.execution_information
.values()
.flat_map(|values| values.case_reports.values())
.flat_map(|values| values.mode_execution_reports.values())
.any(|report| matches!(report.status, Some(TestCaseStatus::Failed { .. })));

if contains_failure {
bail!("Some tests failed")
}

Ok(())
}),
@@ -71,12 +85,23 @@ fn main() -> anyhow::Result<()> {
let differential_benchmarks_handling_task =
handle_differential_benchmarks(*context, reporter);

futures::future::try_join(
let (_, report) = futures::future::try_join(
differential_benchmarks_handling_task,
report_aggregator_task,
)
.await?;

let contains_failure = report
.execution_information
.values()
.flat_map(|values| values.case_reports.values())
.flat_map(|values| values.mode_execution_reports.values())
.any(|report| matches!(report.status, Some(TestCaseStatus::Failed { .. })));

if contains_failure {
bail!("Some benchmarks failed")
}

Ok(())
}),
Context::ExportGenesis(ref export_genesis_context) => {
@@ -85,11 +110,17 @@ fn main() -> anyhow::Result<()> {
let genesis_json = serde_json::to_string_pretty(&genesis)
.context("Failed to serialize the genesis to JSON")?;
println!("{genesis_json}");

Ok(())
}
Context::ExportJsonSchema => {
let schema = schema_for!(Metadata);
println!("{}", serde_json::to_string_pretty(&schema).unwrap());
println!(
"{}",
serde_json::to_string_pretty(&schema)
.context("Failed to export the JSON schema")?
);

Ok(())
}
}
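The failure-detection chain is duplicated verbatim between the tests and benchmarks arms above; a small helper along these lines (hypothetical, not part of the diff; `Report` stands in for the aggregator's output type) would keep the two arms in sync:

// Hypothetical helper; `Report` is assumed to be the aggregator's result type.
fn report_contains_failure(report: &Report) -> bool {
    report
        .execution_information
        .values()
        .flat_map(|values| values.case_reports.values())
        .flat_map(|values| values.mode_execution_reports.values())
        .any(|report| matches!(report.status, Some(TestCaseStatus::Failed { .. })))
}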
@@ -1,3 +1,4 @@
use alloy::primitives::Address;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

@@ -107,6 +108,20 @@ impl Case {
None => Mode::all().cloned().collect(),
}
}

pub fn deployer_address(&self) -> Address {
self.steps
.iter()
.filter_map(|step| match step {
Step::FunctionCall(input) => input.caller.as_address().copied(),
Step::BalanceAssertion(..) => None,
Step::StorageEmptyAssertion(..) => None,
Step::Repeat(..) => None,
Step::AllocateAccount(..) => None,
})
.next()
.unwrap_or(FunctionCallStep::default_caller_address())
}
}
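With the logic extracted into `Case::deployer_address` above, call sites reduce to a single lookup, as in the driver hunk earlier in this diff. A minimal usage sketch, assuming a populated `case: Case`:

// First explicit FunctionCall caller wins; otherwise the default caller address.
let deployer = case.deployer_address();
let tx = TransactionRequest::default().from(deployer);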
define_wrapper_type!(

@@ -3,7 +3,6 @@
use std::{
fs::{File, create_dir_all, remove_dir_all},
io::Read,
ops::ControlFlow,
path::PathBuf,
pin::Pin,
process::{Command, Stdio},
@@ -35,12 +34,9 @@ use anyhow::Context as _;
use futures::{FutureExt, Stream, StreamExt};
use revive_common::EVMVersion;
use tokio::sync::OnceCell;
use tracing::{Instrument, error, instrument};
use tracing::{error, instrument};

use revive_dt_common::{
fs::clear_directory,
futures::{PollingWaitBehavior, poll},
};
use revive_dt_common::fs::clear_directory;
use revive_dt_config::*;
use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::EthereumNode;
@@ -76,6 +72,7 @@ pub struct GethNode {
wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
use_fallback_gas_filler: bool,
}

impl GethNode {
@@ -89,17 +86,12 @@ impl GethNode {
const READY_MARKER: &str = "IPC endpoint opened";
const ERROR_MARKER: &str = "Fatal:";

const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";

const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60);
const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60);

pub fn new(
context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<GethConfiguration>
+ Clone,
use_fallback_gas_filler: bool,
) -> Self {
let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
@@ -126,6 +118,7 @@ impl GethNode {
wallet: wallet.clone(),
nonce_manager: Default::default(),
provider: Default::default(),
use_fallback_gas_filler,
}
}

@@ -246,7 +239,8 @@ impl GethNode {
.get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>(
self.connection_string.as_str(),
FallbackGasFiller::default(),
FallbackGasFiller::default()
.with_fallback_mechanism(self.use_fallback_gas_filler),
ChainIdFiller::new(Some(CHAIN_ID)),
NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(),
@@ -337,62 +331,15 @@ impl EthereumNode for GethNode {
transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move {
let provider = self
.provider()
self.provider()
.await
.context("Failed to create provider for transaction submission")?;

let pending_transaction = provider
.context("Failed to create provider for transaction submission")?
.send_transaction(transaction)
.await
.inspect_err(
|err| error!(%err, "Encountered an error when submitting the transaction"),
)
.context("Failed to submit transaction to geth node")?;
let transaction_hash = *pending_transaction.tx_hash();

// The following is a fix for the "transaction indexing is in progress" error that we used
// to get. You can find more information on this in the following GH issue in geth
// https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
// before we can get the receipt of the transaction it needs to have been indexed by the
// node's indexer. Just because the transaction has been confirmed it doesn't mean that it
// has been indexed. When we call alloy's `get_receipt` it checks if the transaction was
// confirmed. If it has been, then it will call `eth_getTransactionReceipt` method which
// _might_ return the above error if the tx has not yet been indexed yet. So, we need to
// implement a retry mechanism for the receipt to keep retrying to get it until it
// eventually works, but we only do that if the error we get back is the "transaction
// indexing is in progress" error or if the receipt is None.
//
// Getting the transaction indexed and taking a receipt can take a long time especially when
// a lot of transactions are being submitted to the node. Thus, while initially we only
// allowed for 60 seconds of waiting with a 1 second delay in polling, we need to allow for
// a larger wait time. Therefore, in here we allow for 5 minutes of waiting with exponential
// backoff each time we attempt to get the receipt and find that it's not available.
poll(
Self::RECEIPT_POLLING_DURATION,
PollingWaitBehavior::Constant(Duration::from_millis(200)),
move || {
let provider = provider.clone();
async move {
match provider.get_transaction_receipt(transaction_hash).await {
Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
Ok(None) => Ok(ControlFlow::Continue(())),
Err(error) => {
let error_string = error.to_string();
match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) {
true => Ok(ControlFlow::Continue(())),
false => Err(error.into()),
}
}
}
}
},
)
.instrument(tracing::info_span!(
"Awaiting transaction receipt",
?transaction_hash
))
.await
.context("Encountered an error when submitting a transaction")?
.get_receipt()
.await
.context("Failed to get the receipt for the transaction")
})
}

@@ -403,34 +350,12 @@ impl EthereumNode for GethNode {
trace_options: GethDebugTracingOptions,
) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
Box::pin(async move {
let provider = self
.provider()
self.provider()
.await
.context("Failed to create provider for tracing")?;
poll(
Self::TRACE_POLLING_DURATION,
PollingWaitBehavior::Constant(Duration::from_millis(200)),
move || {
let provider = provider.clone();
let trace_options = trace_options.clone();
async move {
match provider
.debug_trace_transaction(tx_hash, trace_options)
.await
{
Ok(trace) => Ok(ControlFlow::Break(trace)),
Err(error) => {
let error_string = error.to_string();
match error_string.contains(Self::TRANSACTION_TRACING_ERROR) {
true => Ok(ControlFlow::Continue(())),
false => Err(error.into()),
}
}
}
}
},
)
.await
.context("Failed to create provider for tracing")?
.debug_trace_transaction(tx_hash, trace_options)
.await
.context("Failed to get the transaction trace")
})
}

@@ -742,7 +667,7 @@ mod tests {

fn new_node() -> (TestExecutionContext, GethNode) {
let context = test_config();
let mut node = GethNode::new(&context);
let mut node = GethNode::new(&context, true);
node.init(context.genesis_configuration.genesis().unwrap().clone())
.expect("Failed to initialize the node")
.spawn_process()

@@ -12,7 +12,6 @@ use std::{
collections::{BTreeMap, HashSet},
fs::{File, create_dir_all},
io::Read,
ops::ControlFlow,
path::PathBuf,
pin::Pin,
process::{Command, Stdio},
@@ -48,12 +47,9 @@ use revive_common::EVMVersion;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_with::serde_as;
use tokio::sync::OnceCell;
use tracing::{Instrument, info, instrument};
use tracing::{info, instrument};

use revive_dt_common::{
fs::clear_directory,
futures::{PollingWaitBehavior, poll},
};
use revive_dt_common::fs::clear_directory;
use revive_dt_config::*;
use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::EthereumNode;
@@ -106,6 +102,8 @@ pub struct LighthouseGethNode {

persistent_http_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
persistent_ws_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,

use_fallback_gas_filler: bool,
}

impl LighthouseGethNode {
@@ -114,12 +112,6 @@ impl LighthouseGethNode {

const CONFIG_FILE_NAME: &str = "config.yaml";

const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";

const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60);
const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60);

const VALIDATOR_MNEMONIC: &str = "giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete";

pub fn new(
@@ -127,6 +119,7 @@ impl LighthouseGethNode {
+ AsRef<WalletConfiguration>
+ AsRef<KurtosisConfiguration>
+ Clone,
use_fallback_gas_filler: bool,
) -> Self {
let working_directory_configuration =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
@@ -176,6 +169,7 @@ impl LighthouseGethNode {
nonce_manager: Default::default(),
persistent_http_provider: OnceCell::const_new(),
persistent_ws_provider: OnceCell::const_new(),
use_fallback_gas_filler,
}
}

@@ -374,7 +368,8 @@ impl LighthouseGethNode {
.get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>(
self.ws_connection_string.as_str(),
FallbackGasFiller::default(),
FallbackGasFiller::default()
.with_fallback_mechanism(self.use_fallback_gas_filler),
ChainIdFiller::new(Some(CHAIN_ID)),
NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(),
@@ -476,73 +471,6 @@ impl LighthouseGethNode {
Ok(())
}

fn internal_execute_transaction<'a>(
transaction: TransactionRequest,
provider: FillProvider<
impl TxFiller<Ethereum> + 'a,
impl Provider<Ethereum> + Clone + 'a,
Ethereum,
>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + 'a>> {
Box::pin(async move {
let pending_transaction = provider
.send_transaction(transaction)
.await
.inspect_err(|err| {
tracing::error!(
%err,
"Encountered an error when submitting the transaction"
)
})
.context("Failed to submit transaction to geth node")?;
let transaction_hash = *pending_transaction.tx_hash();

// The following is a fix for the "transaction indexing is in progress" error that we
// used to get. You can find more information on this in the following GH issue in geth
// https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
// before we can get the receipt of the transaction it needs to have been indexed by the
// node's indexer. Just because the transaction has been confirmed it doesn't mean that
// it has been indexed. When we call alloy's `get_receipt` it checks if the transaction
// was confirmed. If it has been, then it will call `eth_getTransactionReceipt` method
// which _might_ return the above error if the tx has not yet been indexed yet. So, we
// need to implement a retry mechanism for the receipt to keep retrying to get it until
// it eventually works, but we only do that if the error we get back is the "transaction
// indexing is in progress" error or if the receipt is None.
//
// Getting the transaction indexed and taking a receipt can take a long time especially
// when a lot of transactions are being submitted to the node. Thus, while initially we
// only allowed for 60 seconds of waiting with a 1 second delay in polling, we need to
// allow for a larger wait time. Therefore, in here we allow for 5 minutes of waiting
// with exponential backoff each time we attempt to get the receipt and find that it's
// not available.
poll(
Self::RECEIPT_POLLING_DURATION,
PollingWaitBehavior::Constant(Duration::from_millis(500)),
move || {
let provider = provider.clone();
async move {
match provider.get_transaction_receipt(transaction_hash).await {
Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
Ok(None) => Ok(ControlFlow::Continue(())),
Err(error) => {
let error_string = error.to_string();
match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) {
true => Ok(ControlFlow::Continue(())),
false => Err(error.into()),
}
}
}
}
},
)
.instrument(tracing::info_span!(
"Awaiting transaction receipt",
?transaction_hash
))
.await
})
}

pub fn node_genesis(mut genesis: Genesis, wallet: &EthereumWallet) -> Genesis {
for signer_address in NetworkWallet::<Ethereum>::signer_addresses(&wallet) {
genesis
@@ -621,11 +549,15 @@ impl EthereumNode for LighthouseGethNode {
transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move {
let provider = self
.http_provider()
self.provider()
.await
.context("Failed to create provider for transaction execution")?;
Self::internal_execute_transaction(transaction, provider).await
.context("Failed to create provider for transaction submission")?
.send_transaction(transaction)
.await
.context("Encountered an error when submitting a transaction")?
.get_receipt()
.await
.context("Failed to get the receipt for the transaction")
})
}

@@ -636,35 +568,12 @@ impl EthereumNode for LighthouseGethNode {
trace_options: GethDebugTracingOptions,
) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
Box::pin(async move {
let provider = Arc::new(
self.http_provider()
.await
.context("Failed to create provider for tracing")?,
);
poll(
Self::TRACE_POLLING_DURATION,
PollingWaitBehavior::Constant(Duration::from_millis(200)),
move || {
let provider = provider.clone();
let trace_options = trace_options.clone();
async move {
match provider
.debug_trace_transaction(tx_hash, trace_options)
.await
{
Ok(trace) => Ok(ControlFlow::Break(trace)),
Err(error) => {
let error_string = error.to_string();
match error_string.contains(Self::TRANSACTION_TRACING_ERROR) {
true => Ok(ControlFlow::Continue(())),
false => Err(error.into()),
}
}
}
}
},
)
.await
self.provider()
.await
.context("Failed to create provider for tracing")?
.debug_trace_transaction(tx_hash, trace_options)
.await
.context("Failed to get the transaction trace")
})
}

@@ -1152,7 +1061,7 @@ mod tests {
let _guard = NODE_START_MUTEX.lock().unwrap();

let context = test_config();
let mut node = LighthouseGethNode::new(&context);
let mut node = LighthouseGethNode::new(&context, true);
node.init(context.genesis_configuration.genesis().unwrap().clone())
.expect("Failed to initialize the node")
.spawn_process()

@@ -1,4 +1,5 @@
pub mod geth;
pub mod lighthouse_geth;
pub mod polkadot_omni_node;
pub mod substrate;
pub mod zombienet;

@@ -0,0 +1,791 @@
use std::{
fs::{File, create_dir_all, remove_dir_all},
path::{Path, PathBuf},
pin::Pin,
process::{Command, Stdio},
sync::{
Arc,
atomic::{AtomicU32, Ordering},
},
time::Duration,
};

use alloy::{
eips::BlockNumberOrTag,
genesis::Genesis,
network::{Ethereum, EthereumWallet, NetworkWallet},
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
providers::{
Provider,
ext::DebugApi,
fillers::{CachedNonceManager, ChainIdFiller, NonceFiller},
},
rpc::types::{
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
trace::geth::{
DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame,
},
},
};
use anyhow::Context as _;
use futures::{FutureExt, Stream, StreamExt};
use revive_common::EVMVersion;
use revive_dt_common::fs::clear_directory;
use revive_dt_format::traits::ResolverApi;
use serde_json::json;
use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32;

use revive_dt_config::*;
use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{
EthereumMinedBlockInformation, MinedBlockInformation, SubstrateMinedBlockInformation,
};
use subxt::{OnlineClient, SubstrateConfig};
use tokio::sync::OnceCell;
use tracing::{instrument, trace};

use crate::{
Node,
constants::INITIAL_BALANCE,
helpers::{Process, ProcessReadinessWaitBehavior},
provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
};

static NODE_COUNT: AtomicU32 = AtomicU32::new(0);

/// The number of blocks that should be cached by the polkadot-omni-node and the eth-rpc.
const NUMBER_OF_CACHED_BLOCKS: u32 = 100_000;

/// A node implementation for the polkadot-omni-node.
#[derive(Debug)]
pub struct PolkadotOmnichainNode {
/// The id of the node.
id: u32,

/// The path of the polkadot-omni-chain node binary.
polkadot_omnichain_node_binary_path: PathBuf,
/// The path of the eth-rpc binary.
eth_rpc_binary_path: PathBuf,
/// The path of the runtime's WASM that this node will be spawned with.
chain_spec_path: Option<PathBuf>,
/// The path of the base directory which contains all of the stored data for this node.
base_directory_path: PathBuf,
/// The path of the logs directory which contains all of the stored logs.
logs_directory_path: PathBuf,

/// Defines the amount of time to wait before considering that the node start has timed out.
node_start_timeout: Duration,

/// The id of the parachain that this node will be spawning.
parachain_id: Option<usize>,
/// The block time.
block_time: Duration,

/// The node's process.
polkadot_omnichain_node_process: Option<Process>,
/// The eth-rpc's process.
eth_rpc_process: Option<Process>,

/// The URL of the eth-rpc.
rpc_url: String,
/// The wallet object that's used to sign any transaction submitted through this node.
wallet: Arc<EthereumWallet>,
/// The nonce manager used to populate nonces for all transactions submitted through this node.
nonce_manager: CachedNonceManager,
/// The provider used for all RPC interactions with the RPC of this node.
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,

/// A boolean that controls if the fallback gas filler should be used or not.
use_fallback_gas_filler: bool,
}

impl PolkadotOmnichainNode {
const BASE_DIRECTORY: &str = "polkadot-omni-node";
const LOGS_DIRECTORY: &str = "logs";

const POLKADOT_OMNICHAIN_NODE_READY_MARKER: &str = "Running JSON-RPC server";
const ETH_RPC_READY_MARKER: &str = "Running JSON-RPC server";
const CHAIN_SPEC_JSON_FILE: &str = "template_chainspec.json";
const BASE_POLKADOT_OMNICHAIN_NODE_RPC_PORT: u16 = 9944;
const BASE_ETH_RPC_PORT: u16 = 8545;

const POLKADOT_OMNICHAIN_NODE_LOG_ENV: &str =
"error,evm=debug,sc_rpc_server=info,runtime::revive=debug";
const RPC_LOG_ENV: &str = "info,eth-rpc=debug";

pub fn new(
context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<EthRpcConfiguration>
+ AsRef<WalletConfiguration>
+ AsRef<PolkadotOmnichainNodeConfiguration>,
use_fallback_gas_filler: bool,
) -> Self {
let polkadot_omnichain_node_configuration =
AsRef::<PolkadotOmnichainNodeConfiguration>::as_ref(&context);
let working_directory_path =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context).as_path();
let eth_rpc_path = AsRef::<EthRpcConfiguration>::as_ref(&context)
.path
.as_path();
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();

let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
let base_directory = working_directory_path
.join(Self::BASE_DIRECTORY)
.join(id.to_string());
let logs_directory = base_directory.join(Self::LOGS_DIRECTORY);

Self {
id,
polkadot_omnichain_node_binary_path: polkadot_omnichain_node_configuration
.path
.to_path_buf(),
eth_rpc_binary_path: eth_rpc_path.to_path_buf(),
chain_spec_path: polkadot_omnichain_node_configuration
.chain_spec_path
.clone(),
base_directory_path: base_directory,
logs_directory_path: logs_directory,
parachain_id: polkadot_omnichain_node_configuration.parachain_id,
block_time: polkadot_omnichain_node_configuration.block_time,
polkadot_omnichain_node_process: Default::default(),
eth_rpc_process: Default::default(),
rpc_url: Default::default(),
wallet,
nonce_manager: Default::default(),
provider: Default::default(),
use_fallback_gas_filler,
node_start_timeout: polkadot_omnichain_node_configuration.start_timeout_ms,
}
}

fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> {
trace!("Removing the various directories");
let _ = remove_dir_all(self.base_directory_path.as_path());
let _ = clear_directory(&self.base_directory_path);
let _ = clear_directory(&self.logs_directory_path);

trace!("Creating the various directories");
create_dir_all(&self.base_directory_path)
.context("Failed to create base directory for polkadot-omni-node node")?;
create_dir_all(&self.logs_directory_path)
.context("Failed to create logs directory for polkadot-omni-node node")?;

let template_chainspec_path = self.base_directory_path.join(Self::CHAIN_SPEC_JSON_FILE);

let chainspec_json = Self::node_genesis(
&self.wallet,
self.chain_spec_path
.as_ref()
.context("No runtime path provided")?,
)
.context("Failed to prepare the chainspec command")?;

serde_json::to_writer_pretty(
std::fs::File::create(&template_chainspec_path)
.context("Failed to create polkadot-omni-node template chainspec file")?,
&chainspec_json,
)
.context("Failed to write polkadot-omni-node template chainspec JSON")?;

Ok(self)
}

fn spawn_process(&mut self) -> anyhow::Result<()> {
// Error out if the runtime's path or the parachain id are not set which means that the
// arguments we require were not provided.
self.chain_spec_path
.as_ref()
.context("No WASM path provided for the runtime")?;
self.parachain_id
.as_ref()
.context("No argument provided for the parachain-id")?;

let polkadot_omnichain_node_rpc_port =
Self::BASE_POLKADOT_OMNICHAIN_NODE_RPC_PORT + self.id as u16;
let eth_rpc_port = Self::BASE_ETH_RPC_PORT + self.id as u16;
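// Port layout (follows directly from the constants above): node 0 binds the
// substrate RPC on 9944 and the eth-rpc on 8545, node 1 on 9945/8546, and so
// on, so concurrently spawned nodes never collide on ports.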
let chainspec_path = self.base_directory_path.join(Self::CHAIN_SPEC_JSON_FILE);

self.rpc_url = format!("http://127.0.0.1:{eth_rpc_port}");

let polkadot_omnichain_node_process = Process::new(
"node",
self.logs_directory_path.as_path(),
self.polkadot_omnichain_node_binary_path.as_path(),
|command, stdout_file, stderr_file| {
command
.arg("--log")
.arg(Self::POLKADOT_OMNICHAIN_NODE_LOG_ENV)
.arg("--dev-block-time")
.arg(self.block_time.as_millis().to_string())
.arg("--rpc-port")
.arg(polkadot_omnichain_node_rpc_port.to_string())
.arg("--base-path")
.arg(self.base_directory_path.as_path())
.arg("--no-prometheus")
.arg("--no-hardware-benchmarks")
.arg("--authoring")
.arg("slot-based")
.arg("--chain")
.arg(chainspec_path)
.arg("--name")
.arg(format!("polkadot-omni-node-{}", self.id))
.arg("--rpc-methods")
.arg("unsafe")
.arg("--rpc-cors")
.arg("all")
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.arg("--pool-limit")
.arg(u32::MAX.to_string())
.arg("--pool-kbytes")
.arg(u32::MAX.to_string())
.arg("--state-pruning")
.arg(NUMBER_OF_CACHED_BLOCKS.to_string())
.env("RUST_LOG", Self::POLKADOT_OMNICHAIN_NODE_LOG_ENV)
.stdout(stdout_file)
.stderr(stderr_file);
},
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration: self.node_start_timeout,
check_function: Box::new(|_, stderr_line| match stderr_line {
Some(line) => Ok(line.contains(Self::POLKADOT_OMNICHAIN_NODE_READY_MARKER)),
None => Ok(false),
}),
},
);

match polkadot_omnichain_node_process {
Ok(process) => self.polkadot_omnichain_node_process = Some(process),
Err(err) => {
tracing::error!(
?err,
"Failed to start polkadot-omni-node, shutting down gracefully"
);
self.shutdown().context(
"Failed to gracefully shutdown after polkadot-omni-node start error",
)?;
return Err(err);
}
}

let eth_rpc_process = Process::new(
"eth-rpc",
self.logs_directory_path.as_path(),
self.eth_rpc_binary_path.as_path(),
|command, stdout_file, stderr_file| {
command
.arg("--dev")
.arg("--rpc-port")
.arg(eth_rpc_port.to_string())
.arg("--node-rpc-url")
.arg(format!("ws://127.0.0.1:{polkadot_omnichain_node_rpc_port}"))
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.arg("--index-last-n-blocks")
.arg(NUMBER_OF_CACHED_BLOCKS.to_string())
.arg("--cache-size")
.arg(NUMBER_OF_CACHED_BLOCKS.to_string())
.env("RUST_LOG", Self::RPC_LOG_ENV)
.stdout(stdout_file)
.stderr(stderr_file);
},
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration: Duration::from_secs(30),
check_function: Box::new(|_, stderr_line| match stderr_line {
Some(line) => Ok(line.contains(Self::ETH_RPC_READY_MARKER)),
None => Ok(false),
}),
},
);
match eth_rpc_process {
Ok(process) => self.eth_rpc_process = Some(process),
Err(err) => {
tracing::error!(?err, "Failed to start eth-rpc, shutting down gracefully");
self.shutdown()
.context("Failed to gracefully shutdown after eth-rpc start error")?;
return Err(err);
}
}

Ok(())
}

fn eth_to_substrate_address(address: &Address) -> String {
let eth_bytes = address.0.0;

let mut padded = [0xEEu8; 32];
padded[..20].copy_from_slice(&eth_bytes);

let account_id = AccountId32::from(padded);
account_id.to_ss58check()
}
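// Illustration of the mapping above (byte values are an assumed example, not
// from the diff): the 20-byte Ethereum address fills the first 20 bytes of an
// AccountId32 and the remaining 12 bytes keep the 0xEE fill, which appears to
// match pallet-revive's convention for eth-derived accounts, before the whole
// 32 bytes are SS58-encoded.
//
//   eth:       0xAABB..CC                   (20 bytes)
//   account32: 0xAABB..CC EE EE .. EE       (20 bytes + 12 x 0xEE)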
pub fn eth_rpc_version(&self) -> anyhow::Result<String> {
let output = Command::new(&self.eth_rpc_binary_path)
.arg("--version")
.stdin(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn()?
.wait_with_output()?
.stdout;
Ok(String::from_utf8_lossy(&output).trim().to_string())
}

async fn provider(&self) -> anyhow::Result<ConcreteProvider<Ethereum, Arc<EthereumWallet>>> {
self.provider
.get_or_try_init(|| async move {
construct_concurrency_limited_provider::<Ethereum, _>(
self.rpc_url.as_str(),
FallbackGasFiller::default()
.with_fallback_mechanism(self.use_fallback_gas_filler),
ChainIdFiller::default(),
NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(),
)
.await
.context("Failed to construct the provider")
})
.await
.cloned()
}

pub fn node_genesis(
wallet: &EthereumWallet,
chain_spec_path: &Path,
) -> anyhow::Result<serde_json::Value> {
let unmodified_chainspec_file =
File::open(chain_spec_path).context("Failed to open the unmodified chainspec file")?;
let mut chainspec_json =
serde_json::from_reader::<_, serde_json::Value>(&unmodified_chainspec_file)
.context("Failed to read the unmodified chainspec JSON")?;

let existing_chainspec_balances =
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"]
.as_array_mut()
.expect("Can't fail");

for address in NetworkWallet::<Ethereum>::signer_addresses(wallet) {
let substrate_address = Self::eth_to_substrate_address(&address);
let balance = INITIAL_BALANCE;
existing_chainspec_balances.push(json!((substrate_address, balance)));
}

Ok(chainspec_json)
}
}
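// For reference, node_genesis above patches wallet balances at this path in
// the chain-spec JSON (shape inferred from the indexing code, shown here as a
// sketch rather than a complete chain spec):
//
// {
//   "genesis": {
//     "runtimeGenesis": {
//       "patch": {
//         "balances": {
//           "balances": [ ["<ss58 address>", <initial balance>], ... ]
//         }
//       }
//     }
//   }
// }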
impl EthereumNode for PolkadotOmnichainNode {
fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>> {
Box::pin(async move { Ok(()) })
}

fn id(&self) -> usize {
self.id as _
}

fn connection_string(&self) -> &str {
&self.rpc_url
}

fn submit_transaction(
&self,
transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TxHash>> + '_>> {
Box::pin(async move {
let provider = self
.provider()
.await
.context("Failed to create the provider for transaction submission")?;
let pending_transaction = provider
.send_transaction(transaction)
.await
.context("Failed to submit the transaction through the provider")?;
Ok(*pending_transaction.tx_hash())
})
}

fn get_receipt(
&self,
tx_hash: TxHash,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move {
self.provider()
.await
.context("Failed to create provider for getting the receipt")?
.get_transaction_receipt(tx_hash)
.await
.context("Failed to get the receipt of the transaction")?
.context("Failed to get the receipt of the transaction")
})
}

fn execute_transaction(
&self,
transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move {
self.provider()
.await
.context("Failed to create provider for transaction submission")?
.send_transaction(transaction)
.await
.context("Encountered an error when submitting a transaction")?
.get_receipt()
.await
.context("Failed to get the receipt for the transaction")
})
}

fn trace_transaction(
&self,
tx_hash: TxHash,
trace_options: GethDebugTracingOptions,
) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
Box::pin(async move {
self.provider()
.await
.context("Failed to create provider for debug tracing")?
.debug_trace_transaction(tx_hash, trace_options)
.await
.context("Failed to obtain debug trace from eth-proxy")
})
}

fn state_diff(
&self,
tx_hash: TxHash,
) -> Pin<Box<dyn Future<Output = anyhow::Result<DiffMode>> + '_>> {
Box::pin(async move {
let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
diff_mode: Some(true),
disable_code: None,
disable_storage: None,
});
match self
.trace_transaction(tx_hash, trace_options)
.await?
.try_into_pre_state_frame()?
{
PreStateFrame::Diff(diff) => Ok(diff),
_ => anyhow::bail!("expected a diff mode trace"),
}
})
}

fn balance_of(
&self,
address: Address,
) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
Box::pin(async move {
self.provider()
.await
.context("Failed to get the eth-rpc provider")?
.get_balance(address)
.await
.map_err(Into::into)
})
}

fn latest_state_proof(
&self,
address: Address,
keys: Vec<StorageKey>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<EIP1186AccountProofResponse>> + '_>> {
Box::pin(async move {
self.provider()
.await
.context("Failed to get the eth-rpc provider")?
.get_proof(address, keys)
.latest()
.await
.map_err(Into::into)
})
}

fn resolver(
&self,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Arc<dyn ResolverApi + '_>>> + '_>> {
Box::pin(async move {
let id = self.id;
let provider = self.provider().await?;
Ok(Arc::new(PolkadotOmnichainNodeResolver { id, provider }) as Arc<dyn ResolverApi>)
})
}

fn evm_version(&self) -> EVMVersion {
EVMVersion::Cancun
}

fn subscribe_to_full_blocks_information(
&self,
) -> Pin<
Box<
dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
+ '_,
>,
> {
#[subxt::subxt(runtime_metadata_path = "../../assets/revive_metadata.scale")]
pub mod revive {}

Box::pin(async move {
let polkadot_omnichain_node_rpc_port =
Self::BASE_POLKADOT_OMNICHAIN_NODE_RPC_PORT + self.id as u16;
let polkadot_omnichain_node_rpc_url =
format!("ws://127.0.0.1:{polkadot_omnichain_node_rpc_port}");
let api = OnlineClient::<SubstrateConfig>::from_url(polkadot_omnichain_node_rpc_url)
.await
.context("Failed to create subxt rpc client")?;
let provider = self.provider().await.context("Failed to create provider")?;

let block_stream = api
.blocks()
.subscribe_all()
.await
.context("Failed to subscribe to blocks")?;

let mined_block_information_stream = block_stream.filter_map(move |block| {
let api = api.clone();
let provider = provider.clone();

async move {
let substrate_block = block.ok()?;
let revive_block = provider
.get_block_by_number(
BlockNumberOrTag::Number(substrate_block.number() as _),
)
.await
.expect("TODO: Remove")
.expect("TODO: Remove");

let used = api
.storage()
.at(substrate_block.reference())
.fetch_or_default(&revive::storage().system().block_weight())
.await
.expect("TODO: Remove");

let block_ref_time = (used.normal.ref_time as u128)
+ (used.operational.ref_time as u128)
+ (used.mandatory.ref_time as u128);
let block_proof_size = (used.normal.proof_size as u128)
+ (used.operational.proof_size as u128)
+ (used.mandatory.proof_size as u128);

let limits = api
.constants()
.at(&revive::constants().system().block_weights())
.expect("TODO: Remove");

let max_ref_time = limits.max_block.ref_time;
let max_proof_size = limits.max_block.proof_size;

Some(MinedBlockInformation {
ethereum_block_information: EthereumMinedBlockInformation {
block_number: revive_block.number(),
block_timestamp: revive_block.header.timestamp,
mined_gas: revive_block.header.gas_used as _,
block_gas_limit: revive_block.header.gas_limit as _,
transaction_hashes: revive_block
.transactions
.into_hashes()
.as_hashes()
.expect("Must be hashes")
.to_vec(),
},
substrate_block_information: Some(SubstrateMinedBlockInformation {
ref_time: block_ref_time,
max_ref_time,
proof_size: block_proof_size,
max_proof_size,
}),
tx_counts: Default::default(),
})
}
});

Ok(Box::pin(mined_block_information_stream)
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
})
}

fn provider(
&self,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
{
Box::pin(
self.provider()
.map(|provider| provider.map(|provider| provider.erased())),
)
}
}

pub struct PolkadotOmnichainNodeResolver {
id: u32,
provider: ConcreteProvider<Ethereum, Arc<EthereumWallet>>,
}

impl ResolverApi for PolkadotOmnichainNodeResolver {
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
fn chain_id(
&self,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::primitives::ChainId>> + '_>> {
Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) })
}

#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
fn transaction_gas_price(
&self,
tx_hash: TxHash,
) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
Box::pin(async move {
self.provider
.get_transaction_receipt(tx_hash)
.await?
.context("Failed to get the transaction receipt")
.map(|receipt| receipt.effective_gas_price)
})
}

#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
fn block_gas_limit(
&self,
number: BlockNumberOrTag,
) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
Box::pin(async move {
self.provider
.get_block_by_number(number)
.await
.context("Failed to get the eth-rpc block")?
|
||||
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
|
||||
.map(|block| block.header.gas_limit as _)
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
|
||||
fn block_coinbase(
|
||||
&self,
|
||||
number: BlockNumberOrTag,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Address>> + '_>> {
|
||||
Box::pin(async move {
|
||||
self.provider
|
||||
.get_block_by_number(number)
|
||||
.await
|
||||
.context("Failed to get the eth-rpc block")?
|
||||
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
|
||||
.map(|block| block.header.beneficiary)
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
|
||||
fn block_difficulty(
|
||||
&self,
|
||||
number: BlockNumberOrTag,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
|
||||
Box::pin(async move {
|
||||
self.provider
|
||||
.get_block_by_number(number)
|
||||
.await
|
||||
.context("Failed to get the eth-rpc block")?
|
||||
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
|
||||
.map(|block| U256::from_be_bytes(block.header.mix_hash.0))
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
|
||||
fn block_base_fee(
|
||||
&self,
|
||||
number: BlockNumberOrTag,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<u64>> + '_>> {
|
||||
Box::pin(async move {
|
||||
self.provider
|
||||
.get_block_by_number(number)
|
||||
.await
|
||||
.context("Failed to get the eth-rpc block")?
|
||||
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
|
||||
.and_then(|block| {
|
||||
block
|
||||
.header
|
||||
.base_fee_per_gas
|
||||
.context("Failed to get the base fee per gas")
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
|
||||
fn block_hash(
|
||||
&self,
|
||||
number: BlockNumberOrTag,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockHash>> + '_>> {
|
||||
Box::pin(async move {
|
||||
self.provider
|
||||
.get_block_by_number(number)
|
||||
.await
|
||||
.context("Failed to get the eth-rpc block")?
|
||||
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
|
||||
.map(|block| block.header.hash)
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
|
||||
fn block_timestamp(
|
||||
&self,
|
||||
number: BlockNumberOrTag,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockTimestamp>> + '_>> {
|
||||
Box::pin(async move {
|
||||
self.provider
|
||||
.get_block_by_number(number)
|
||||
.await
|
||||
.context("Failed to get the eth-rpc block")?
|
||||
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
|
||||
.map(|block| block.header.timestamp)
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
|
||||
fn last_block_number(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockNumber>> + '_>> {
|
||||
Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) })
|
||||
}
|
||||
}
|
||||
|
||||
impl Node for PolkadotOmnichainNode {
|
||||
fn shutdown(&mut self) -> anyhow::Result<()> {
|
||||
drop(self.polkadot_omnichain_node_process.take());
|
||||
drop(self.eth_rpc_process.take());
|
||||
|
||||
// Remove the node's database so that subsequent runs do not run on the same database. We
|
||||
// ignore the error just in case the directory didn't exist in the first place and therefore
|
||||
// there's nothing to be deleted.
|
||||
let _ = remove_dir_all(self.base_directory_path.join("data"));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
|
||||
self.init(genesis)?.spawn_process()
|
||||
}
|
||||
|
||||
fn version(&self) -> anyhow::Result<String> {
|
||||
let output = Command::new(&self.polkadot_omnichain_node_binary_path)
|
||||
.arg("--version")
|
||||
.stdin(Stdio::null())
|
||||
.stdout(Stdio::piped())
|
||||
.stderr(Stdio::null())
|
||||
.spawn()
|
||||
.context("Failed to spawn substrate --version")?
|
||||
.wait_with_output()
|
||||
.context("Failed to wait for substrate --version")?
|
||||
.stdout;
|
||||
Ok(String::from_utf8_lossy(&output).into())
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for PolkadotOmnichainNode {
|
||||
fn drop(&mut self) {
|
||||
self.shutdown().expect("Failed to shutdown")
|
||||
}
|
||||
}
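
A minimal consumption sketch (not part of the diff above): every one of the node trait methods returns a pinned, boxed future, so a caller simply awaits the result. The `node` and `address` bindings below are assumptions for illustration.

    async fn print_balance(node: &PolkadotOmnichainNode, address: Address) -> anyhow::Result<()> {
        // `balance_of` resolves to anyhow::Result<U256> once awaited.
        let balance = node.balance_of(address).await?;
        println!("Balance of {address}: {balance}");
        Ok(())
    }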

@@ -4,7 +4,7 @@ use std::{
    pin::Pin,
    process::{Command, Stdio},
    sync::{
        Arc,
        Arc, Mutex,
        atomic::{AtomicU32, Ordering},
    },
    time::Duration,
@@ -32,7 +32,7 @@ use futures::{FutureExt, Stream, StreamExt};
use revive_common::EVMVersion;
use revive_dt_common::fs::clear_directory;
use revive_dt_format::traits::ResolverApi;
use serde_json::json;
use serde_json::{Value, json};
use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32;

@@ -47,16 +47,16 @@ use tracing::{instrument, trace};

use crate::{
    Node,
    constants::{CHAIN_ID, INITIAL_BALANCE},
    constants::INITIAL_BALANCE,
    helpers::{Process, ProcessReadinessWaitBehavior},
    provider_utils::{
        ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider,
        execute_transaction,
    },
    provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
};

static NODE_COUNT: AtomicU32 = AtomicU32::new(0);

/// The number of blocks that should be cached by the revive-dev-node and the eth-rpc.
const NUMBER_OF_CACHED_BLOCKS: u32 = 100_000;

/// A node implementation for Substrate based chains. Currently, this supports either substrate
/// or the revive-dev-node which is done by changing the path and some of the other arguments passed
/// to the command.
@@ -76,6 +76,7 @@ pub struct SubstrateNode {
    nonce_manager: CachedNonceManager,
    provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
    consensus: Option<String>,
    use_fallback_gas_filler: bool,
}

impl SubstrateNode {
@@ -102,6 +103,7 @@ impl SubstrateNode {
            + AsRef<EthRpcConfiguration>
            + AsRef<WalletConfiguration>,
        existing_connection_strings: &[String],
        use_fallback_gas_filler: bool,
    ) -> Self {
        let working_directory_path =
            AsRef::<WorkingDirectoryConfiguration>::as_ref(&context).as_path();
@@ -134,10 +136,13 @@ impl SubstrateNode {
            nonce_manager: Default::default(),
            provider: Default::default(),
            consensus,
            use_fallback_gas_filler,
        }
    }

    fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> {
        static CHAINSPEC_MUTEX: Mutex<Option<Value>> = Mutex::new(None);

        if !self.rpc_url.is_empty() {
            return Ok(self);
        }
@@ -156,12 +161,22 @@ impl SubstrateNode {
        let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);

        trace!("Creating the node genesis");
        let chainspec_json = Self::node_genesis(
            &self.node_binary,
            &self.export_chainspec_command,
            &self.wallet,
        )
        .context("Failed to prepare the chainspec command")?;
        let chainspec_json = {
            let mut chainspec_mutex = CHAINSPEC_MUTEX.lock().expect("Poisoned");
            match chainspec_mutex.as_ref() {
                Some(chainspec_json) => chainspec_json.clone(),
                None => {
                    let chainspec_json = Self::node_genesis(
                        &self.node_binary,
                        &self.export_chainspec_command,
                        &self.wallet,
                    )
                    .context("Failed to prepare the chainspec command")?;
                    *chainspec_mutex = Some(chainspec_json.clone());
                    chainspec_json
                }
            }
        };

        trace!("Writing the node genesis");
        serde_json::to_writer_pretty(
@@ -212,6 +227,8 @@ impl SubstrateNode {
            .arg(u32::MAX.to_string())
            .arg("--pool-kbytes")
            .arg(u32::MAX.to_string())
            .arg("--state-pruning")
            .arg(NUMBER_OF_CACHED_BLOCKS.to_string())
            .env("RUST_LOG", Self::SUBSTRATE_LOG_ENV)
            .stdout(stdout_file)
            .stderr(stderr_file);
@@ -252,9 +269,9 @@ impl SubstrateNode {
            .arg("--rpc-max-connections")
            .arg(u32::MAX.to_string())
            .arg("--index-last-n-blocks")
            .arg(1_000u32.to_string())
            .arg(NUMBER_OF_CACHED_BLOCKS.to_string())
            .arg("--cache-size")
            .arg(1_000u32.to_string())
            .arg(NUMBER_OF_CACHED_BLOCKS.to_string())
            .env("RUST_LOG", Self::PROXY_LOG_ENV)
            .stdout(stdout_file)
            .stderr(stderr_file);
@@ -307,8 +324,9 @@ impl SubstrateNode {
            .get_or_try_init(|| async move {
                construct_concurrency_limited_provider::<Ethereum, _>(
                    self.rpc_url.as_str(),
                    FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000),
                    ChainIdFiller::new(Some(CHAIN_ID)),
                    FallbackGasFiller::default()
                        .with_fallback_mechanism(self.use_fallback_gas_filler),
                    ChainIdFiller::default(),
                    NonceFiller::new(self.nonce_manager.clone()),
                    self.wallet.clone(),
                )
@@ -413,11 +431,15 @@ impl EthereumNode for SubstrateNode {
        transaction: TransactionRequest,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
        Box::pin(async move {
            let provider = self
                .provider()
            self.provider()
                .await
                .context("Failed to create the provider")?;
            execute_transaction(provider, transaction).await
                .context("Failed to create provider for transaction submission")?
                .send_transaction(transaction)
                .await
                .context("Encountered an error when submitting a transaction")?
                .get_receipt()
                .await
                .context("Failed to get the receipt for the transaction")
        })
    }

@@ -808,6 +830,7 @@ mod tests {
            None,
            &context,
            &[],
            true,
        );
        node.init(context.genesis_configuration.genesis().unwrap().clone())
            .expect("Failed to initialize the node")
@@ -879,6 +902,7 @@ mod tests {
            None,
            &context,
            &[],
            true,
        );

        // Call `init()`

@@ -76,10 +76,7 @@ use crate::{
    Node,
    constants::INITIAL_BALANCE,
    helpers::{Process, ProcessReadinessWaitBehavior},
    provider_utils::{
        ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider,
        execute_transaction,
    },
    provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
};

static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
@@ -114,6 +111,8 @@ pub struct ZombienetNode {
    nonce_manager: CachedNonceManager,

    provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,

    use_fallback_gas_filler: bool,
}

impl ZombienetNode {
@@ -137,6 +136,7 @@ impl ZombienetNode {
        context: impl AsRef<WorkingDirectoryConfiguration>
            + AsRef<EthRpcConfiguration>
            + AsRef<WalletConfiguration>,
        use_fallback_gas_filler: bool,
    ) -> Self {
        let eth_proxy_binary = AsRef::<EthRpcConfiguration>::as_ref(&context)
            .path
@@ -164,6 +164,7 @@ impl ZombienetNode {
            connection_string: String::new(),
            node_rpc_port: None,
            provider: Default::default(),
            use_fallback_gas_filler,
        }
    }

@@ -330,7 +331,8 @@ impl ZombienetNode {
            .get_or_try_init(|| async move {
                construct_concurrency_limited_provider::<Ethereum, _>(
                    self.connection_string.as_str(),
                    FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000),
                    FallbackGasFiller::default()
                        .with_fallback_mechanism(self.use_fallback_gas_filler),
                    ChainIdFiller::default(), // TODO: use CHAIN_ID constant
                    NonceFiller::new(self.nonce_manager.clone()),
                    self.wallet.clone(),
@@ -428,14 +430,18 @@ impl EthereumNode for ZombienetNode {

    fn execute_transaction(
        &self,
        transaction: alloy::rpc::types::TransactionRequest,
        transaction: TransactionRequest,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
        Box::pin(async move {
            let provider = self
                .provider()
            self.provider()
                .await
                .context("Failed to create the provider")?;
            execute_transaction(provider, transaction).await
                .context("Failed to create provider for transaction submission")?
                .send_transaction(transaction)
                .await
                .context("Encountered an error when submitting a transaction")?
                .get_receipt()
                .await
                .context("Failed to get the receipt for the transaction")
        })
    }

@@ -823,6 +829,7 @@ mod tests {
        let mut node = ZombienetNode::new(
            context.polkadot_parachain_configuration.path.clone(),
            &context,
            true,
        );
        let genesis = context.genesis_configuration.genesis().unwrap().clone();
        node.init(genesis).unwrap();
@@ -856,7 +863,7 @@ mod tests {
    use utils::{new_node, test_config};

    #[tokio::test]
    #[ignore = "Ignored for the time being"]
    #[ignore = "Ignored since CI doesn't have zombienet installed"]
    async fn test_transfer_transaction_should_return_receipt() {
        // Arrange
        let (ctx, node) = new_node().await;
@@ -882,6 +889,7 @@ mod tests {
    }

    #[test]
    #[ignore = "Ignored since CI doesn't have zombienet installed"]
    fn print_eth_to_polkadot_mappings() {
        let eth_addresses = vec![
            "0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1",
@@ -897,6 +905,7 @@ mod tests {
    }

    #[test]
    #[ignore = "Ignored since CI doesn't have zombienet installed"]
    fn test_eth_to_polkadot_address() {
        let cases = vec![
            (
@@ -927,12 +936,14 @@ mod tests {
    }

    #[test]
    #[ignore = "Ignored since CI doesn't have zombienet installed"]
    fn eth_rpc_version_works() {
        // Arrange
        let context = test_config();
        let node = ZombienetNode::new(
            context.polkadot_parachain_configuration.path.clone(),
            &context,
            true,
        );

        // Act
@@ -946,12 +957,14 @@ mod tests {
    }

    #[test]
    #[ignore = "Ignored since CI doesn't have zombienet installed"]
    fn version_works() {
        // Arrange
        let context = test_config();
        let node = ZombienetNode::new(
            context.polkadot_parachain_configuration.path.clone(),
            &context,
            true,
        );

        // Act
@@ -965,7 +978,7 @@ mod tests {
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    #[ignore = "Ignored since CI doesn't have zombienet installed"]
    async fn get_chain_id_from_node_should_succeed() {
        // Arrange
        let node = shared_node().await;
@@ -984,7 +997,7 @@ mod tests {
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    #[ignore = "Ignored since CI doesn't have zombienet installed"]
    async fn can_get_gas_limit_from_node() {
        // Arrange
        let node = shared_node().await;
@@ -1002,7 +1015,7 @@ mod tests {
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    #[ignore = "Ignored since CI doesn't have zombienet installed"]
    async fn can_get_coinbase_from_node() {
        // Arrange
        let node = shared_node().await;
@@ -1020,7 +1033,7 @@ mod tests {
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    #[ignore = "Ignored since CI doesn't have zombienet installed"]
    async fn can_get_block_difficulty_from_node() {
        // Arrange
        let node = shared_node().await;
@@ -1038,7 +1051,7 @@ mod tests {
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    #[ignore = "Ignored since CI doesn't have zombienet installed"]
    async fn can_get_block_hash_from_node() {
        // Arrange
        let node = shared_node().await;
@@ -1056,7 +1069,7 @@ mod tests {
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    #[ignore = "Ignored since CI doesn't have zombienet installed"]
    async fn can_get_block_timestamp_from_node() {
        // Arrange
        let node = shared_node().await;
@@ -1074,7 +1087,7 @@ mod tests {
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    #[ignore = "Ignored since CI doesn't have zombienet installed"]
    async fn can_get_block_number_from_node() {
        // Arrange
        let node = shared_node().await;

@@ -1,42 +1,69 @@
use alloy::{
    eips::BlockNumberOrTag,
    network::{Network, TransactionBuilder},
    providers::{
        Provider, SendableTx,
        fillers::{GasFiller, TxFiller},
        ext::DebugApi,
        fillers::{GasFillable, GasFiller, TxFiller},
    },
    transports::TransportResult,
    rpc::types::trace::geth::{
        GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingCallOptions,
        GethDebugTracingOptions,
    },
    transports::{RpcError, TransportResult},
};

// Percentage padding applied to estimated gas (e.g. 120 = 20% padding)
const GAS_ESTIMATE_PADDING_NUMERATOR: u64 = 120;
const GAS_ESTIMATE_PADDING_DENOMINATOR: u64 = 100;

#[derive(Clone, Debug)]
/// An implementation of [`GasFiller`] with a fallback mechanism for reverting transactions.
///
/// This struct provides a fallback mechanism for alloy's [`GasFiller`] which kicks in when a
/// transaction's dry run fails due to it reverting, allowing us to get gas estimates even for
/// failing transactions. In this codebase, this is very important since the MatterLabs tests
/// expect some transactions in the test suite to revert. Since we're expected to run a number of
/// assertions on these reverting transactions, we must commit them to the ledger.
///
/// Therefore, this struct does the following:
///
/// 1. It first attempts to estimate the gas through the mechanism implemented in the [`GasFiller`].
/// 2. If that fails, we perform a debug trace of the transaction to find out how much gas the
///    transaction needs until it reverts.
/// 3. We fill in these values (either the success or failure case) into the transaction.
///
/// The fallback mechanism of this filler can be completely disabled if we don't want it to be used.
/// In that case, this gas filler will act in an identical way to alloy's [`GasFiller`].
///
/// We then fill in these values into the transaction.
///
/// The previous implementation of this fallback gas filler relied on making use of default values
/// for the gas limit in order to be able to submit the reverting transactions to the network. But,
/// it introduced a number of issues that we weren't anticipating at the time when it was built.
#[derive(Clone, Copy, Debug)]
pub struct FallbackGasFiller {
    /// The inner [`GasFiller`] which we pass all of the calls to in the happy path.
    inner: GasFiller,
    default_gas_limit: u64,
    default_max_fee_per_gas: u128,
    default_priority_fee: u128,

    /// A [`bool`] that controls if the fallback mechanism is enabled or not.
    enable_fallback_mechanism: bool,
}

impl FallbackGasFiller {
    pub fn new(
        default_gas_limit: u64,
        default_max_fee_per_gas: u128,
        default_priority_fee: u128,
    ) -> Self {
    pub fn new() -> Self {
        Self {
            inner: GasFiller,
            default_gas_limit,
            default_max_fee_per_gas,
            default_priority_fee,
            inner: Default::default(),
            enable_fallback_mechanism: true,
        }
    }
}

impl Default for FallbackGasFiller {
    fn default() -> Self {
        FallbackGasFiller::new(25_000_000, 1_000_000_000, 1_000_000_000)
    pub fn with_fallback_mechanism(mut self, enable: bool) -> Self {
        self.enable_fallback_mechanism = enable;
        self
    }

    pub fn with_fallback_mechanism_enabled(self) -> Self {
        self.with_fallback_mechanism(true)
    }

    pub fn with_fallback_mechanism_disabled(self) -> Self {
        self.with_fallback_mechanism(false)
    }
}
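
A short usage sketch mirroring the call sites changed in the node diffs above; `use_fallback` stands in for the `use_fallback_gas_filler` flag and is an assumption for illustration:

    // Fallback is enabled by default; the flag decides whether reverting
    // transactions fall back to the debug-trace based estimation below.
    let filler = FallbackGasFiller::default().with_fallback_mechanism(use_fallback);
    // Equivalent explicit forms:
    //   FallbackGasFiller::default().with_fallback_mechanism_enabled()
    //   FallbackGasFiller::default().with_fallback_mechanism_disabled()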

@@ -44,27 +71,84 @@ impl<N> TxFiller<N> for FallbackGasFiller
where
    N: Network,
{
    type Fillable = Option<<GasFiller as TxFiller<N>>::Fillable>;
    type Fillable = <GasFiller as TxFiller<N>>::Fillable;

    fn status(
        &self,
        tx: &<N as Network>::TransactionRequest,
    ) -> alloy::providers::fillers::FillerControlFlow {
        <GasFiller as TxFiller<N>>::status(&self.inner, tx)
        TxFiller::<N>::status(&self.inner, tx)
    }

    fn fill_sync(&self, _: &mut alloy::providers::SendableTx<N>) {}
    fn fill_sync(&self, _: &mut SendableTx<N>) {}

    async fn prepare<P: Provider<N>>(
        &self,
        provider: &P,
        tx: &<N as Network>::TransactionRequest,
    ) -> TransportResult<Self::Fillable> {
        match self.inner.prepare(provider, tx).await {
            Ok(fill) => Ok(Some(fill)),
            Err(err) => {
                tracing::debug!(error = ?err, "Gas Provider Estimation Failed, using fallback");
                Ok(None)
        match (
            self.inner.prepare(provider, tx).await,
            self.enable_fallback_mechanism,
        ) {
            // Return the same thing if either this call succeeds, or if the call fails and the
            // fallback mechanism is disabled.
            (rtn @ Ok(..), ..) | (rtn @ Err(..), false) => rtn,
            (Err(..), true) => {
                // Perform a trace of the transaction.
                let trace = provider
                    .debug_trace_call(
                        tx.clone(),
                        BlockNumberOrTag::Latest.into(),
                        GethDebugTracingCallOptions {
                            tracing_options: GethDebugTracingOptions {
                                tracer: Some(GethDebugTracerType::BuiltInTracer(
                                    GethDebugBuiltInTracerType::CallTracer,
                                )),
                                ..Default::default()
                            },
                            state_overrides: Default::default(),
                            block_overrides: Default::default(),
                            tx_index: Default::default(),
                        },
                    )
                    .await?
                    .try_into_call_frame()
                    .map_err(|err| {
                        RpcError::local_usage_str(
                            format!("Expected a callframe trace, but got: {err:?}").as_str(),
                        )
                    })?;

                let gas_used = u64::try_from(trace.gas_used).map_err(|_| {
                    RpcError::local_usage_str(
                        "Transaction trace returned a value of gas used that exceeds u64",
                    )
                })?;
                let gas_limit = gas_used.saturating_mul(2);

                if let Some(gas_price) = tx.gas_price() {
                    return Ok(GasFillable::Legacy {
                        gas_limit,
                        gas_price,
                    });
                }

                let estimate = if let (Some(max_fee_per_gas), Some(max_priority_fee_per_gas)) =
                    (tx.max_fee_per_gas(), tx.max_priority_fee_per_gas())
                {
                    alloy::eips::eip1559::Eip1559Estimation {
                        max_fee_per_gas,
                        max_priority_fee_per_gas,
                    }
                } else {
                    provider.estimate_eip1559_fees().await?
                };

                Ok(GasFillable::Eip1559 {
                    gas_limit,
                    estimate,
                })
            }
        }
    }
@@ -72,27 +156,14 @@ where
    async fn fill(
        &self,
        fillable: Self::Fillable,
        mut tx: alloy::providers::SendableTx<N>,
        tx: SendableTx<N>,
    ) -> TransportResult<SendableTx<N>> {
        if let Some(fill) = fillable {
            let mut tx = self.inner.fill(fill, tx).await?;
            if let Some(builder) = tx.as_mut_builder() {
                if let Some(estimated) = builder.gas_limit() {
                    let padded = estimated
                        .checked_mul(GAS_ESTIMATE_PADDING_NUMERATOR)
                        .and_then(|v| v.checked_div(GAS_ESTIMATE_PADDING_DENOMINATOR))
                        .unwrap_or(u64::MAX);
                    builder.set_gas_limit(padded);
                }
            }
            Ok(tx)
        } else {
            if let Some(builder) = tx.as_mut_builder() {
                builder.set_gas_limit(self.default_gas_limit);
                builder.set_max_fee_per_gas(self.default_max_fee_per_gas);
                builder.set_max_priority_fee_per_gas(self.default_priority_fee);
            }
            Ok(tx)
        }
        self.inner.fill(fillable, tx).await
    }
}

impl Default for FallbackGasFiller {
    fn default() -> Self {
        Self::new()
    }
}

@@ -1,7 +1,9 @@
mod concurrency_limiter;
mod fallback_gas_filler;
mod provider;
mod receipt_retry_layer;

pub use concurrency_limiter::*;
pub use fallback_gas_filler::*;
pub use provider::*;
pub use receipt_retry_layer::*;

@@ -1,18 +1,16 @@
use std::{ops::ControlFlow, sync::LazyLock, time::Duration};
use std::sync::LazyLock;

use alloy::{
    network::{Ethereum, Network, NetworkWallet, TransactionBuilder4844},
    network::{Network, NetworkWallet, TransactionBuilder4844},
    providers::{
        Identity, PendingTransactionBuilder, Provider, ProviderBuilder, RootProvider,
        Identity, ProviderBuilder, RootProvider,
        fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller},
    },
    rpc::client::ClientBuilder,
};
use anyhow::{Context, Result};
use revive_dt_common::futures::{PollingWaitBehavior, poll};
use tracing::{Instrument, debug, info, info_span};

use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller};
use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller, RetryLayer};

pub type ConcreteProvider<N, W> = FillProvider<
    JoinFill<
@@ -48,6 +46,7 @@ where

    let client = ClientBuilder::default()
        .layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone())
        .layer(RetryLayer::default())
        .connect(rpc_url)
        .await
        .context("Failed to construct the RPC client")?;
@@ -63,70 +62,3 @@ where

    Ok(provider)
}

pub async fn execute_transaction<N, W>(
    provider: ConcreteProvider<N, W>,
    transaction: N::TransactionRequest,
) -> Result<N::ReceiptResponse>
where
    N: Network<
        TransactionRequest: TransactionBuilder4844,
        TxEnvelope = <Ethereum as Network>::TxEnvelope,
    >,
    W: NetworkWallet<N>,
    Identity: TxFiller<N>,
    FallbackGasFiller: TxFiller<N>,
    ChainIdFiller: TxFiller<N>,
    NonceFiller: TxFiller<N>,
    WalletFiller<W>: TxFiller<N>,
{
    let sendable_transaction = provider
        .fill(transaction)
        .await
        .context("Failed to fill transaction")?;

    let transaction_envelope = sendable_transaction
        .try_into_envelope()
        .context("Failed to convert transaction into an envelope")?;
    let tx_hash = *transaction_envelope.tx_hash();

    let mut pending_transaction = match provider.send_tx_envelope(transaction_envelope).await {
        Ok(pending_transaction) => pending_transaction,
        Err(error) => {
            let error_string = error.to_string();

            if error_string.contains("Transaction Already Imported") {
                PendingTransactionBuilder::<N>::new(provider.root().clone(), tx_hash)
            } else {
                return Err(error).context(format!("Failed to submit transaction {tx_hash}"));
            }
        }
    };
    debug!(%tx_hash, "Submitted Transaction");

    pending_transaction.set_timeout(Some(Duration::from_secs(120)));
    let tx_hash = pending_transaction.watch().await.context(format!(
        "Transaction inclusion watching timeout for {tx_hash}"
    ))?;

    poll(
        Duration::from_secs(60),
        PollingWaitBehavior::Constant(Duration::from_secs(3)),
        || {
            let provider = provider.clone();

            async move {
                match provider.get_transaction_receipt(tx_hash).await {
                    Ok(Some(receipt)) => {
                        info!("Found the transaction receipt");
                        Ok(ControlFlow::Break(receipt))
                    }
                    _ => Ok(ControlFlow::Continue(())),
                }
            }
        },
    )
    .instrument(info_span!("Polling for receipt", %tx_hash))
    .await
    .context(format!("Polling for receipt failed for {tx_hash}"))
}

@@ -0,0 +1,158 @@
use std::time::Duration;

use alloy::{
    network::{AnyNetwork, Network},
    rpc::json_rpc::{RequestPacket, ResponsePacket},
    transports::{TransportError, TransportErrorKind, TransportFut},
};
use tokio::time::{interval, timeout};
use tower::{Layer, Service};

/// A layer that allows for automatic retries for getting the receipt.
///
/// There are certain cases where getting the receipt of a committed transaction might fail. In Geth
/// this can happen if the transaction has been committed to the ledger but has not been indexed, in
/// the substrate and revive stack it can also happen for other reasons.
///
/// Therefore, just because the first attempt to get the receipt (after transaction confirmation)
/// has failed it doesn't mean that it will continue to fail. This layer can be added to any alloy
/// provider to allow the provider to retry getting the receipt for some period of time before it
/// considers that a timeout. It attempts to poll for the receipt for the `polling_duration` with an
/// interval of `polling_interval` between each poll. If by the end of the `polling_duration` it was
/// not able to get the receipt successfully then this is considered to be a timeout.
///
/// Additionally, this layer allows for retries for other rpc methods such as all tracing methods.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RetryLayer {
    /// The amount of time to keep polling for the receipt before considering it a timeout.
    polling_duration: Duration,

    /// The interval of time to wait between each poll for the receipt.
    polling_interval: Duration,
}

impl RetryLayer {
    pub fn new(polling_duration: Duration, polling_interval: Duration) -> Self {
        Self {
            polling_duration,
            polling_interval,
        }
    }

    pub fn with_polling_duration(mut self, polling_duration: Duration) -> Self {
        self.polling_duration = polling_duration;
        self
    }

    pub fn with_polling_interval(mut self, polling_interval: Duration) -> Self {
        self.polling_interval = polling_interval;
        self
    }
}

impl Default for RetryLayer {
    fn default() -> Self {
        Self {
            polling_duration: Duration::from_secs(90),
            polling_interval: Duration::from_millis(500),
        }
    }
}

impl<S> Layer<S> for RetryLayer {
    type Service = RetryService<S>;

    fn layer(&self, inner: S) -> Self::Service {
        RetryService {
            service: inner,
            polling_duration: self.polling_duration,
            polling_interval: self.polling_interval,
        }
    }
}
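
A sketch of wiring the layer into an alloy client, matching the provider.rs change earlier in this diff; the `rpc_url` binding is assumed for illustration:

    let client = ClientBuilder::default()
        // Poll failing receipt/trace requests for up to 90 seconds.
        .layer(RetryLayer::default().with_polling_duration(Duration::from_secs(90)))
        .connect(rpc_url)
        .await?;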

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RetryService<S> {
    /// The internal service.
    service: S,

    /// The amount of time to keep polling for the receipt before considering it a timeout.
    polling_duration: Duration,

    /// The interval of time to wait between each poll for the receipt.
    polling_interval: Duration,
}

impl<S> Service<RequestPacket> for RetryService<S>
where
    S: Service<RequestPacket, Future = TransportFut<'static>, Error = TransportError>
        + Send
        + 'static
        + Clone,
{
    type Response = ResponsePacket;
    type Error = TransportError;
    type Future = TransportFut<'static>;

    fn poll_ready(
        &mut self,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), Self::Error>> {
        self.service.poll_ready(cx)
    }

    #[allow(clippy::nonminimal_bool)]
    fn call(&mut self, req: RequestPacket) -> Self::Future {
        type ReceiptOutput = <AnyNetwork as Network>::ReceiptResponse;

        let mut service = self.service.clone();
        let polling_interval = self.polling_interval;
        let polling_duration = self.polling_duration;

        Box::pin(async move {
            let request = req.as_single().ok_or_else(|| {
                TransportErrorKind::custom_str("Retry layer doesn't support batch requests")
            })?;
            let method = request.method();
            let requires_retries = method == "eth_getTransactionReceipt"
                || (method.contains("debug") && method.contains("trace"));

            if !requires_retries {
                return service.call(req).await;
            }

            timeout(polling_duration, async {
                let mut interval = interval(polling_interval);

                loop {
                    interval.tick().await;

                    let Ok(resp) = service.call(req.clone()).await else {
                        continue;
                    };
                    let response = resp.as_single().expect("Can't fail");
                    if response.is_error() {
                        continue;
                    }

                    if method == "eth_getTransactionReceipt"
                        && response
                            .payload()
                            .clone()
                            .deserialize_success::<ReceiptOutput>()
                            .ok()
                            .and_then(|resp| resp.try_into_success().ok())
                            .is_some()
                        || method != "eth_getTransactionReceipt"
                    {
                        return resp;
                    } else {
                        continue;
                    }
                }
            })
            .await
            .map_err(|_| TransportErrorKind::custom_str("Timeout when retrying request"))
        })
    }
}
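
The retry predicate in `call` reduces to the following check (restated here only for illustration):

    fn requires_retries(method: &str) -> bool {
        method == "eth_getTransactionReceipt"
            || (method.contains("debug") && method.contains("trace"))
    }

so `debug_traceTransaction` and `debug_traceCall` requests are retried until they succeed or the polling duration elapses, while everything else passes through on the first attempt.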

@@ -0,0 +1,25 @@
[package]
name = "revive-dt-report-processor"
description = "revive differential testing report processor utility"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true

[[bin]]
name = "report-processor"
path = "src/main.rs"

[dependencies]
revive-dt-report = { workspace = true }
revive-dt-common = { workspace = true }

anyhow = { workspace = true }
clap = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }

[lints]
workspace = true

@@ -0,0 +1,329 @@
use std::{
    borrow::Cow,
    collections::{BTreeMap, BTreeSet},
    fmt::Display,
    fs::{File, OpenOptions},
    ops::{Deref, DerefMut},
    path::{Path, PathBuf},
    str::FromStr,
};

use anyhow::{Context as _, Error, Result, bail};
use clap::Parser;
use serde::{Deserialize, Serialize, de::DeserializeOwned};

use revive_dt_common::types::{Mode, ParsedTestSpecifier};
use revive_dt_report::{Report, TestCaseStatus};

fn main() -> Result<()> {
    let cli = Cli::try_parse().context("Failed to parse the CLI arguments")?;

    match cli {
        Cli::GenerateExpectationsFile {
            report_path,
            output_path: output_file,
            remove_prefix,
        } => {
            let remove_prefix = remove_prefix
                .into_iter()
                .map(|path| path.canonicalize().context("Failed to canonicalize path"))
                .collect::<Result<Vec<_>>>()?;

            let expectations = report_path
                .execution_information
                .iter()
                .flat_map(|(metadata_file_path, metadata_file_report)| {
                    metadata_file_report
                        .case_reports
                        .iter()
                        .map(move |(case_idx, case_report)| {
                            (metadata_file_path, case_idx, case_report)
                        })
                })
                .flat_map(|(metadata_file_path, case_idx, case_report)| {
                    case_report.mode_execution_reports.iter().map(
                        move |(mode, execution_report)| {
                            (
                                metadata_file_path,
                                case_idx,
                                mode,
                                execution_report.status.as_ref(),
                            )
                        },
                    )
                })
                .filter_map(|(metadata_file_path, case_idx, mode, status)| {
                    status.map(|status| (metadata_file_path, case_idx, mode, status))
                })
                .map(|(metadata_file_path, case_idx, mode, status)| {
                    (
                        TestSpecifier {
                            metadata_file_path: Cow::Borrowed(
                                remove_prefix
                                    .iter()
                                    .filter_map(|prefix| {
                                        metadata_file_path.as_inner().strip_prefix(prefix).ok()
                                    })
                                    .next()
                                    .unwrap_or(metadata_file_path.as_inner()),
                            ),
                            case_idx: case_idx.into_inner(),
                            mode: Cow::Borrowed(mode),
                        },
                        Status::from(status),
                    )
                })
                .filter(|(_, status)| *status == Status::Failed)
                .collect::<Expectations>();

            let output_file = OpenOptions::new()
                .truncate(true)
                .create(true)
                .write(true)
                .open(output_file)
                .context("Failed to create the output file")?;
            serde_json::to_writer_pretty(output_file, &expectations)
                .context("Failed to write the expectations to file")?;
        }
        Cli::CompareExpectationFiles {
            base_expectation_path,
            other_expectation_path,
        } => {
            let keys = base_expectation_path
                .keys()
                .chain(other_expectation_path.keys())
                .collect::<BTreeSet<_>>();

            for key in keys {
                let base_status = base_expectation_path.get(key).context(format!(
                    "Entry not found in the base expectations: \"{}\"",
                    key
                ))?;
                let other_status = other_expectation_path.get(key).context(format!(
                    "Entry not found in the other expectations: \"{}\"",
                    key
                ))?;

                if base_status != other_status {
                    bail!(
                        "Expectations for entry \"{}\" have changed. They were {:?} and now they are {:?}",
                        key,
                        base_status,
                        other_status
                    )
                }
            }
        }
    };

    Ok(())
}

type Expectations<'a> = BTreeMap<TestSpecifier<'a>, Status>;

/// A tool that's used to process the reports generated by the retester binary in various ways.
#[derive(Clone, Debug, Parser)]
#[command(name = "retester", term_width = 100)]
pub enum Cli {
    /// Generates an expectation file out of a given report.
    GenerateExpectationsFile {
        /// The path of the report's JSON file to generate the expectation's file for.
        #[clap(long)]
        report_path: JsonFile<Report>,

        /// The path of the output file to generate.
        ///
        /// Note that we expect that:
        /// 1. The provided path points to a JSON file.
        /// 1. The ancestors of the provided path already exist such that no directory creations
        ///    are required.
        #[clap(long)]
        output_path: PathBuf,

        /// Prefix paths to remove from the paths in the final expectations file.
        #[clap(long)]
        remove_prefix: Vec<PathBuf>,
    },

    /// Compares two expectation files to ensure that they match each other.
    CompareExpectationFiles {
        /// The path of the base expectation file.
        #[clap(long)]
        base_expectation_path: JsonFile<Expectations<'static>>,

        /// The path of the other expectation file.
        #[clap(long)]
        other_expectation_path: JsonFile<Expectations<'static>>,
    },
}
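
Since `Cli` derives clap's `Parser`, subcommand and flag names follow clap's default kebab-case convention; an in-process parsing sketch under that assumption (file names invented, and note that `JsonFile`'s `FromStr` opens and deserializes the file eagerly, so `report.json` would have to exist):

    let cli = Cli::parse_from([
        "report-processor",
        "generate-expectations-file",
        "--report-path", "report.json",
        "--output-path", "expectations.json",
    ]);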

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub enum Status {
    Succeeded,
    Failed,
    Ignored,
}

impl From<TestCaseStatus> for Status {
    fn from(value: TestCaseStatus) -> Self {
        match value {
            TestCaseStatus::Succeeded { .. } => Self::Succeeded,
            TestCaseStatus::Failed { .. } => Self::Failed,
            TestCaseStatus::Ignored { .. } => Self::Ignored,
        }
    }
}

impl<'a> From<&'a TestCaseStatus> for Status {
    fn from(value: &'a TestCaseStatus) -> Self {
        match value {
            TestCaseStatus::Succeeded { .. } => Self::Succeeded,
            TestCaseStatus::Failed { .. } => Self::Failed,
            TestCaseStatus::Ignored { .. } => Self::Ignored,
        }
    }
}

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct JsonFile<T> {
    path: PathBuf,
    content: Box<T>,
}

impl<T> Deref for JsonFile<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.content
    }
}

impl<T> DerefMut for JsonFile<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.content
    }
}

impl<T> FromStr for JsonFile<T>
where
    T: DeserializeOwned,
{
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let path = PathBuf::from(s);
        let file = File::open(&path).context("Failed to open the file")?;
        serde_json::from_reader(&file)
            .map(|content| Self { path, content })
            .context(format!(
                "Failed to deserialize file's content as {}",
                std::any::type_name::<T>()
            ))
    }
}

impl<T> Display for JsonFile<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Display::fmt(&self.path.display(), f)
    }
}

impl<T> From<JsonFile<T>> for String {
    fn from(value: JsonFile<T>) -> Self {
        value.to_string()
    }
}

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct TestSpecifier<'a> {
    pub metadata_file_path: Cow<'a, Path>,
    pub case_idx: usize,
    pub mode: Cow<'a, Mode>,
}

impl<'a> Display for TestSpecifier<'a> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}::{}::{}",
            self.metadata_file_path.display(),
            self.case_idx,
            self.mode
        )
    }
}

impl<'a> From<TestSpecifier<'a>> for ParsedTestSpecifier {
    fn from(
        TestSpecifier {
            metadata_file_path,
            case_idx,
            mode,
        }: TestSpecifier,
    ) -> Self {
        Self::CaseWithMode {
            metadata_file_path: metadata_file_path.to_path_buf(),
            case_idx,
            mode: mode.into_owned(),
        }
    }
}

impl TryFrom<ParsedTestSpecifier> for TestSpecifier<'static> {
    type Error = Error;

    fn try_from(value: ParsedTestSpecifier) -> Result<Self> {
        let ParsedTestSpecifier::CaseWithMode {
            metadata_file_path,
            case_idx,
            mode,
        } = value
        else {
            bail!("Expected a full test case specifier")
        };
        Ok(Self {
            metadata_file_path: Cow::Owned(metadata_file_path),
            case_idx,
            mode: Cow::Owned(mode),
        })
    }
}

impl<'a> Serialize for TestSpecifier<'a> {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.to_string().serialize(serializer)
    }
}

impl<'d, 'a> Deserialize<'d> for TestSpecifier<'a> {
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: serde::Deserializer<'d>,
    {
        let string = String::deserialize(deserializer)?;
        let mut splitted = string.split("::");
        let (Some(metadata_file_path), Some(case_idx), Some(mode), None) = (
            splitted.next(),
            splitted.next(),
            splitted.next(),
            splitted.next(),
        ) else {
            return Err(serde::de::Error::custom(
                "Test specifier doesn't contain the components required",
            ));
        };
        let metadata_file_path = PathBuf::from(metadata_file_path);
        let case_idx = usize::from_str(case_idx)
            .map_err(|_| serde::de::Error::custom("Case idx is not a usize"))?;
        let mode = Mode::from_str(mode).map_err(|_| serde::de::Error::custom("Invalid mode"))?;

        Ok(Self {
            metadata_file_path: Cow::Owned(metadata_file_path),
            case_idx,
            mode: Cow::Owned(mode),
        })
    }
}
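
The serialized form is the `{path}::{case_idx}::{mode}` string produced by the `Display` impl above. A round-trip sketch; the mode literal is a placeholder and assumes it parses as a valid `Mode`:

    let spec: TestSpecifier = serde_json::from_str("\"tests/foo.json::3::M3B3\"")?;
    assert_eq!(spec.case_idx, 3);
    // Serializing again yields the same specifier string.
    assert_eq!(serde_json::to_string(&spec)?, "\"tests/foo.json::3::M3B3\"");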

@@ -36,6 +36,8 @@ pub struct ReportAggregator {
    runner_tx: Option<UnboundedSender<RunnerEvent>>,
    runner_rx: UnboundedReceiver<RunnerEvent>,
    listener_tx: Sender<ReporterEvent>,
    /* Context */
    file_name: Option<String>,
}

impl ReportAggregator {
@@ -43,6 +45,11 @@ impl ReportAggregator {
        let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
        let (listener_tx, _) = channel::<ReporterEvent>(0xFFFF);
        Self {
            file_name: match context {
                Context::Test(ref context) => context.report_configuration.file_name.clone(),
                Context::Benchmark(ref context) => context.report_configuration.file_name.clone(),
                Context::ExportJsonSchema | Context::ExportGenesis(..) => None,
            },
            report: Report::new(context),
            remaining_cases: Default::default(),
            runner_tx: Some(runner_tx),
@@ -51,7 +58,7 @@ impl ReportAggregator {
        }
    }

    pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<()>>) {
    pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<Report>>) {
        let reporter = self
            .runner_tx
            .take()
@@ -60,7 +67,7 @@ impl ReportAggregator {
        (reporter, async move { self.aggregate().await })
    }

    async fn aggregate(mut self) -> Result<()> {
    async fn aggregate(mut self) -> Result<Report> {
        debug!("Starting to aggregate report");

        while let Some(event) = self.runner_rx.recv().await {
@@ -121,7 +128,7 @@ impl ReportAggregator {
        self.handle_completion(CompletionEvent {});
        debug!("Report aggregation completed");

        let file_name = {
        let default_file_name = {
            let current_timestamp = SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .context("System clock is before UNIX_EPOCH; cannot compute report timestamp")?
@@ -130,6 +137,7 @@ impl ReportAggregator {
            file_name.push_str(".json");
            file_name
        };
        let file_name = self.file_name.unwrap_or(default_file_name);
        let file_path = self
            .report
            .context
@@ -152,7 +160,7 @@ impl ReportAggregator {
            format!("Failed to serialize report JSON to {}", file_path.display())
        })?;

        Ok(())
        Ok(self.report)
    }

    fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) {
@@ -562,7 +570,7 @@ pub struct Report {
    /// The list of metadata files that were found by the tool.
    pub metadata_files: BTreeSet<MetadataFilePath>,
    /// Metrics from the execution.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub metrics: Option<Metrics>,
    /// Information relating to each test case.
    pub execution_information: BTreeMap<MetadataFilePath, MetadataFileReport>,
@@ -582,7 +590,7 @@ impl Report {
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
pub struct MetadataFileReport {
    /// Metrics from the execution.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub metrics: Option<Metrics>,
    /// The report of each case keyed by the case idx.
    pub case_reports: BTreeMap<CaseIdx, CaseReport>,
@@ -592,7 +600,7 @@ pub struct MetadataFileReport {
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
pub struct CaseReport {
    /// Metrics from the execution.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub metrics: Option<Metrics>,
    /// The [`ExecutionReport`] for each one of the [`Mode`]s.
    #[serde_as(as = "HashMap<DisplayFromStr, _>")]
@@ -602,31 +610,31 @@ pub struct CaseReport {
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
pub struct ExecutionReport {
    /// Information on the status of the test case and whether it succeeded, failed, or was ignored.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<TestCaseStatus>,
    /// Metrics from the execution.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub metrics: Option<Metrics>,
    /// Information related to the execution on one of the platforms.
    #[serde(skip_serializing_if = "BTreeMap::is_empty")]
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub platform_execution: PlatformKeyedInformation<Option<ExecutionInformation>>,
    /// Information on the compiled contracts.
    #[serde(skip_serializing_if = "BTreeMap::is_empty")]
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub compiled_contracts: BTreeMap<PathBuf, BTreeMap<String, ContractInformation>>,
    /// The addresses of the deployed contracts
    #[serde(skip_serializing_if = "BTreeMap::is_empty")]
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub contract_addresses: BTreeMap<ContractInstance, PlatformKeyedInformation<Vec<Address>>>,
    /// Information on the mined blocks as part of this execution.
    #[serde(skip_serializing_if = "BTreeMap::is_empty")]
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub mined_block_information: PlatformKeyedInformation<Vec<MinedBlockInformation>>,
    /// Information tracked for each step that was executed.
    #[serde(skip_serializing_if = "BTreeMap::is_empty")]
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub steps: BTreeMap<StepPath, StepReport>,
}

/// Information related to the status of the test. Could be that the test succeeded, failed, or that
/// it was ignored.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "status")]
pub enum TestCaseStatus {
    /// The test case succeeded.
@@ -664,19 +672,19 @@ pub struct TestCaseNodeInformation {
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct ExecutionInformation {
    /// Information related to the node assigned to this test case.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub node: Option<TestCaseNodeInformation>,
    /// Information on the pre-link compiled contracts.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub pre_link_compilation_status: Option<CompilationStatus>,
    /// Information on the post-link compiled contracts.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub post_link_compilation_status: Option<CompilationStatus>,
    /// Information on the deployed libraries.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub deployed_libraries: Option<BTreeMap<ContractInstance, Address>>,
    /// Information on the deployed contracts.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub deployed_contracts: Option<BTreeMap<ContractInstance, Address>>,
}

@@ -695,11 +703,11 @@ pub enum CompilationStatus {
        /// The input provided to the compiler to compile the contracts. This is only included if
        /// the appropriate flag is set in the CLI context and if the contracts were not cached and
        /// the compiler was invoked.
        #[serde(skip_serializing_if = "Option::is_none")]
        #[serde(default, skip_serializing_if = "Option::is_none")]
        compiler_input: Option<CompilerInput>,
        /// The output of the compiler. This is only included if the appropriate flag is set in the
        /// CLI contexts.
        #[serde(skip_serializing_if = "Option::is_none")]
        #[serde(default, skip_serializing_if = "Option::is_none")]
        compiler_output: Option<CompilerOutput>,
    },
    /// The compilation failed.
@@ -707,15 +715,15 @@ pub enum CompilationStatus {
        /// The failure reason.
        reason: String,
        /// The version of the compiler used to compile the contracts.
        #[serde(skip_serializing_if = "Option::is_none")]
        #[serde(default, skip_serializing_if = "Option::is_none")]
        compiler_version: Option<Version>,
        /// The path of the compiler used to compile the contracts.
        #[serde(skip_serializing_if = "Option::is_none")]
        #[serde(default, skip_serializing_if = "Option::is_none")]
        compiler_path: Option<PathBuf>,
        /// The input provided to the compiler to compile the contracts. This is only included if
        /// the appropriate flag is set in the CLI context and if the contracts were not cached and
        /// the compiler was invoked.
        #[serde(skip_serializing_if = "Option::is_none")]
        #[serde(default, skip_serializing_if = "Option::is_none")]
        compiler_input: Option<CompilerInput>,
    },
}
@@ -743,24 +751,24 @@ pub struct Metrics {
    pub gas_per_second: Metric<u64>,
    /* Block Fullness */
    pub gas_block_fullness: Metric<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub ref_time_block_fullness: Option<Metric<u64>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub proof_size_block_fullness: Option<Metric<u64>>,
}

/// The data that we store for a given metric (e.g., TPS).
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct Metric<T> {
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub minimum: Option<PlatformKeyedInformation<T>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub maximum: Option<PlatformKeyedInformation<T>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub mean: Option<PlatformKeyedInformation<T>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub median: Option<PlatformKeyedInformation<T>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub raw: Option<PlatformKeyedInformation<Vec<T>>>,
}
|
||||
|
||||
|
||||
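Aside on the recurring change in this file: `skip_serializing_if` alone drops empty fields when the report is written, but reading such a report back would then fail on the missing keys; pairing it with `default` lets serde fall back to `None` or an empty map instead, so the report round-trips. Consumers must therefore treat these keys as optional as well. A minimal reader-side sketch in Python (field names are taken from the structs above; the values are illustrative):

    import json

    # TestCaseStatus is internally tagged (#[serde(tag = "status")]), so the
    # variant name sits under a "status" key next to the variant's own fields.
    execution_report = json.loads(
        '{"status": {"status": "Succeeded", "steps_executed": 3}}'
    )
    assert execution_report["status"]["status"] == "Succeeded"

    # Fields marked `default, skip_serializing_if` may be absent from the JSON
    # entirely, so missing keys must be defaulted rather than indexed directly.
    metrics = execution_report.get("metrics")    # None when omitted
    steps = execution_report.get("steps", {})    # empty map when omitted
    print(metrics, len(steps))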
Submodule polkadot-sdk deleted from 45a0ea734f
Submodule resolc-compiler-tests updated: 40ffa2b839...55da34c4f6
@@ -28,7 +28,7 @@ from __future__ import annotations
 import json
 import sys
 import csv
-from typing import List, Mapping, TypedDict
+from typing import List, Mapping, TypedDict, no_type_check


 class EthereumMinedBlockInformation(TypedDict):
@@ -69,7 +69,43 @@ class MinedBlockInformation(TypedDict):
     """Block-level information for a mined block with both EVM and optional Substrate fields."""

     ethereum_block_information: EthereumMinedBlockInformation
-    substrate_block_information: SubstrateMinedBlockInformation
+    substrate_block_information: SubstrateMinedBlockInformation | None
+
+
+def substrate_block_information_ref_time(
+    block: SubstrateMinedBlockInformation | None,
+) -> int | None:
+    if block is None:
+        return None
+    else:
+        return block["ref_time"]
+
+
+def substrate_block_information_max_ref_time(
+    block: SubstrateMinedBlockInformation | None,
+) -> int | None:
+    if block is None:
+        return None
+    else:
+        return block["max_ref_time"]
+
+
+def substrate_block_information_proof_size(
+    block: SubstrateMinedBlockInformation | None,
+) -> int | None:
+    if block is None:
+        return None
+    else:
+        return block["proof_size"]
+
+
+def substrate_block_information_max_proof_size(
+    block: SubstrateMinedBlockInformation | None,
+) -> int | None:
+    if block is None:
+        return None
+    else:
+        return block["max_proof_size"]


 class Metric(TypedDict):
@@ -100,8 +136,19 @@ class Metrics(TypedDict):
     transaction_per_second: Metric
     gas_per_second: Metric
     gas_block_fullness: Metric
-    ref_time_block_fullness: Metric
-    proof_size_block_fullness: Metric
+    ref_time_block_fullness: Metric | None
+    proof_size_block_fullness: Metric | None
+
+
+@no_type_check
+def metrics_raw_item(
+    metrics: Metrics, name: str, target: str, index: int
+) -> int | None:
+    l: list[int] = metrics.get(name, dict()).get("raw", dict()).get(target, dict())
+    try:
+        return l[index]
+    except:
+        return None


 class ExecutionReport(TypedDict):
@@ -144,12 +191,15 @@ BlockInformation = TypedDict(
         "Transaction Count": int,
         "TPS": int | None,
         "GPS": int | None,
-        "Ref Time": int,
-        "Max Ref Time": int,
-        "Block Fullness Ref Time": int,
-        "Proof Size": int,
-        "Max Proof Size": int,
-        "Block Fullness Proof Size": int,
+        "Gas Mined": int,
+        "Block Gas Limit": int,
+        "Block Fullness Gas": float,
+        "Ref Time": int | None,
+        "Max Ref Time": int | None,
+        "Block Fullness Ref Time": int | None,
+        "Proof Size": int | None,
+        "Max Proof Size": int | None,
+        "Block Fullness Proof Size": int | None,
     },
 )
 """A typed dictionary used to hold all of the block information"""
@@ -175,7 +225,7 @@ def main() -> None:
     report: ReportRoot = load_report(report_path)

     # TODO: Remove this in the future, but for now, the target is fixed.
-    target: str = "revive-dev-node-revm-solc"
+    target: str = sys.argv[2]

     csv_writer = csv.writer(sys.stdout)

@@ -188,6 +238,12 @@ def main() -> None:

     resolved_blocks: list[BlockInformation] = []
     for i, block_information in enumerate(blocks_information):
+        mined_gas: int = block_information["ethereum_block_information"][
+            "mined_gas"
+        ]
+        block_gas_limit: int = block_information[
+            "ethereum_block_information"
+        ]["block_gas_limit"]
         resolved_blocks.append(
             {
                 "Block Number": block_information[
@@ -216,24 +272,37 @@ def main() -> None:
                         "raw"
                     ][target][i - 1]
                 ),
-                "Ref Time": block_information[
-                    "substrate_block_information"
-                ]["ref_time"],
-                "Max Ref Time": block_information[
-                    "substrate_block_information"
-                ]["max_ref_time"],
-                "Block Fullness Ref Time": execution_report["metrics"][
-                    "ref_time_block_fullness"
-                ]["raw"][target][i],
-                "Proof Size": block_information[
-                    "substrate_block_information"
-                ]["proof_size"],
-                "Max Proof Size": block_information[
-                    "substrate_block_information"
-                ]["max_proof_size"],
-                "Block Fullness Proof Size": execution_report["metrics"][
-                    "proof_size_block_fullness"
-                ]["raw"][target][i],
+                "Gas Mined": block_information[
+                    "ethereum_block_information"
+                ]["mined_gas"],
+                "Block Gas Limit": block_information[
+                    "ethereum_block_information"
+                ]["block_gas_limit"],
+                "Block Fullness Gas": mined_gas / block_gas_limit,
+                "Ref Time": substrate_block_information_ref_time(
+                    block_information["substrate_block_information"]
+                ),
+                "Max Ref Time": substrate_block_information_max_ref_time(
+                    block_information["substrate_block_information"]
+                ),
+                "Block Fullness Ref Time": metrics_raw_item(
+                    execution_report["metrics"],
+                    "ref_time_block_fullness",
+                    target,
+                    i,
+                ),
+                "Proof Size": substrate_block_information_proof_size(
+                    block_information["substrate_block_information"]
+                ),
+                "Max Proof Size": substrate_block_information_max_proof_size(
+                    block_information["substrate_block_information"]
+                ),
+                "Block Fullness Proof Size": metrics_raw_item(
+                    execution_report["metrics"],
+                    "proof_size_block_fullness",
+                    target,
+                    i,
+                ),
             }
         )
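The helpers added above exist because `substrate_block_information` can now be `None` (EVM-only platforms have no Substrate block data), and the two block-fullness metrics can be missing from `Metrics` altogether; `metrics_raw_item` additionally tolerates a short or absent `raw` series instead of raising. A hedged usage sketch (the block dict is invented; the helpers are assumed in scope as defined above):

    # Illustrative block entry for an EVM-only platform.
    block = {
        "ethereum_block_information": {
            "mined_gas": 21_000,
            "block_gas_limit": 60_000_000,
        },
        "substrate_block_information": None,
    }

    # Returns None instead of raising a TypeError on None["ref_time"].
    assert substrate_block_information_ref_time(
        block["substrate_block_information"]
    ) is None

Note also that the target platform is no longer hard-coded: the script now reads it from `sys.argv[2]`, so the same CSV generator can be pointed at any platform present in the report.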
@@ -5,51 +5,54 @@ CI. The full models used in the JSON report can be found in the revive different
 the models used in this script are just a partial reproduction of the full report models.
 """

-from typing import TypedDict, Literal, Union
-
-import json, io
+import json, typing, io, sys


-class Report(TypedDict):
+class Report(typing.TypedDict):
     context: "Context"
-    execution_information: dict[
-        "MetadataFilePathString",
-        dict["ModeString", dict["CaseIdxString", "CaseReport"]],
-    ]
+    execution_information: dict["MetadataFilePathString", "MetadataFileReport"]


-class Context(TypedDict):
+class MetadataFileReport(typing.TypedDict):
+    case_reports: dict["CaseIdxString", "CaseReport"]
+
+
+class CaseReport(typing.TypedDict):
+    mode_execution_reports: dict["ModeString", "ExecutionReport"]
+
+
+class ExecutionReport(typing.TypedDict):
+    status: "TestCaseStatus"
+
+
+class Context(typing.TypedDict):
     Test: "TestContext"


-class TestContext(TypedDict):
+class TestContext(typing.TypedDict):
     corpus_configuration: "CorpusConfiguration"


-class CorpusConfiguration(TypedDict):
+class CorpusConfiguration(typing.TypedDict):
     test_specifiers: list["TestSpecifier"]


-class CaseReport(TypedDict):
-    status: "CaseStatus"
-
-
-class CaseStatusSuccess(TypedDict):
-    status: Literal["Succeeded"]
+class CaseStatusSuccess(typing.TypedDict):
+    status: typing.Literal["Succeeded"]
     steps_executed: int


-class CaseStatusFailure(TypedDict):
-    status: Literal["Failed"]
+class CaseStatusFailure(typing.TypedDict):
+    status: typing.Literal["Failed"]
     reason: str


-class CaseStatusIgnored(TypedDict):
-    status: Literal["Ignored"]
+class CaseStatusIgnored(typing.TypedDict):
+    status: typing.Literal["Ignored"]
     reason: str


-CaseStatus = Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored]
+TestCaseStatus = typing.Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored]
 """A union type of all of the possible statuses that could be reported for a case."""

 TestSpecifier = str
@@ -64,6 +67,12 @@ MetadataFilePathString = str
 CaseIdxString = str
 """The index of a case as a string. For example '0'"""

+PlatformString = typing.Union[
+    typing.Literal["revive-dev-node-revm-solc"],
+    typing.Literal["revive-dev-node-polkavm-resolc"],
+]
+"""A string of the platform on which the test was run"""
+

 def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
     """
@@ -78,12 +87,22 @@ def path_relative_to_resolc_compiler_test_directory(path: str) -> str:


 def main() -> None:
-    with open("report.json", "r") as file:
+    with open(sys.argv[1], "r") as file:
         report: Report = json.load(file)

+    # Getting the platform string and resolving it into a simpler version of
+    # itself.
+    platform_identifier: PlatformString = typing.cast(PlatformString, sys.argv[2])
+    if platform_identifier == "revive-dev-node-polkavm-resolc":
+        platform: str = "PolkaVM"
+    elif platform_identifier == "revive-dev-node-revm-solc":
+        platform: str = "REVM"
+    else:
+        platform: str = platform_identifier
+
     # Starting the markdown document and adding information to it as we go.
     markdown_document: io.TextIOWrapper = open("report.md", "w")
-    print("# Differential Tests Results", file=markdown_document)
+    print(f"# Differential Tests Results ({platform})", file=markdown_document)

     # Getting all of the test specifiers from the report and making them relative to the tests dir.
     test_specifiers: list[str] = list(
@@ -94,7 +113,7 @@ def main() -> None:
     )
     print("## Specified Tests", file=markdown_document)
     for test_specifier in test_specifiers:
-        print(f"* `{test_specifier}`", file=markdown_document)
+        print(f"* ``{test_specifier}``", file=markdown_document)

     # Counting the total number of test cases, successes, failures, and ignored tests
     total_number_of_cases: int = 0
@@ -102,9 +121,13 @@ def main() -> None:
     total_number_of_failures: int = 0
     total_number_of_ignores: int = 0
     for _, mode_to_case_mapping in report["execution_information"].items():
-        for _, case_idx_to_report_mapping in mode_to_case_mapping.items():
-            for _, case_report in case_idx_to_report_mapping.items():
-                status: CaseStatus = case_report["status"]
+        for _, case_idx_to_report_mapping in mode_to_case_mapping[
+            "case_reports"
+        ].items():
+            for _, execution_report in case_idx_to_report_mapping[
+                "mode_execution_reports"
+            ].items():
+                status: TestCaseStatus = execution_report["status"]

                 total_number_of_cases += 1
                 if status["status"] == "Succeeded":
@@ -144,9 +167,13 @@ def main() -> None:
     for metadata_file_path, mode_to_case_mapping in report[
         "execution_information"
     ].items():
-        for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
-            for case_idx_string, case_report in case_idx_to_report_mapping.items():
-                status: CaseStatus = case_report["status"]
+        for case_idx_string, case_idx_to_report_mapping in mode_to_case_mapping[
+            "case_reports"
+        ].items():
+            for mode_string, execution_report in case_idx_to_report_mapping[
+                "mode_execution_reports"
+            ].items():
+                status: TestCaseStatus = execution_report["status"]
                 metadata_file_path: str = (
                     path_relative_to_resolc_compiler_test_directory(metadata_file_path)
                 )
@@ -183,9 +210,13 @@ def main() -> None:
     for metadata_file_path, mode_to_case_mapping in report[
         "execution_information"
     ].items():
-        for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
-            for case_idx_string, case_report in case_idx_to_report_mapping.items():
-                status: CaseStatus = case_report["status"]
+        for case_idx_string, case_idx_to_report_mapping in mode_to_case_mapping[
+            "case_reports"
+        ].items():
+            for mode_string, execution_report in case_idx_to_report_mapping[
+                "mode_execution_reports"
+            ].items():
+                status: TestCaseStatus = execution_report["status"]
                 metadata_file_path: str = (
                     path_relative_to_resolc_compiler_test_directory(metadata_file_path)
                 )
@@ -194,7 +225,9 @@ def main() -> None:
                 if status["status"] != "Failed":
                     continue

-                failure_reason: str = status["reason"].replace("\n", " ")
+                failure_reason: str = (
+                    status["reason"].replace("\n", " ").replace("|", " ")
+                )

                 note: str = ""
                 modes_where_this_case_succeeded: set[ModeString] = (
@@ -212,7 +245,7 @@ def main() -> None:
                     f"{metadata_file_path}::{case_idx_string}::{mode_string}"
                 )
                 print(
-                    f"| `{test_specifier}` | `{failure_reason}` | {note} |",
+                    f"| ``{test_specifier}`` | ``{failure_reason}`` | {note} |",
                     file=markdown_document,
                 )
     print("\n\n</details>", file=markdown_document)
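A note on the double backticks and the extra `.replace("|", " ")` above: failure reasons are interpolated into code spans inside a markdown table, so a reason containing a backtick would close a single-backtick span early, and a literal pipe would split the table row. Stripping pipes and newlines and using double-backtick spans keeps each cell intact. A small illustration (the reason string is invented):

    reason = "unexpected `|` token\nsee logs"
    safe = reason.replace("\n", " ").replace("|", " ")
    note = ""
    # ``...`` tolerates the single backticks left inside the reason; the pipe
    # and newline replacements keep the table row on one line, in one cell.
    print(f"| ``{safe}`` | {note} |")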