mirror of https://github.com/pezkuwichain/revive-differential-tests.git
synced 2026-04-22 21:57:58 +00:00

Compare commits

12 Commits

| SHA1 |
|---|
| 93f6e5efdf |
| ecd6252666 |
| 1d1642887b |
| db0522aa71 |
| ca5cad8e43 |
| 33911b5ce3 |
| 5b730d914e |
| 9f7a314b20 |
| 5cd3dd8c83 |
| 8d15f87ff0 |
| 566dd06d9a |
| 5c30e8a5bf |
@@ -1,141 +0,0 @@
-name: "Run Revive Differential Tests"
-description: "Builds and runs revive-differential-tests (retester) from this repo against the caller's Polkadot SDK."
-
-inputs:
-  # Setup arguments & environment
-  polkadot-sdk-path:
-    description: "The path of the polkadot-sdk that should be compiled for the tests to run against."
-    required: false
-    default: "."
-    type: string
-  cargo-command:
-    description: "The cargo command to use in compilations and running of tests (e.g., forklift cargo)."
-    required: false
-    default: "cargo"
-    type: string
-  revive-differential-tests-ref:
-    description: "The branch, tag, or SHA to check out for the revive-differential-tests."
-    required: false
-    default: "main"
-    type: string
-  resolc-version:
-    description: "The version of resolc to install and use in tests."
-    required: false
-    default: "0.5.0"
-    type: string
-  use-compilation-caches:
-    description: "Controls whether the compilation caches will be used for the test run."
-    required: false
-    default: true
-    type: boolean
-  # Test Execution Arguments
-  platform:
-    description: "The identifier of the platform to run the tests on (e.g., geth-evm-solc, revive-dev-node-revm-solc)."
-    required: true
-    type: string
-  polkadot-omnichain-node-chain-spec-path:
-    description: "The path of the chain-spec of the chain we're spawning. This is only required if the polkadot-omni-node is one of the selected platforms."
-    required: false
-    type: string
-  polkadot-omnichain-node-parachain-id:
-    description: "The id of the parachain to spawn with the polkadot-omni-node. This is only required if the polkadot-omni-node is one of the selected platforms."
-    type: number
-    required: false
-  expectations-file-path:
-    description: "Path to the expectations file to compare against."
-    type: string
-    required: false
-
-runs:
-  using: "composite"
-  steps:
-    - name: Checkout the Differential Tests Repository
-      uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
-      with:
-        repository: paritytech/revive-differential-tests
-        ref: ${{ inputs['revive-differential-tests-ref'] }}
-        path: revive-differential-tests
-        submodules: recursive
-    - name: Installing Resolc
-      shell: bash
-      if: ${{ runner.os == 'Linux' && runner.arch == 'X64' }}
-      run: |
-        VERSION="${{ inputs['resolc-version'] }}"
-        ASSET_URL="https://github.com/paritytech/revive/releases/download/v$VERSION/resolc-x86_64-unknown-linux-musl"
-        echo "Downloading resolc v$VERSION from $ASSET_URL"
-        curl -Lsf --show-error -o resolc "$ASSET_URL"
-        chmod +x resolc
-        ./resolc --version
-    - name: Installing Retester
-      shell: bash
-      run: ${{ inputs['cargo-command'] }} install --locked --path revive-differential-tests/crates/core
-    - name: Creating a workdir for retester
-      shell: bash
-      run: mkdir workdir
-    - name: Downloading & Initializing the compilation caches
-      shell: bash
-      if: ${{ inputs['use-compilation-caches'] == true }}
-      run: |
-        curl -fL --retry 3 --retry-all-errors --connect-timeout 10 -o cache.tar.gz "https://github.com/paritytech/revive-differential-tests/releases/download/compilation-caches-v1.1/cache.tar.gz"
-        tar -zxf cache.tar.gz -C ./workdir > /dev/null 2>&1
-    - name: Building the dependencies from the Polkadot SDK
-      shell: bash
-      run: |
-        ${{ inputs['cargo-command'] }} build --locked --profile release -p pallet-revive-eth-rpc -p revive-dev-node --manifest-path ${{ inputs['polkadot-sdk-path'] }}/Cargo.toml
-        ${{ inputs['cargo-command'] }} build --locked --profile release --bin polkadot-omni-node --manifest-path ${{ inputs['polkadot-sdk-path'] }}/Cargo.toml
-    - name: Installing retester
-      shell: bash
-      run: ${{ inputs['cargo-command'] }} install --path ./revive-differential-tests/crates/core
-    - name: Installing report-processor
-      shell: bash
-      run: ${{ inputs['cargo-command'] }} install --path ./revive-differential-tests/crates/report-processor
-    - name: Running the Differential Tests
-      shell: bash
-      run: |
-        OMNI_ARGS=()
-        if [[ -n "${{ inputs['polkadot-omnichain-node-parachain-id'] }}" ]]; then
-          OMNI_ARGS+=(
-            --polkadot-omni-node.parachain-id
-            "${{ inputs['polkadot-omnichain-node-parachain-id'] }}"
-          )
-        fi
-        if [[ -n "${{ inputs['polkadot-omnichain-node-chain-spec-path'] }}" ]]; then
-          OMNI_ARGS+=(
-            --polkadot-omni-node.chain-spec-path
-            "${{ inputs['polkadot-omnichain-node-chain-spec-path'] }}"
-          )
-        fi
-
-        retester test \
-          --test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/simple \
-          --test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/complex \
-          --test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/translated_semantic_tests \
-          --platform ${{ inputs['platform'] }} \
-          --report.file-name report.json \
-          --concurrency.number-of-nodes 10 \
-          --concurrency.number-of-threads 10 \
-          --concurrency.number-of-concurrent-tasks 100 \
-          --working-directory ./workdir \
-          --revive-dev-node.consensus manual-seal-200 \
-          --revive-dev-node.path ${{ inputs['polkadot-sdk-path'] }}/target/release/revive-dev-node \
-          --eth-rpc.path ${{ inputs['polkadot-sdk-path'] }}/target/release/eth-rpc \
-          --polkadot-omni-node.path ${{ inputs['polkadot-sdk-path'] }}/target/release/polkadot-omni-node \
-          --resolc.path ./resolc \
-          "${OMNI_ARGS[@]}" || true
-    - name: Generate the expectation file
-      shell: bash
-      run: report-processor generate-expectations-file --report-path ./workdir/report.json --output-path ./workdir/expectations.json --remove-prefix ./revive-differential-tests/resolc-compiler-tests
-    - name: Upload the Report to the CI
-      uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
-      with:
-        name: ${{ inputs['platform'] }}-report.json
-        path: ./workdir/report.json
-    - name: Upload the Expectations File to the CI
-      uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
-      with:
-        name: ${{ inputs['platform'] }}.json
-        path: ./workdir/expectations.json
-    - name: Check Expectations
-      shell: bash
-      if: ${{ inputs['expectations-file-path'] != '' }}
-      run: report-processor compare-expectation-files --base-expectation-path ${{ inputs['expectations-file-path'] }} --other-expectation-path ./workdir/expectations.json
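For context, a caller workflow would have invoked a composite action like the one deleted above roughly as follows. This is a minimal sketch: the `uses:` path, workflow name, and platform value are illustrative assumptions and are not taken from the diff.

```yaml
# Hypothetical caller workflow (sketch); the action path and inputs are assumptions.
name: revive-differential-tests
on: [pull_request]

jobs:
  retester:
    runs-on: ubuntu-24.04
    steps:
      # The caller checks out its own Polkadot SDK tree; the action builds against it.
      - uses: actions/checkout@v4
      # Assumed location of the composite action inside this repository.
      - uses: paritytech/revive-differential-tests/.github/actions/run-differential-tests@main
        with:
          platform: geth-evm-solc
          polkadot-sdk-path: "."
```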
+107 -144
@@ -15,98 +15,87 @@ concurrency:
 
 env:
   CARGO_TERM_COLOR: always
-  POLKADOT_VERSION: polkadot-stable2506-2
 
 jobs:
-  machete:
-    name: Check for Unneeded Dependencies
-    runs-on: ubuntu-24.04
-    env:
-      SCCACHE_GHA_ENABLED: "true"
-      RUSTC_WRAPPER: "sccache"
-    steps:
-      - name: Checkout This Repository
-        uses: actions/checkout@v4
-        with:
-          submodules: recursive
-      - name: Run Sccache
-        uses: mozilla-actions/sccache-action@v0.0.9
-      - name: Install the Rust Toolchain
-        uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Install the Cargo Make Binary
-        uses: davidB/rust-cargo-make@v1
-      - name: Run Cargo Machete
-        run: cargo make machete
-  check-fmt:
-    name: Check Formatting
-    runs-on: ubuntu-24.04
-    env:
-      SCCACHE_GHA_ENABLED: "true"
-      RUSTC_WRAPPER: "sccache"
-    steps:
-      - name: Checkout This Repository
-        uses: actions/checkout@v4
-        with:
-          submodules: recursive
-      - name: Run Sccache
-        uses: mozilla-actions/sccache-action@v0.0.9
-      - name: Install the Rust Toolchain
-        uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Install the Cargo Make Binary
-        uses: davidB/rust-cargo-make@v1
-      - name: Run Cargo Formatter
-        run: cargo make fmt-check
-  check-clippy:
-    name: Check Clippy Lints
-    runs-on: ubuntu-24.04
-    env:
-      SCCACHE_GHA_ENABLED: "true"
-      RUSTC_WRAPPER: "sccache"
-    steps:
-      - name: Checkout This Repository
-        uses: actions/checkout@v4
-        with:
-          submodules: recursive
-      - name: Run Sccache
-        uses: mozilla-actions/sccache-action@v0.0.9
-      - name: Install the Rust Toolchain
-        uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Install the Cargo Make Binary
-        uses: davidB/rust-cargo-make@v1
-      - name: Run Cargo Clippy
-        run: cargo make clippy
-  test:
-    name: Unit Tests
+  cache-polkadot:
+    name: Build and cache Polkadot binaries on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
-    needs: cache-polkadot
     strategy:
       matrix:
         os: [ubuntu-24.04, macos-14]
-    env:
-      SCCACHE_GHA_ENABLED: "true"
-      RUSTC_WRAPPER: "sccache"
-      POLKADOT_SDK_COMMIT_HASH: "30cda2aad8612a10ff729d494acd9d5353294d63"
     steps:
-      - name: Checkout This Repository
+      - name: Checkout repo and submodules
         uses: actions/checkout@v4
         with:
           submodules: recursive
-      - name: Run Sccache
-        uses: mozilla-actions/sccache-action@v0.0.9
-      - name: Install the Rust Toolchain
-        uses: actions-rust-lang/setup-rust-toolchain@v1
-        with:
-          target: "wasm32-unknown-unknown"
-          components: "rust-src,rust-std"
-      - name: Install the Cargo Make Binary
-        uses: davidB/rust-cargo-make@v1
-      - name: Caching Step
-        uses: actions/cache@v4
+      - name: Install dependencies (Linux)
+        if: matrix.os == 'ubuntu-24.04'
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y protobuf-compiler clang libclang-dev
+          rustup target add wasm32-unknown-unknown
+          rustup component add rust-src
+      - name: Install dependencies (macOS)
+        if: matrix.os == 'macos-14'
+        run: |
+          brew install protobuf
+          rustup target add wasm32-unknown-unknown
+          rustup component add rust-src
+      - name: Cache binaries
+        id: cache
+        uses: actions/cache@v3
         with:
           path: |
+            ~/.cargo/bin/substrate-node
             ~/.cargo/bin/eth-rpc
-            ~/.cargo/bin/revive-dev-node
-          key: polkadot-binaries-${{ env.POLKADOT_SDK_COMMIT_HASH }}-${{ matrix.os }}
+          key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}
+      - name: Build substrate-node
+        if: steps.cache.outputs.cache-hit != 'true'
+        run: |
+          cd polkadot-sdk
+          cargo install --locked --force --profile=production --path substrate/bin/node/cli --bin substrate-node --features cli
+      - name: Build eth-rpc
+        if: steps.cache.outputs.cache-hit != 'true'
+        run: |
+          cd polkadot-sdk
+          cargo install --path substrate/frame/revive/rpc --bin eth-rpc
+
+  ci:
+    name: CI on ${{ matrix.os }}
+    needs: cache-polkadot
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-24.04, macos-14]
+
+    steps:
+      - name: Checkout repo
+        uses: actions/checkout@v4
+
+      - name: Restore binaries from cache
+        uses: actions/cache@v3
+        with:
+          path: |
+            ~/.cargo/bin/substrate-node
+            ~/.cargo/bin/eth-rpc
+          key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}
+
+      - name: Setup Rust toolchain
+        uses: actions-rust-lang/setup-rust-toolchain@v1
+        with:
+          rustflags: ""
+
+      - name: Add wasm32 target and formatting
+        run: |
+          rustup target add wasm32-unknown-unknown
+          rustup component add rust-src rustfmt clippy
+
       - name: Install Geth on Ubuntu
         if: matrix.os == 'ubuntu-24.04'
         run: |
@@ -139,6 +128,7 @@ jobs:
           curl -sL https://github.com/paritytech/revive/releases/download/v0.3.0/resolc-x86_64-unknown-linux-musl -o resolc
           chmod +x resolc
           sudo mv resolc /usr/local/bin
+
       - name: Install Geth on macOS
         if: matrix.os == 'macos-14'
         run: |
@@ -150,79 +140,52 @@ jobs:
           curl -sL https://github.com/paritytech/revive/releases/download/v0.3.0/resolc-universal-apple-darwin -o resolc
           chmod +x resolc
           sudo mv resolc /usr/local/bin
 
       - name: Install Kurtosis on macOS
         if: matrix.os == 'macos-14'
         run: brew install kurtosis-tech/tap/kurtosis-cli
 
       - name: Install Kurtosis on Ubuntu
         if: matrix.os == 'ubuntu-24.04'
         run: |
           echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
           sudo apt update
           sudo apt install kurtosis-cli
-      - name: Run Tests
-        run: cargo make test
-
-  cache-polkadot:
-    name: Build and Cache Polkadot Binaries on ${{ matrix.os }}
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        os: [ubuntu-24.04, macos-14]
-    env:
-      SCCACHE_GHA_ENABLED: "true"
-      RUSTC_WRAPPER: "sccache"
-      RUSTFLAGS: "-Awarnings"
-      POLKADOT_SDK_COMMIT_HASH: "30cda2aad8612a10ff729d494acd9d5353294d63"
-    steps:
-      - name: Caching Step
-        id: cache-step
-        uses: actions/cache@v4
-        with:
-          path: |
-            ~/.cargo/bin/eth-rpc
-            ~/.cargo/bin/revive-dev-node
-          key: polkadot-binaries-${{ env.POLKADOT_SDK_COMMIT_HASH }}-${{ matrix.os }}
-      - name: Checkout the Polkadot SDK Repository
-        uses: actions/checkout@v4
-        if: steps.cache-step.outputs.cache-hit != 'true'
-        with:
-          repository: paritytech/polkadot-sdk
-          ref: ${{ env.POLKADOT_SDK_COMMIT_HASH }}
-          submodules: recursive
-      - name: Run Sccache
-        uses: mozilla-actions/sccache-action@v0.0.9
-        if: steps.cache-step.outputs.cache-hit != 'true'
-      - name: Install the Rust Toolchain
-        uses: actions-rust-lang/setup-rust-toolchain@v1
-        if: steps.cache-step.outputs.cache-hit != 'true'
-        with:
-          target: "wasm32-unknown-unknown"
-          components: "rust-src"
-          toolchain: "1.90.0"
-      - name: Install dependencies (Linux)
-        if: matrix.os == 'ubuntu-24.04' && steps.cache-step.outputs.cache-hit != 'true'
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y protobuf-compiler clang libclang-dev
-      - name: Install dependencies (macOS)
-        if: matrix.os == 'macos-14' && steps.cache-step.outputs.cache-hit != 'true'
-        run: |
-          brew install protobuf llvm
-          LLVM_PREFIX="$(brew --prefix llvm)"
-          echo "LDFLAGS=-L${LLVM_PREFIX}/lib" >> "$GITHUB_ENV"
-          echo "CPPFLAGS=-I${LLVM_PREFIX}/include" >> "$GITHUB_ENV"
-          echo "CMAKE_PREFIX_PATH=${LLVM_PREFIX}" >> "$GITHUB_ENV"
-          echo "LIBCLANG_PATH=${LLVM_PREFIX}/lib" >> "$GITHUB_ENV"
-          echo "DYLD_FALLBACK_LIBRARY_PATH=${LLVM_PREFIX}/lib" >> "$GITHUB_ENV"
-          echo "${LLVM_PREFIX}/bin" >> "$GITHUB_PATH"
-      - name: Build Polkadot Dependencies
-        if: steps.cache-step.outputs.cache-hit != 'true'
-        run: |
-          cargo build \
-            --locked \
-            --profile production \
-            --package revive-dev-node \
-            --package pallet-revive-eth-rpc;
-          mv ./target/production/revive-dev-node ~/.cargo/bin
-          mv ./target/production/eth-rpc ~/.cargo/bin
-          chmod +x ~/.cargo/bin/*
+      - name: Machete
+        uses: bnjbvr/cargo-machete@v0.7.1
+
+      - name: Format
+        run: make format
+
+      - name: Clippy
+        run: make clippy
+
+      - name: Check substrate-node version
+        run: substrate-node --version
+
+      - name: Check eth-rpc version
+        run: eth-rpc --version
+
+      - name: Check resolc version
+        run: resolc --version
+
+      - name: Test Formatting
+        run: make format
+
+      - name: Test Clippy
+        run: make clippy
+
+      - name: Test Machete
+        run: make machete
+
+      - name: Unit Tests
+        if: matrix.os == 'ubuntu-24.04'
+        run: cargo test --workspace -- --nocapture
+
+      # We can't install docker in the macOS image used in CI and therefore we need to skip the
+      # Kurtosis and lighthouse related tests when running the CI on macOS.
+      - name: Unit Tests
+        if: matrix.os == 'macos-14'
+        run: |
+          cargo test --workspace -- --nocapture --skip lighthouse_geth::tests::
+2 -3
@@ -3,15 +3,14 @@
 .DS_Store
 node_modules
 /*.json
-*.sh
 
 # We do not want to commit any log files that we produce from running the code locally so this is
 # added to the .gitignore file.
 *.log
 
 profile.json.gz
-workdir*
+resolc-compiler-tests
+workdir
 
 !/schema.json
 !/dev-genesis.json
-!/scripts/*
+3 -3
@@ -1,3 +1,3 @@
-[submodule "resolc-compiler-tests"]
-	path = resolc-compiler-tests
-	url = https://github.com/paritytech/resolc-compiler-tests
+[submodule "polkadot-sdk"]
+	path = polkadot-sdk
+	url = https://github.com/paritytech/polkadot-sdk.git
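Since this change swaps the tracked submodule, anyone updating an existing checkout would typically re-sync submodules afterwards. These are standard git commands, not commands taken from this page:

```bash
# Refresh the submodule mapping and check out the newly tracked submodule.
git pull
git submodule sync --recursive
git submodule update --init --recursive
```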
Generated: +945 -3451 (file diff suppressed because it is too large).
+24 -16
@@ -21,14 +21,13 @@ revive-dt-node-interaction = { version = "0.1.0", path = "crates/node-interactio
 revive-dt-node-pool = { version = "0.1.0", path = "crates/node-pool" }
 revive-dt-report = { version = "0.1.0", path = "crates/report" }
 revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }
-revive-dt-report-processor = { version = "0.1.0", path = "crates/report-processor" }
 
-alloy = { version = "1.4.1", features = ["full", "genesis", "json-rpc"] }
-ansi_term = "0.12.1"
+alloy-primitives = "1.2.1"
+alloy-sol-types = "1.2.1"
 anyhow = "1.0"
 bson = { version = "2.15.0" }
 cacache = { version = "13.1.0" }
-clap = { version = "4", features = ["derive", "wrap_help"] }
+clap = { version = "4", features = ["derive"] }
 dashmap = { version = "6.1.0" }
 foundry-compilers-artifacts = { version = "0.18.0" }
 futures = { version = "0.3.31" }
@@ -52,7 +51,6 @@ sha2 = { version = "0.10.9" }
 sp-core = "36.1.0"
 sp-runtime = "41.1.0"
 strum = { version = "0.27.2", features = ["derive"] }
-subxt = { version = "0.44.0" }
 temp-dir = { version = "0.1.16" }
 tempfile = "3.3"
 thiserror = "2"
@@ -61,7 +59,6 @@ tokio = { version = "1.47.0", default-features = false, features = [
     "process",
     "rt",
 ] }
-tower = { version = "0.5.2", features = ["limit"] }
 uuid = { version = "1.8", features = ["v4"] }
 tracing = { version = "0.1.41" }
 tracing-appender = { version = "0.2.3" }
@@ -71,23 +68,34 @@ tracing-subscriber = { version = "0.3.19", default-features = false, features =
     "env-filter",
 ] }
 indexmap = { version = "2.10.0", default-features = false }
-itertools = { version = "0.14.0" }
 
 # revive compiler
-revive-solc-json-interface = { version = "0.5.0" }
-revive-common = { version = "0.3.0" }
-revive-differential = { version = "0.3.0" }
+revive-solc-json-interface = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
+revive-common = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
+revive-differential = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
 
-zombienet-sdk = { git = "https://github.com/paritytech/zombienet-sdk.git", rev = "891f6554354ce466abd496366dbf8b4f82141241" }
+[workspace.dependencies.alloy]
+version = "1.0.22"
+default-features = false
+features = [
+    "json-abi",
+    "providers",
+    "provider-ipc",
+    "provider-ws",
+    "provider-debug-api",
+    "reqwest",
+    "rpc-types",
+    "signer-local",
+    "std",
+    "network",
+    "serde",
+    "rpc-types-eth",
+    "genesis",
+]
 
 [profile.bench]
 inherits = "release"
-codegen-units = 1
 lto = true
 
-[profile.production]
-inherits = "release"
 codegen-units = 1
-lto = true
 
 [workspace.lints.clippy]
@@ -0,0 +1,15 @@
+.PHONY: format clippy test machete
+
+format:
+	cargo fmt --all -- --check
+
+clippy:
+	cargo clippy --all-features --workspace -- --deny warnings
+
+machete:
+	cargo install cargo-machete
+	cargo machete crates
+
+test: format clippy machete
+	cargo test --workspace -- --nocapture
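This new Makefile replaces the cargo-make tasks removed just below; its `test` target chains the lint targets before running the workspace suite. A typical local invocation (assuming GNU make and a Rust toolchain on PATH) would be:

```bash
# Run the lint gates first, then the whole workspace test suite (mirrors what CI does).
make test

# Or run an individual gate, e.g. just clippy:
make clippy
```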
@@ -1,21 +0,0 @@
-[config]
-default_to_workspace = false
-
-[tasks.machete]
-command = "cargo"
-args = ["machete", "crates"]
-install_crate = "cargo-machete"
-
-[tasks.fmt-check]
-command = "cargo"
-args = ["fmt", "--all", "--", "--check"]
-install_crate = "rustfmt"
-
-[tasks.clippy]
-command = "cargo"
-args = ["clippy", "--all-features", "--workspace", "--", "--deny", "warnings"]
-install_crate = "clippy"
-
-[tasks.test]
-command = "cargo"
-args = ["test", "--workspace", "--", "--nocapture"]
@@ -9,7 +9,7 @@
 This project compiles and executes declarative smart-contract tests against multiple platforms, then compares behavior (status, return data, events, and state diffs). Today it supports:
 
 - Geth (EVM reference implementation)
-- Revive Dev Node (Substrate-based PolkaVM + `eth-rpc` proxy)
+- Revive Kitchensink (Substrate-based PolkaVM + `eth-rpc` proxy)
 
 Use it to:
 
@@ -39,9 +39,9 @@ This repository contains none of the tests and only contains the testing framewo
 This section describes the required dependencies that this framework requires to run. Compiling this framework is pretty straightforward and no additional dependencies beyond what's specified in the `Cargo.toml` file should be required.
 
 - Stable Rust
-- Geth - When doing differential testing against the PVM we submit transactions to a Geth node and to Revive Dev Node to compare them.
-- Revive Dev Node - When doing differential testing against the PVM we submit transactions to a Geth node and to Revive Dev Node to compare them.
-- ETH-RPC - All communication with Revive Dev Node is done through the ETH RPC.
+- Geth - When doing differential testing against the PVM we submit transactions to a Geth node and to Kitchensink to compare them.
+- Kitchensink - When doing differential testing against the PVM we submit transactions to a Geth node and to Kitchensink to compare them.
+- ETH-RPC - All communication with Kitchensink is done through the ETH RPC.
 - Solc - This is actually a transitive dependency: while this tool doesn't require solc (it downloads the versions that it requires), resolc requires that Solc is installed and available in the path.
 - Resolc - This is required to compile the contracts to PolkaVM bytecode.
 - Kurtosis - The Kurtosis CLI tool is required for the production Ethereum mainnet-like node configuration with Geth as the execution layer and lighthouse as the consensus layer. Kurtosis also requires docker to be installed since it runs everything inside of docker containers.
@@ -52,69 +52,192 @@ All of the above need to be installed and available in the path in order for the
 
 This tool is being updated quite frequently. Therefore, it's recommended that you don't install the tool and then run it, but rather that you run it from the root of the directory using `cargo run --release`. The help command of the tool gives you all of the information you need to know about each of the options and flags that the tool offers.
 
+```bash
+$ cargo run --release -- execute-tests --help
+Error: Executes tests in the MatterLabs format differentially on multiple targets concurrently
+
+Usage: retester execute-tests [OPTIONS]
+
+Options:
+  -w, --working-directory <WORKING_DIRECTORY>
+          The working directory that the program will use for all of the temporary artifacts needed at runtime.
+
+          If not specified, then a temporary directory will be created and used by the program for all temporary artifacts.
+
+          [default: ]
+
+  -p, --platform <PLATFORMS>
+          The set of platforms that the differential tests should run on
+
+          [default: geth-evm-solc,revive-dev-node-polkavm-resolc]
+
+          Possible values:
+          - geth-evm-solc: The Go-ethereum reference full node EVM implementation with the solc compiler
+          - kitchensink-polkavm-resolc: The kitchensink node with the PolkaVM backend with the resolc compiler
+          - kitchensink-revm-solc: The kitchensink node with the REVM backend with the solc compiler
+          - revive-dev-node-polkavm-resolc: The revive dev node with the PolkaVM backend with the resolc compiler
+          - revive-dev-node-revm-solc: The revive dev node with the REVM backend with the solc compiler
+
+  -c, --corpus <CORPUS>
+          A list of test corpus JSON files to be tested
+
+  -h, --help
+          Print help (see a summary with '-h')
+
+Solc Configuration:
+      --solc.version <VERSION>
+          Specifies the default version of the Solc compiler that should be used if there is no override specified by one of the test cases
+
+          [default: 0.8.29]
+
+Resolc Configuration:
+      --resolc.path <resolc.path>
+          Specifies the path of the resolc compiler to be used by the tool.
+
+          If this is not specified, then the tool assumes that it should use the resolc binary that's provided in the user's $PATH.
+
+          [default: resolc]
+
+Geth Configuration:
+      --geth.path <geth.path>
+          Specifies the path of the geth node to be used by the tool.
+
+          If this is not specified, then the tool assumes that it should use the geth binary that's provided in the user's $PATH.
+
+          [default: geth]
+
+      --geth.start-timeout-ms <geth.start-timeout-ms>
+          The amount of time to wait upon startup before considering that the node timed out
+
+          [default: 5000]
+
+Kitchensink Configuration:
+      --kitchensink.path <kitchensink.path>
+          Specifies the path of the kitchensink node to be used by the tool.
+
+          If this is not specified, then the tool assumes that it should use the kitchensink binary that's provided in the user's $PATH.
+
+          [default: substrate-node]
+
+      --kitchensink.start-timeout-ms <kitchensink.start-timeout-ms>
+          The amount of time to wait upon startup before considering that the node timed out
+
+          [default: 5000]
+
+      --kitchensink.dont-use-dev-node
+          This configures the tool to use Kitchensink instead of using the revive-dev-node
+
+Revive Dev Node Configuration:
+      --revive-dev-node.path <revive-dev-node.path>
+          Specifies the path of the revive dev node to be used by the tool.
+
+          If this is not specified, then the tool assumes that it should use the revive dev node binary that's provided in the user's $PATH.
+
+          [default: revive-dev-node]
+
+      --revive-dev-node.start-timeout-ms <revive-dev-node.start-timeout-ms>
+          The amount of time to wait upon startup before considering that the node timed out
+
+          [default: 5000]
+
+Eth RPC Configuration:
+      --eth-rpc.path <eth-rpc.path>
+          Specifies the path of the ETH RPC to be used by the tool.
+
+          If this is not specified, then the tool assumes that it should use the ETH RPC binary that's provided in the user's $PATH.
+
+          [default: eth-rpc]
+
+      --eth-rpc.start-timeout-ms <eth-rpc.start-timeout-ms>
+          The amount of time to wait upon startup before considering that the node timed out
+
+          [default: 5000]
+
+Genesis Configuration:
+      --genesis.path <genesis.path>
+          Specifies the path of the genesis file to use for the nodes that are started.
+
+          This is expected to be the path of a JSON geth genesis file.
+
+Wallet Configuration:
+      --wallet.default-private-key <DEFAULT_KEY>
+          The private key of the default signer
+
+          [default: 0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d]
+
+      --wallet.additional-keys <ADDITIONAL_KEYS>
+          This argument controls which private keys the nodes should have access to and be added to its wallet signers. With a value of N, private keys (0, N] will be added to the signer set of the node
+
+          [default: 100000]
+
+Concurrency Configuration:
+      --concurrency.number-of-nodes <NUMBER_OF_NODES>
+          Determines the amount of nodes that will be spawned for each chain
+
+          [default: 5]
+
+      --concurrency.number-of-threads <NUMBER_OF_THREADS>
+          Determines the amount of tokio worker threads that will be used
+
+          [default: 16]
+
+      --concurrency.number-of-concurrent-tasks <NUMBER_CONCURRENT_TASKS>
+          Determines the amount of concurrent tasks that will be spawned to run tests.
+
+          Defaults to 10 x the number of nodes.
+
+      --concurrency.ignore-concurrency-limit
+          Determines if the concurrency limit should be ignored or not
+
+Compilation Configuration:
+      --compilation.invalidate-cache
+          Controls if the compilation cache should be invalidated or not
+
+Report Configuration:
+      --report.include-compiler-input
+          Controls if the compiler input is included in the final report
+
+      --report.include-compiler-output
+          Controls if the compiler output is included in the final report
+```
+
+To run tests with this tool you need a corpus JSON file that defines the tests included in the corpus. The simplest corpus file looks like the following:
+
+```json
+{
+    "name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
+    "path": "resolc-compiler-tests/fixtures/solidity"
+}
+```
+
 > [!NOTE]
 > Note that the tests can be found in the [`resolc-compiler-tests`](https://github.com/paritytech/resolc-compiler-tests) repository.
 
+The above corpus file instructs the tool to look for all of the test cases contained within all of the metadata files of the specified directory.
+
 The simplest command to run this tool is the following:
 
 ```bash
-RUST_LOG="info" cargo run --release -- test \
-    --test ./resolc-compiler-tests/fixtures/solidity \
+RUST_LOG="info" cargo run --release -- execute-tests \
     --platform geth-evm-solc \
+    --corpus corp.json \
     --working-directory workdir \
+    --concurrency.number-of-nodes 5 \
+    --concurrency.ignore-concurrency-limit \
     > logs.log \
     2> output.log
 ```
 
-The above command will run the tool executing every one of the tests discovered in the path provided to the tool. All of the logs from the execution will be persisted in the `logs.log` file and all of the output of the tool will be persisted to the `output.log` file. If all that you're looking for is to run the tool and check which tests succeeded and failed, then the `output.log` file is what you need to be looking at. However, if you're contributing to the tool then the `logs.log` file will be very valuable.
+The above command will run the tool executing every one of the tests discovered in the path specified in the corpus file. All of the logs from the execution will be persisted in the `logs.log` file and all of the output of the tool will be persisted to the `output.log` file. If all that you're looking for is to run the tool and check which tests succeeded and failed, then the `output.log` file is what you need to be looking at. However, if you're contributing to the tool then the `logs.log` file will be very valuable.
 
-<details>
-<summary>User Managed Nodes</summary>
-
-This section describes how the user can make use of nodes that they manage rather than allowing the tool to spawn and manage the nodes on the user's behalf.
-
-> ⚠️ This is an advanced feature of the tool and could lead to test successes or failures not being reproducible. Please use this feature with caution and only if you understand the implications of running your own node instead of having the framework manage your nodes. ⚠️
-
-If you're an advanced user and you'd like to manage your own nodes instead of having the tool initialize, spawn, and manage them, then you can choose to run your own nodes and then provide them to the tool to make use of just like the following:
-
-```bash
-#!/usr/bin/env bash
-set -euo pipefail
-
-PLATFORM="revive-dev-node-revm-solc"
-
-retester export-genesis "$PLATFORM" > chainspec.json
-
-# Start revive-dev-node in a detached tmux session
-tmux new-session -d -s revive-dev-node \
-  'RUST_LOG="error,evm=debug,sc_rpc_server=info,runtime::revive=debug" revive-dev-node \
-    --dev \
-    --chain chainspec.json \
-    --force-authoring \
-    --rpc-methods Unsafe \
-    --rpc-cors all \
-    --rpc-max-connections 4294967295 \
-    --pool-limit 4294967295 \
-    --pool-kbytes 4294967295'
-sleep 5
-
-# Start eth-rpc in a detached tmux session
-tmux new-session -d -s eth-rpc \
-  'RUST_LOG="info,eth-rpc=debug" eth-rpc \
-    --dev \
-    --node-rpc-url ws://127.0.0.1:9944 \
-    --rpc-max-connections 4294967295'
-sleep 5
-
-# Run the tests (logs to files as before)
-RUST_LOG="info" retester test \
-  --platform "$PLATFORM" \
-  --corpus ./revive-differential-tests/fixtures/solidity \
-  --working-directory ./workdir \
-  --concurrency.number-of-nodes 1 \
-  --concurrency.number-of-concurrent-tasks 5 \
-  --revive-dev-node.existing-rpc-url "http://localhost:8545" \
-  > logs.log
-```
-
-</details>
+If you only want to run a subset of tests, then you can specify that in your corpus file. The following is an example:
+
+```json
+{
+    "name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
+    "paths": [
+        "path/to/a/single/metadata/file/I/want/to/run.json",
+        "path/to/a/directory/to/find/all/metadata/files/within"
+    ]
+}
+```
Binary file not shown.
@@ -10,15 +10,16 @@ rust-version.workspace = true
 
 [dependencies]
 alloy = { workspace = true }
+alloy-primitives = { workspace = true }
 anyhow = { workspace = true }
 clap = { workspace = true }
 moka = { workspace = true, features = ["sync"] }
 once_cell = { workspace = true }
-regex = { workspace = true }
 semver = { workspace = true }
 serde = { workspace = true }
 schemars = { workspace = true }
 strum = { workspace = true }
+tokio = { workspace = true, default-features = false, features = ["time"] }
 
 [lints]
 workspace = true
@@ -0,0 +1,3 @@
+mod poll;
+
+pub use poll::*;
@@ -0,0 +1,72 @@
+use std::ops::ControlFlow;
+use std::time::Duration;
+
+use anyhow::{Context as _, Result, anyhow};
+
+const EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION: Duration = Duration::from_secs(60);
+
+/// A function that polls a fallible future for some period of time and errors if it fails to
+/// get a result after polling.
+///
+/// Given a future that returns a [`Result<ControlFlow<O, ()>>`], this function calls the future
+/// repeatedly (with some wait period) until the future returns a [`ControlFlow::Break`] or until it
+/// returns an [`Err`], in which case the function stops polling and returns the error.
+///
+/// If the future keeps returning [`ControlFlow::Continue`] and fails to return a [`Break`] within
+/// the permitted polling duration then this function returns an [`Err`].
+///
+/// [`Break`]: ControlFlow::Break
+/// [`Continue`]: ControlFlow::Continue
+pub async fn poll<F, O>(
+    polling_duration: Duration,
+    polling_wait_behavior: PollingWaitBehavior,
+    mut future: impl FnMut() -> F,
+) -> Result<O>
+where
+    F: Future<Output = Result<ControlFlow<O, ()>>>,
+{
+    let mut retries = 0;
+    let mut total_wait_duration = Duration::ZERO;
+    let max_allowed_wait_duration = polling_duration;
+
+    loop {
+        if total_wait_duration >= max_allowed_wait_duration {
+            break Err(anyhow!(
+                "Polling failed after {} retries and a total of {:?} of wait time",
+                retries,
+                total_wait_duration
+            ));
+        }
+
+        match future()
+            .await
+            .context("Polled future returned an error during polling loop")?
+        {
+            ControlFlow::Continue(()) => {
+                let next_wait_duration = match polling_wait_behavior {
+                    PollingWaitBehavior::Constant(duration) => duration,
+                    PollingWaitBehavior::ExponentialBackoff => {
+                        Duration::from_secs(2u64.pow(retries))
+                            .min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION)
+                    }
+                };
+                let next_wait_duration =
+                    next_wait_duration.min(max_allowed_wait_duration - total_wait_duration);
+                total_wait_duration += next_wait_duration;
+                retries += 1;
+
+                tokio::time::sleep(next_wait_duration).await;
+            }
+            ControlFlow::Break(output) => {
+                break Ok(output);
+            }
+        }
+    }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
+pub enum PollingWaitBehavior {
+    Constant(Duration),
+    #[default]
+    ExponentialBackoff,
+}
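To make the contract of the `poll` helper added above concrete, here is a minimal, hypothetical call site. Only `poll` and `PollingWaitBehavior` come from the file above; `endpoint_is_ready`, the URL, and the use of `reqwest` are illustrative assumptions, not code from this repository.

```rust
use std::ops::ControlFlow;
use std::time::Duration;

use anyhow::Result;

// Hypothetical readiness probe: Break(()) once the endpoint answers at all,
// Continue(()) while the connection is still refused, Err is never produced here.
async fn endpoint_is_ready(url: &str) -> Result<ControlFlow<(), ()>> {
    match reqwest::get(url).await {
        Ok(_) => Ok(ControlFlow::Break(())),
        Err(_) => Ok(ControlFlow::Continue(())),
    }
}

async fn wait_for_node() -> Result<()> {
    // Give the node up to two minutes, backing off exponentially between probes
    // (capped at 60s per wait by EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION above).
    poll(
        Duration::from_secs(120),
        PollingWaitBehavior::ExponentialBackoff,
        || endpoint_is_ready("http://127.0.0.1:8545"),
    )
    .await
}
```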
@@ -3,6 +3,7 @@
 
 pub mod cached_fs;
 pub mod fs;
+pub mod futures;
 pub mod iterators;
 pub mod macros;
 pub mod types;
@@ -31,20 +31,14 @@ pub enum PlatformIdentifier {
     GethEvmSolc,
     /// The Lighthouse Go-ethereum reference full node EVM implementation with the solc compiler.
     LighthouseGethEvmSolc,
+    /// The kitchensink node with the PolkaVM backend with the resolc compiler.
+    KitchensinkPolkavmResolc,
+    /// The kitchensink node with the REVM backend with the solc compiler.
+    KitchensinkRevmSolc,
     /// The revive dev node with the PolkaVM backend with the resolc compiler.
     ReviveDevNodePolkavmResolc,
     /// The revive dev node with the REVM backend with the solc compiler.
     ReviveDevNodeRevmSolc,
-    /// A zombienet based Substrate/Polkadot node with the PolkaVM backend with the resolc compiler.
-    ZombienetPolkavmResolc,
-    /// A zombienet based Substrate/Polkadot node with the REVM backend with the solc compiler.
-    ZombienetRevmSolc,
-    /// A polkadot-omni-chain based node with a custom runtime with the PolkaVM backend and the
-    /// resolc compiler.
-    PolkadotOmniNodePolkavmResolc,
-    /// A polkadot-omni-chain based node with a custom runtime with the REVM backend and the solc
-    /// compiler.
-    PolkadotOmniNodeRevmSolc,
 }
 
 /// An enum of the platform identifiers of all of the platforms supported by this framework.
@@ -97,12 +91,10 @@ pub enum NodeIdentifier {
     Geth,
     /// The go-ethereum node implementation.
     LighthouseGeth,
+    /// The Kitchensink node implementation.
+    Kitchensink,
     /// The revive dev node implementation.
     ReviveDevNode,
-    /// A zombienet spawned node.
-    Zombienet,
-    /// The polkadot-omni-node.
-    PolkadotOmniNode,
 }
 
 /// An enum representing the identifiers of the supported VMs.
@@ -1,13 +1,9 @@
 mod identifiers;
 mod mode;
-mod parsed_test_specifier;
 mod private_key_allocator;
-mod round_robin_pool;
 mod version_or_requirement;
 
 pub use identifiers::*;
 pub use mode::*;
-pub use parsed_test_specifier::*;
 pub use private_key_allocator::*;
-pub use round_robin_pool::*;
 pub use version_or_requirement::*;
@@ -1,11 +1,6 @@
-use crate::iterators::EitherIter;
 use crate::types::VersionOrRequirement;
-use anyhow::{Context as _, bail};
-use regex::Regex;
-use schemars::JsonSchema;
 use semver::Version;
 use serde::{Deserialize, Serialize};
-use std::collections::HashSet;
 use std::fmt::Display;
 use std::str::FromStr;
 use std::sync::LazyLock;
@@ -23,18 +18,6 @@ pub struct Mode {
     pub version: Option<semver::VersionReq>,
 }
 
-impl Ord for Mode {
-    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
-        self.to_string().cmp(&other.to_string())
-    }
-}
-
-impl PartialOrd for Mode {
-    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
-        Some(self.cmp(other))
-    }
-}
-
 impl Display for Mode {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         self.pipeline.fmt(f)?;
@@ -50,19 +33,6 @@ impl Display for Mode {
     }
 }
 
-impl FromStr for Mode {
-    type Err = anyhow::Error;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        let parsed_mode = ParsedMode::from_str(s)?;
-        let mut iter = parsed_mode.to_modes();
-        let (Some(mode), None) = (iter.next(), iter.next()) else {
-            bail!("Failed to parse the mode")
-        };
-        Ok(mode)
-    }
-}
-
 impl Mode {
     /// Return all of the available mode combinations.
     pub fn all() -> impl Iterator<Item = &'static Mode> {
@@ -201,250 +171,3 @@ impl ModeOptimizerSetting {
         !matches!(self, ModeOptimizerSetting::M0)
     }
 }
-
-/// This represents a mode that has been parsed from test metadata.
-///
-/// Mode strings can take the following form (in pseudo-regex):
-///
-/// ```text
-/// [YEILV][+-]? (M[0123sz])? <semver>?
-/// ```
-///
-/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
-#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
-#[serde(try_from = "String", into = "String")]
-pub struct ParsedMode {
-    pub pipeline: Option<ModePipeline>,
-    pub optimize_flag: Option<bool>,
-    pub optimize_setting: Option<ModeOptimizerSetting>,
-    pub version: Option<semver::VersionReq>,
-}
-
-impl FromStr for ParsedMode {
-    type Err = anyhow::Error;
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        static REGEX: LazyLock<Regex> = LazyLock::new(|| {
-            Regex::new(r"(?x)
-                ^
-                (?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use eg Y, E+, E-
-                \s*
-                (?P<optimize_setting>M[a-zA-Z0-9])? # Optimize setting eg M0, Ms, Mz
-                \s*
-                (?P<version>[>=<^]*\d+(?:\.\d+)*)? # Optional semver version eg >=0.8.0, 0.7, <0.8
-                $
-            ").unwrap()
-        });
-
-        let Some(caps) = REGEX.captures(s) else {
-            anyhow::bail!("Cannot parse mode '{s}' from string");
-        };
-
-        let pipeline = match caps.name("pipeline") {
-            Some(m) => Some(
-                ModePipeline::from_str(m.as_str())
-                    .context("Failed to parse mode pipeline from string")?,
-            ),
-            None => None,
-        };
-
-        let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
-
-        let optimize_setting = match caps.name("optimize_setting") {
-            Some(m) => Some(
-                ModeOptimizerSetting::from_str(m.as_str())
-                    .context("Failed to parse optimizer setting from string")?,
-            ),
-            None => None,
-        };
-
-        let version = match caps.name("version") {
-            Some(m) => Some(
-                semver::VersionReq::parse(m.as_str())
-                    .map_err(|e| {
-                        anyhow::anyhow!(
-                            "Cannot parse the version requirement '{}': {e}",
-                            m.as_str()
-                        )
-                    })
-                    .context("Failed to parse semver requirement from mode string")?,
-            ),
-            None => None,
-        };
-
-        Ok(ParsedMode {
-            pipeline,
-            optimize_flag,
-            optimize_setting,
-            version,
-        })
-    }
-}
-
-impl Display for ParsedMode {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let mut has_written = false;
-
-        if let Some(pipeline) = self.pipeline {
-            pipeline.fmt(f)?;
-            if let Some(optimize_flag) = self.optimize_flag {
-                f.write_str(if optimize_flag { "+" } else { "-" })?;
-            }
-            has_written = true;
-        }
-
-        if let Some(optimize_setting) = self.optimize_setting {
-            if has_written {
-                f.write_str(" ")?;
-            }
-            optimize_setting.fmt(f)?;
-            has_written = true;
-        }
-
-        if let Some(version) = &self.version {
-            if has_written {
-                f.write_str(" ")?;
-            }
-            version.fmt(f)?;
-        }
-
-        Ok(())
-    }
-}
-
-impl From<ParsedMode> for String {
-    fn from(parsed_mode: ParsedMode) -> Self {
-        parsed_mode.to_string()
-    }
-}
-
-impl TryFrom<String> for ParsedMode {
-    type Error = anyhow::Error;
-    fn try_from(value: String) -> Result<Self, Self::Error> {
-        ParsedMode::from_str(&value)
-    }
-}
-
-impl ParsedMode {
-    /// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
-    pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
-        let pipeline_iter = self.pipeline.as_ref().map_or_else(
-            || EitherIter::A(ModePipeline::test_cases()),
-            |p| EitherIter::B(std::iter::once(*p)),
-        );
-
-        let optimize_flag_setting = self.optimize_flag.map(|flag| {
-            if flag {
-                ModeOptimizerSetting::M3
-            } else {
-                ModeOptimizerSetting::M0
-            }
-        });
-
-        let optimize_flag_iter = match optimize_flag_setting {
-            Some(setting) => EitherIter::A(std::iter::once(setting)),
-            None => EitherIter::B(ModeOptimizerSetting::test_cases()),
-        };
-
-        let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
-            || EitherIter::A(optimize_flag_iter),
-            |s| EitherIter::B(std::iter::once(*s)),
-        );
-
-        pipeline_iter.flat_map(move |pipeline| {
-            optimize_settings_iter
-                .clone()
-                .map(move |optimize_setting| Mode {
-                    pipeline,
-                    optimize_setting,
-                    version: self.version.clone(),
-                })
-        })
-    }
-
-    /// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
-    /// This avoids any duplicate entries.
-    pub fn many_to_modes<'a>(
-        parsed: impl Iterator<Item = &'a ParsedMode>,
-    ) -> impl Iterator<Item = Mode> {
-        let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
-        modes.into_iter()
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_parsed_mode_from_str() {
-        let strings = vec![
-            ("Mz", "Mz"),
-            ("Y", "Y"),
-            ("Y+", "Y+"),
-            ("Y-", "Y-"),
-            ("E", "E"),
-            ("E+", "E+"),
-            ("E-", "E-"),
-            ("Y M0", "Y M0"),
-            ("Y M1", "Y M1"),
-            ("Y M2", "Y M2"),
-            ("Y M3", "Y M3"),
-            ("Y Ms", "Y Ms"),
-            ("Y Mz", "Y Mz"),
-            ("E M0", "E M0"),
-            ("E M1", "E M1"),
-            ("E M2", "E M2"),
-            ("E M3", "E M3"),
-            ("E Ms", "E Ms"),
-            ("E Mz", "E Mz"),
-            // When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
-            ("Y 0.8.0", "Y ^0.8.0"),
-            ("E+ 0.8.0", "E+ ^0.8.0"),
-            ("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
-            ("E Mz <0.7.0", "E Mz <0.7.0"),
-            // We can parse +- _and_ M1/M2 but the latter takes priority.
-            ("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
-            ("E- M2 0.7.0", "E- M2 ^0.7.0"),
-            // We don't see this in the wild but it is parsed.
-            ("<=0.8", "<=0.8"),
-        ];
-
-        for (actual, expected) in strings {
-            let parsed = ParsedMode::from_str(actual)
-                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
-            assert_eq!(
-                expected,
-                parsed.to_string(),
-                "Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
-            );
-        }
-    }
-
-    #[test]
-    fn test_parsed_mode_to_test_modes() {
-        let strings = vec![
-            ("Mz", vec!["Y Mz", "E Mz"]),
-            ("Y", vec!["Y M0", "Y M3"]),
-            ("E", vec!["E M0", "E M3"]),
-            ("Y+", vec!["Y M3"]),
-            ("Y-", vec!["Y M0"]),
-            ("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
-            (
-                "<=0.8",
-                vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
-            ),
-        ];
-
-        for (actual, expected) in strings {
||||||
let parsed = ParsedMode::from_str(actual)
|
|
||||||
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
|
|
||||||
let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
|
|
||||||
let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
expected_set, actual_set,
|
|
||||||
"Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
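A minimal usage sketch of the mode grammar implemented by the deleted parser above. It is illustrative only and not part of any commit in this range; `ParsedMode`, `Mode`, and `to_modes` are the items defined above, and the expected strings mirror the unit tests.

use std::str::FromStr;

fn demo() -> anyhow::Result<()> {
    // "Y+ M3 >=0.8.0": pipeline Y, optimizations on, optimizer level M3,
    // restricted to solc versions matching >=0.8.0.
    let parsed = ParsedMode::from_str("Y+ M3 >=0.8.0")?;
    assert_eq!(parsed.to_string(), "Y+ M3 >=0.8.0");

    // Under-specified modes expand to every matching concrete mode, mirroring
    // the `test_parsed_mode_to_test_modes` cases above.
    let modes: Vec<String> = ParsedMode::from_str("Y")?
        .to_modes()
        .map(|mode| mode.to_string())
        .collect();
    assert!(modes.contains(&"Y M0".to_string()) && modes.contains(&"Y M3".to_string()));
    Ok(())
}
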
@@ -1,173 +0,0 @@
use std::{
    fmt::Display,
    path::{Path, PathBuf},
    str::FromStr,
};

use anyhow::{Context as _, bail};
use serde::{Deserialize, Serialize};

use crate::types::Mode;

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum ParsedTestSpecifier {
    /// All of the test cases in the file should be run across all of the specified modes
    FileOrDirectory {
        /// The path of the metadata file containing the test cases.
        metadata_or_directory_file_path: PathBuf,
    },
    /// Only a specific case within the metadata file should be run across all of the modes in the
    /// file.
    Case {
        /// The path of the metadata file containing the test cases.
        metadata_file_path: PathBuf,

        /// The index of the specific case to run.
        case_idx: usize,
    },
    /// A specific case and a specific mode should be run. This is the most specific out of all of
    /// the specifier types.
    CaseWithMode {
        /// The path of the metadata file containing the test cases.
        metadata_file_path: PathBuf,

        /// The index of the specific case to run.
        case_idx: usize,

        /// The parsed mode that the test should be run in.
        mode: Mode,
    },
}

impl ParsedTestSpecifier {
    pub fn metadata_path(&self) -> &Path {
        match self {
            ParsedTestSpecifier::FileOrDirectory {
                metadata_or_directory_file_path: metadata_file_path,
            }
            | ParsedTestSpecifier::Case {
                metadata_file_path, ..
            }
            | ParsedTestSpecifier::CaseWithMode {
                metadata_file_path, ..
            } => metadata_file_path,
        }
    }
}

impl Display for ParsedTestSpecifier {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ParsedTestSpecifier::FileOrDirectory {
                metadata_or_directory_file_path,
            } => {
                write!(f, "{}", metadata_or_directory_file_path.display())
            }
            ParsedTestSpecifier::Case {
                metadata_file_path,
                case_idx,
            } => {
                write!(f, "{}::{}", metadata_file_path.display(), case_idx)
            }
            ParsedTestSpecifier::CaseWithMode {
                metadata_file_path,
                case_idx,
                mode,
            } => {
                write!(
                    f,
                    "{}::{}::{}",
                    metadata_file_path.display(),
                    case_idx,
                    mode
                )
            }
        }
    }
}

impl FromStr for ParsedTestSpecifier {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut split_iter = s.split("::");

        let Some(path_string) = split_iter.next() else {
            bail!("Could not find the path in the test specifier")
        };
        let path = PathBuf::from(path_string)
            .canonicalize()
            .context("Failed to canonicalize the path of the test")?;

        let Some(case_idx_string) = split_iter.next() else {
            return Ok(Self::FileOrDirectory {
                metadata_or_directory_file_path: path,
            });
        };
        let case_idx = usize::from_str(case_idx_string)
            .context("Failed to parse the case idx of the test specifier from string")?;

        // At this point the provided path must be a file.
        if !path.is_file() {
            bail!(
                "Test specifier with a path and case idx must point to a file and not a directory"
            )
        }

        let Some(mode_string) = split_iter.next() else {
            return Ok(Self::Case {
                metadata_file_path: path,
                case_idx,
            });
        };
        let mode = Mode::from_str(mode_string)
            .context("Failed to parse the mode string in the parsed test specifier")?;

        Ok(Self::CaseWithMode {
            metadata_file_path: path,
            case_idx,
            mode,
        })
    }
}

impl From<ParsedTestSpecifier> for String {
    fn from(value: ParsedTestSpecifier) -> Self {
        value.to_string()
    }
}

impl TryFrom<String> for ParsedTestSpecifier {
    type Error = anyhow::Error;

    fn try_from(value: String) -> Result<Self, Self::Error> {
        value.parse()
    }
}

impl TryFrom<&str> for ParsedTestSpecifier {
    type Error = anyhow::Error;

    fn try_from(value: &str) -> Result<Self, Self::Error> {
        value.parse()
    }
}

impl Serialize for ParsedTestSpecifier {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.to_string().serialize(serializer)
    }
}

impl<'de> Deserialize<'de> for ParsedTestSpecifier {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let string = String::deserialize(deserializer)?;
        string.parse().map_err(serde::de::Error::custom)
    }
}

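A hedged sketch of the three specifier shapes accepted by the removed `FromStr` implementation above ("path", "path::case_idx", and "path::case_idx::mode"). The paths are hypothetical and would need to exist on disk, since the parser canonicalizes them.

use std::str::FromStr;

fn demo() -> anyhow::Result<()> {
    // Whole metadata file or directory, every case, every mode:
    let all = ParsedTestSpecifier::from_str("tests/storage")?;
    // Case 2 of one metadata file, every mode:
    let case = ParsedTestSpecifier::from_str("tests/storage/metadata.json::2")?;
    // Case 2 in exactly one concrete mode:
    let exact = ParsedTestSpecifier::from_str("tests/storage/metadata.json::2::Y M3")?;
    // Display round-trips the same "::"-separated shape (with canonical paths).
    println!("{all} / {case} / {exact}");
    Ok(())
}
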
@@ -1,6 +1,6 @@
-use alloy::primitives::U256;
 use alloy::signers::local::PrivateKeySigner;
-use anyhow::{Context, Result, bail};
+use alloy_primitives::U256;
+use anyhow::{Result, bail};
 
 /// This is a sequential private key allocator. When instantiated, it allocates private keys
 /// sequentially and in order until the maximum private key specified is reached.
@@ -10,26 +10,25 @@ pub struct PrivateKeyAllocator {
     next_private_key: U256,
 
     /// The highest private key (exclusive) that can be returned by this allocator.
-    highest_private_key_inclusive: U256,
+    highest_private_key_exclusive: U256,
 }
 
 impl PrivateKeyAllocator {
     /// Creates a new instance of the private key allocator.
-    pub fn new(highest_private_key_inclusive: U256) -> Self {
+    pub fn new(highest_private_key_exclusive: U256) -> Self {
         Self {
-            next_private_key: U256::ONE,
-            highest_private_key_inclusive,
+            next_private_key: U256::ZERO,
+            highest_private_key_exclusive,
         }
     }
 
     /// Allocates a new private key and errors out if the maximum private key has been reached.
     pub fn allocate(&mut self) -> Result<PrivateKeySigner> {
-        if self.next_private_key > self.highest_private_key_inclusive {
+        if self.next_private_key >= self.highest_private_key_exclusive {
             bail!("Attempted to allocate a private key but failed since all have been allocated");
         };
         let private_key =
-            PrivateKeySigner::from_slice(self.next_private_key.to_be_bytes::<32>().as_slice())
-                .context("Failed to convert the private key digits into a private key")?;
+            PrivateKeySigner::from_slice(self.next_private_key.to_be_bytes::<32>().as_slice())?;
        self.next_private_key += U256::ONE;
        Ok(private_key)
    }

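A small sketch of the post-change semantics, assuming the field names above: the bound is now exclusive, so allocation errors once `next_private_key` reaches it. This is an illustration, not code from the diff.

use alloy_primitives::U256;

fn demo() {
    // At most the keys below 4 can ever be handed out by this allocator.
    let mut allocator = PrivateKeyAllocator::new(U256::from(4));
    while let Ok(signer) = allocator.allocate() {
        println!("allocated signer for {}", signer.address());
    }
    // Once the loop exits, further calls keep returning an error.
    assert!(allocator.allocate().is_err());
}
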
@@ -1,24 +0,0 @@
use std::sync::atomic::{AtomicUsize, Ordering};

pub struct RoundRobinPool<T> {
    next_index: AtomicUsize,
    items: Vec<T>,
}

impl<T> RoundRobinPool<T> {
    pub fn new(items: Vec<T>) -> Self {
        Self {
            next_index: Default::default(),
            items,
        }
    }

    pub fn round_robin(&self) -> &T {
        let current = self.next_index.fetch_add(1, Ordering::SeqCst) % self.items.len();
        self.items.get(current).unwrap()
    }

    pub fn iter(&self) -> impl Iterator<Item = &T> {
        self.items.iter()
    }
}

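A quick sketch of the wrap-around behaviour of the removed pool: `fetch_add` hands every caller a monotonically increasing ticket and the modulo maps it back onto the items (note the modulo panics for an empty pool). Not part of the diff.

fn demo() {
    let pool = RoundRobinPool::new(vec!["node-a", "node-b", "node-c"]);
    assert_eq!(*pool.round_robin(), "node-a");
    assert_eq!(*pool.round_robin(), "node-b");
    assert_eq!(*pool.round_robin(), "node-c");
    // The ticket counter keeps increasing, so the index wraps back around.
    assert_eq!(*pool.round_robin(), "node-a");
}
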
@@ -16,6 +16,7 @@ revive-dt-solc-binaries = { workspace = true }
 revive-common = { workspace = true }
 
 alloy = { workspace = true }
+alloy-primitives = { workspace = true }
 anyhow = { workspace = true }
 dashmap = { workspace = true }
 foundry-compilers-artifacts = { workspace = true }

@@ -11,7 +11,7 @@ use std::{
 };
 
 use alloy::json_abi::JsonAbi;
-use alloy::primitives::Address;
+use alloy_primitives::Address;
 use anyhow::{Context as _, Result};
 use semver::Version;
 use serde::{Deserialize, Serialize};

@@ -12,15 +12,10 @@ use dashmap::DashMap;
 use revive_dt_common::types::VersionOrRequirement;
 use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
 use revive_solc_json_interface::{
-    PolkaVMDefaultHeapMemorySize, PolkaVMDefaultStackMemorySize, SolcStandardJsonInput,
-    SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
-    SolcStandardJsonInputSettingsLibraries, SolcStandardJsonInputSettingsMetadata,
-    SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsPolkaVM,
-    SolcStandardJsonInputSettingsPolkaVMMemory, SolcStandardJsonInputSettingsSelection,
-    SolcStandardJsonOutput, standard_json::input::settings::optimizer::Optimizer,
-    standard_json::input::settings::optimizer::details::Details,
+    SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
+    SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
+    SolcStandardJsonOutput,
 };
-use tracing::{Span, field::display};
 
 use crate::{
     CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler, solc::Solc,
@@ -29,7 +24,6 @@ use crate::{
 use alloy::json_abi::JsonAbi;
 use anyhow::{Context as _, Result};
 use semver::Version;
-use std::collections::BTreeSet;
 use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
 
 /// A wrapper around the `resolc` binary, emitting PVM-compatible bytecode.
@@ -42,10 +36,6 @@ struct ResolcInner {
     solc: Solc,
     /// Path to the `resolc` executable
     resolc_path: PathBuf,
-    /// The PVM heap size in bytes.
-    pvm_heap_size: u32,
-    /// The PVM stack size in bytes.
-    pvm_stack_size: u32,
 }
 
 impl Resolc {
@@ -72,35 +62,10 @@ impl Resolc {
                 Self(Arc::new(ResolcInner {
                     solc,
                     resolc_path: resolc_configuration.path.clone(),
-                    pvm_heap_size: resolc_configuration
-                        .heap_size
-                        .unwrap_or(PolkaVMDefaultHeapMemorySize),
-                    pvm_stack_size: resolc_configuration
-                        .stack_size
-                        .unwrap_or(PolkaVMDefaultStackMemorySize),
                 }))
             })
             .clone())
     }
 
-    fn polkavm_settings(&self) -> SolcStandardJsonInputSettingsPolkaVM {
-        SolcStandardJsonInputSettingsPolkaVM::new(
-            Some(SolcStandardJsonInputSettingsPolkaVMMemory::new(
-                Some(self.0.pvm_heap_size),
-                Some(self.0.pvm_stack_size),
-            )),
-            false,
-        )
-    }
-
-    fn inject_polkavm_settings(&self, input: &SolcStandardJsonInput) -> Result<serde_json::Value> {
-        let mut input_value = serde_json::to_value(input)
-            .context("Failed to serialize Standard JSON input for resolc")?;
-        if let Some(settings) = input_value.get_mut("settings") {
-            settings["polkavm"] = serde_json::to_value(self.polkavm_settings()).unwrap();
-        }
-        Ok(input_value)
-    }
 }
 
 impl SolidityCompiler for Resolc {
@@ -115,16 +80,6 @@ impl SolidityCompiler for Resolc {
     }
 
     #[tracing::instrument(level = "debug", ret)]
-    #[tracing::instrument(
-        level = "error",
-        skip_all,
-        fields(
-            resolc_version = %self.version(),
-            solc_version = %self.0.solc.version(),
-            json_in = tracing::field::Empty
-        ),
-        err(Debug)
-    )]
     fn build(
         &self,
         CompilerInput {
@@ -155,8 +110,8 @@ impl SolidityCompiler for Resolc {
                 .collect(),
             settings: SolcStandardJsonInputSettings {
                 evm_version,
-                libraries: SolcStandardJsonInputSettingsLibraries {
-                    inner: libraries
+                libraries: Some(
+                    libraries
                         .into_iter()
                         .map(|(source_code, libraries_map)| {
                             (
@@ -170,29 +125,22 @@ impl SolidityCompiler for Resolc {
                             )
                         })
                        .collect(),
-                },
-                remappings: BTreeSet::<String>::new(),
-                output_selection: SolcStandardJsonInputSettingsSelection::new_required(),
+                ),
+                remappings: None,
+                output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()),
                 via_ir: Some(true),
                 optimizer: SolcStandardJsonInputSettingsOptimizer::new(
                     optimization
                         .unwrap_or(ModeOptimizerSetting::M0)
                         .optimizations_enabled(),
-                    Optimizer::default_mode(),
-                    Details::disabled(&Version::new(0, 0, 0)),
+                    None,
+                    &Version::new(0, 0, 0),
+                    false,
                 ),
-                polkavm: self.polkavm_settings(),
-                metadata: SolcStandardJsonInputSettingsMetadata::default(),
-                detect_missing_libraries: false,
+                metadata: None,
+                polkavm: None,
             },
         };
-        // Manually inject polkavm settings since it's marked skip_serializing in the upstream crate
-        let std_input_json = self.inject_polkavm_settings(&input)?;
-
-        Span::current().record(
-            "json_in",
-            display(serde_json::to_string(&std_input_json).unwrap()),
-        );
-
         let path = &self.0.resolc_path;
         let mut command = AsyncCommand::new(path);
@@ -200,8 +148,6 @@ impl SolidityCompiler for Resolc {
             .stdin(Stdio::piped())
             .stdout(Stdio::piped())
             .stderr(Stdio::piped())
-            .arg("--solc")
-            .arg(self.0.solc.path())
             .arg("--standard-json");
 
         if let Some(ref base_path) = base_path {
@@ -221,9 +167,8 @@ impl SolidityCompiler for Resolc {
             .with_context(|| format!("Failed to spawn resolc at {}", path.display()))?;
 
         let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
-        let serialized_input = serde_json::to_vec(&std_input_json)
+        let serialized_input = serde_json::to_vec(&input)
             .context("Failed to serialize Standard JSON input for resolc")?;
 
         stdin_pipe
             .write_all(&serialized_input)
             .await
@@ -249,18 +194,14 @@ impl SolidityCompiler for Resolc {
             anyhow::bail!("Compilation failed with an error: {message}");
         }
 
-        let parsed: SolcStandardJsonOutput = {
-            let mut deserializer = serde_json::Deserializer::from_slice(&stdout);
-            deserializer.disable_recursion_limit();
-            serde::de::Deserialize::deserialize(&mut deserializer)
-                .map_err(|e| {
-                    anyhow::anyhow!(
-                        "failed to parse resolc JSON output: {e}\nstderr: {}",
-                        String::from_utf8_lossy(&stderr)
-                    )
-                })
-                .context("Failed to parse resolc standard JSON output")?
-        };
+        let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout)
+            .map_err(|e| {
+                anyhow::anyhow!(
+                    "failed to parse resolc JSON output: {e}\nstderr: {}",
+                    String::from_utf8_lossy(&stderr)
+                )
+            })
+            .context("Failed to parse resolc standard JSON output")?;
 
         tracing::debug!(
             output = %serde_json::to_string(&parsed).unwrap(),
@@ -269,7 +210,7 @@ impl SolidityCompiler for Resolc {
 
         // Detecting if the compiler output contained errors and reporting them through logs and
        // errors instead of returning the compiler output that might contain errors.
-        for error in parsed.errors.iter() {
+        for error in parsed.errors.iter().flatten() {
             if error.severity == "error" {
                 tracing::error!(
                     ?error,
@@ -281,12 +222,12 @@ impl SolidityCompiler for Resolc {
             }
         }
 
-        if parsed.contracts.is_empty() {
+        let Some(contracts) = parsed.contracts else {
             anyhow::bail!("Unexpected error - resolc output doesn't have a contracts section");
-        }
+        };
 
         let mut compiler_output = CompilerOutput::default();
-        for (source_path, contracts) in parsed.contracts.into_iter() {
+        for (source_path, contracts) in contracts.into_iter() {
             let src_for_msg = source_path.clone();
             let source_path = PathBuf::from(source_path)
                 .canonicalize()
@@ -294,22 +235,15 @@ impl SolidityCompiler for Resolc {
 
             let map = compiler_output.contracts.entry(source_path).or_default();
             for (contract_name, contract_information) in contracts.into_iter() {
-                let Some(bytecode) = contract_information
+                let bytecode = contract_information
                     .evm
                     .and_then(|evm| evm.bytecode.clone())
-                else {
-                    tracing::debug!(
-                        "Skipping abstract or interface contract {} - no bytecode",
-                        contract_name
-                    );
-                    continue;
-                };
+                    .context("Unexpected - Contract compiled with resolc has no bytecode")?;
                 let abi = {
-                    let metadata = &contract_information.metadata;
-                    if metadata.is_null() {
-                        anyhow::bail!("No metadata found for the contract");
-                    }
+                    let metadata = contract_information
+                        .metadata
+                        .as_ref()
+                        .context("No metadata found for the contract")?;
 
                     let solc_metadata_str = match metadata {
                         serde_json::Value::String(solc_metadata_str) => {
                             solc_metadata_str.as_str()

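The resolc hunks above keep the same process-driving shape as the solc implementation that follows: serialize the standard JSON, pipe it through the compiler's stdin, and collect stdout/stderr. A self-contained sketch of that shared pattern; the function name and error strings are illustrative, not from the diff.

use std::process::Stdio;
use anyhow::Context as _;
use tokio::{io::AsyncWriteExt, process::Command};

async fn run_standard_json(
    compiler: &std::path::Path,
    input: &serde_json::Value,
) -> anyhow::Result<Vec<u8>> {
    let mut child = Command::new(compiler)
        .arg("--standard-json")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .spawn()
        .with_context(|| format!("Failed to spawn {}", compiler.display()))?;

    // Write the serialized standard JSON to the child's stdin, then drop the
    // handle so the compiler sees EOF and starts compiling.
    let bytes = serde_json::to_vec(input).context("Failed to serialize standard JSON")?;
    child
        .stdin
        .as_mut()
        .expect("stdin is piped")
        .write_all(&bytes)
        .await
        .context("Failed to write standard JSON to the compiler")?;
    drop(child.stdin.take());

    let output = child.wait_with_output().await.context("Compiler did not exit")?;
    anyhow::ensure!(
        output.status.success(),
        "compiler failed: {}",
        String::from_utf8_lossy(&output.stderr)
    );
    Ok(output.stdout)
}
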
@@ -10,9 +10,8 @@ use std::{
 
 use dashmap::DashMap;
 use revive_dt_common::types::VersionOrRequirement;
-use revive_dt_config::{SolcConfiguration, WorkingDirectoryConfiguration};
+use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
 use revive_dt_solc_binaries::download_solc;
-use tracing::{Span, field::display, info};
 
 use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
 
@@ -40,7 +39,9 @@ struct SolcInner {
 
 impl Solc {
     pub async fn new(
-        context: impl AsRef<SolcConfiguration> + AsRef<WorkingDirectoryConfiguration>,
+        context: impl AsRef<SolcConfiguration>
+            + AsRef<ResolcConfiguration>
+            + AsRef<WorkingDirectoryConfiguration>,
         version: impl Into<Option<VersionOrRequirement>>,
     ) -> Result<Self> {
         // This is a cache for the compiler objects so that whenever the same compiler version is
@@ -68,11 +69,6 @@ impl Solc {
         Ok(COMPILERS_CACHE
             .entry((path.clone(), version.clone()))
             .or_insert_with(|| {
-                info!(
-                    solc_path = %path.display(),
-                    solc_version = %version,
-                    "Created a new solc compiler object"
-                );
                 Self(Arc::new(SolcInner {
                     solc_path: path,
                     solc_version: version,
@@ -92,12 +88,6 @@ impl SolidityCompiler for Solc {
     }
 
     #[tracing::instrument(level = "debug", ret)]
-    #[tracing::instrument(
-        level = "error",
-        skip_all,
-        fields(json_in = tracing::field::Empty),
-        err(Debug)
-    )]
     fn build(
         &self,
         CompilerInput {
@@ -176,14 +166,12 @@ impl SolidityCompiler for Solc {
             },
         };
 
-        Span::current().record("json_in", display(serde_json::to_string(&input).unwrap()));
-
         let path = &self.0.solc_path;
         let mut command = AsyncCommand::new(path);
         command
             .stdin(Stdio::piped())
             .stdout(Stdio::piped())
-            .stderr(Stdio::null())
+            .stderr(Stdio::piped())
             .arg("--standard-json");
 
         if let Some(ref base_path) = base_path {
@@ -217,18 +205,20 @@ impl SolidityCompiler for Solc {
         if !output.status.success() {
             let json_in = serde_json::to_string_pretty(&input)
                 .context("Failed to pretty-print Standard JSON input for logging")?;
+            let message = String::from_utf8_lossy(&output.stderr);
             tracing::error!(
                 status = %output.status,
+                message = %message,
                 json_input = json_in,
                 "Compilation using solc failed"
             );
-            anyhow::bail!("Compilation failed");
+            anyhow::bail!("Compilation failed with an error: {message}");
         }
 
         let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout)
             .map_err(|e| {
                 anyhow::anyhow!(
-                    "failed to parse resolc JSON output: {e}\nstdout: {}",
+                    "failed to parse resolc JSON output: {e}\nstderr: {}",
                     String::from_utf8_lossy(&output.stdout)
                 )
             })

|||||||
@@ -7,10 +7,7 @@ pragma solidity >=0.6.9;
|
|||||||
import "./callable.sol";
|
import "./callable.sol";
|
||||||
|
|
||||||
contract Main {
|
contract Main {
|
||||||
function main(
|
function main(uint[1] calldata p1, Callable callable) public returns(uint) {
|
||||||
uint[1] calldata p1,
|
|
||||||
Callable callable
|
|
||||||
) public pure returns (uint) {
|
|
||||||
return callable.f(p1);
|
return callable.f(p1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -18,7 +18,6 @@ semver = { workspace = true }
 temp-dir = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
-serde_with = { workspace = true }
 strum = { workspace = true }
 
 [lints]

+137 -728: File diff suppressed because it is too large.

@@ -21,7 +21,6 @@ revive-dt-node = { workspace = true }
 revive-dt-node-interaction = { workspace = true }
 revive-dt-report = { workspace = true }
 
-ansi_term = { workspace = true }
 alloy = { workspace = true }
 anyhow = { workspace = true }
 bson = { workspace = true }
@@ -37,7 +36,6 @@ schemars = { workspace = true }
 semver = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
-subxt = { workspace = true }
 
 [lints]
 workspace = true

@@ -5,7 +5,7 @@ use std::{
     borrow::Cow,
     collections::HashMap,
     path::{Path, PathBuf},
-    sync::{Arc, LazyLock},
+    sync::Arc,
 };
 
 use futures::FutureExt;
@@ -19,7 +19,7 @@ use anyhow::{Context as _, Error, Result};
 use revive_dt_report::ExecutionSpecificReporter;
 use semver::Version;
 use serde::{Deserialize, Serialize};
-use tokio::sync::{Mutex, RwLock, Semaphore};
+use tokio::sync::{Mutex, RwLock};
 use tracing::{Instrument, debug, debug_span, instrument};
 
 pub struct CachedCompiler<'a> {
@@ -165,22 +165,10 @@ impl<'a> CachedCompiler<'a> {
                 cache_value.compiler_output
             }
             None => {
-                let compiler_output = compilation_callback()
+                compilation_callback()
                     .await
                     .context("Compilation callback failed (cache miss path)")?
-                    .compiler_output;
-                self.artifacts_cache
-                    .insert(
-                        &cache_key,
-                        &CacheValue {
-                            compiler_output: compiler_output.clone(),
-                        },
-                    )
-                    .await
-                    .context(
-                        "Failed to write the cached value of the compilation artifacts",
-                    )?;
-                compiler_output
+                    .compiler_output
             }
         }
     }
@@ -198,12 +186,6 @@ async fn compile_contracts(
     compiler: &dyn SolidityCompiler,
     reporter: &ExecutionSpecificReporter,
 ) -> Result<CompilerOutput> {
-    // Puts a limit on how many compilations we can perform at any given instance which helps us
-    // with some of the errors we've been seeing with high concurrency on MacOS (we have not tried
-    // it on Linux so we don't know if these issues also persist there or not.)
-    static SPAWN_GATE: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(5));
-    let _permit = SPAWN_GATE.acquire().await?;
-
     let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
         .with_allowed_extension("sol")
         .with_use_cached_fs(true)
@@ -325,6 +307,26 @@ impl ArtifactsCache {
         let value = bson::from_slice::<CacheValue>(&value).ok()?;
         Some(value)
     }
+
+    #[instrument(level = "debug", skip_all, err)]
+    pub async fn get_or_insert_with(
+        &self,
+        key: &CacheKey<'_>,
+        callback: impl AsyncFnOnce() -> Result<CacheValue>,
+    ) -> Result<CacheValue> {
+        match self.get(key).await {
+            Some(value) => {
+                debug!("Cache hit");
+                Ok(value)
+            }
+            None => {
+                debug!("Cache miss");
+                let value = callback().await?;
+                self.insert(key, &value).await?;
+                Ok(value)
+            }
+        }
+    }
 }
 
 #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]

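A hedged sketch of calling the new `get_or_insert_with` read-through helper added above. The closure body is a stand-in for the real compilation work, and constructing `CacheValue` directly assumes its field is visible to the caller.

async fn demo(cache: &ArtifactsCache, key: CacheKey<'_>) -> anyhow::Result<CompilerOutput> {
    let value = cache
        .get_or_insert_with(&key, || async {
            // Runs only on a cache miss; the result is persisted before it is
            // returned, so the next lookup with the same key is a hit.
            Ok(CacheValue {
                compiler_output: CompilerOutput::default(),
            })
        })
        .await?;
    Ok(value.compiler_output)
}
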
@@ -1,749 +0,0 @@
|
|||||||
use std::{
|
|
||||||
collections::HashMap,
|
|
||||||
sync::{
|
|
||||||
Arc,
|
|
||||||
atomic::{AtomicUsize, Ordering},
|
|
||||||
},
|
|
||||||
time::Duration,
|
|
||||||
};
|
|
||||||
|
|
||||||
use alloy::{
|
|
||||||
hex,
|
|
||||||
json_abi::JsonAbi,
|
|
||||||
network::{Ethereum, TransactionBuilder},
|
|
||||||
primitives::{Address, TxHash, U256},
|
|
||||||
providers::Provider,
|
|
||||||
rpc::types::{
|
|
||||||
TransactionReceipt, TransactionRequest,
|
|
||||||
trace::geth::{
|
|
||||||
CallFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType,
|
|
||||||
GethDebugTracingOptions,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
use anyhow::{Context as _, Result, bail};
|
|
||||||
use futures::{FutureExt as _, TryFutureExt};
|
|
||||||
use indexmap::IndexMap;
|
|
||||||
use revive_dt_common::types::PrivateKeyAllocator;
|
|
||||||
use revive_dt_format::{
|
|
||||||
metadata::{ContractInstance, ContractPathAndIdent},
|
|
||||||
steps::{
|
|
||||||
AllocateAccountStep, Calldata, EtherValue, FunctionCallStep, Method, RepeatStep, Step,
|
|
||||||
StepIdx, StepPath,
|
|
||||||
},
|
|
||||||
traits::{ResolutionContext, ResolverApi},
|
|
||||||
};
|
|
||||||
use tokio::sync::{Mutex, OnceCell, mpsc::UnboundedSender};
|
|
||||||
use tracing::{Span, debug, error, field::display, info, instrument};
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
differential_benchmarks::{ExecutionState, WatcherEvent},
|
|
||||||
helpers::{CachedCompiler, TestDefinition, TestPlatformInformation},
|
|
||||||
};
|
|
||||||
|
|
||||||
static DRIVER_COUNT: AtomicUsize = AtomicUsize::new(0);
|
|
||||||
|
|
||||||
/// The differential tests driver for a single platform.
|
|
||||||
pub struct Driver<'a, I> {
|
|
||||||
/// The id of the driver.
|
|
||||||
driver_id: usize,
|
|
||||||
|
|
||||||
/// The information of the platform that this driver is for.
|
|
||||||
platform_information: &'a TestPlatformInformation<'a>,
|
|
||||||
|
|
||||||
/// The resolver of the platform.
|
|
||||||
resolver: Arc<dyn ResolverApi + 'a>,
|
|
||||||
|
|
||||||
/// The definition of the test that the driver is instructed to execute.
|
|
||||||
test_definition: &'a TestDefinition<'a>,
|
|
||||||
|
|
||||||
/// The private key allocator used by this driver and other drivers when account allocations are
|
|
||||||
/// needed.
|
|
||||||
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
|
|
||||||
|
|
||||||
/// The execution state associated with the platform.
|
|
||||||
execution_state: ExecutionState,
|
|
||||||
|
|
||||||
/// The send side of the watcher's unbounded channel associated with this driver.
|
|
||||||
watcher_tx: UnboundedSender<WatcherEvent>,
|
|
||||||
|
|
||||||
/// The number of steps that were executed on the driver.
|
|
||||||
steps_executed: usize,
|
|
||||||
|
|
||||||
/// This function controls if the driver should wait for transactions to be included in a block
|
|
||||||
/// or not before proceeding forward.
|
|
||||||
await_transaction_inclusion: bool,
|
|
||||||
|
|
||||||
/// This is the queue of steps that are to be executed by the driver for this test case. Each
|
|
||||||
/// time `execute_step` is called one of the steps is executed.
|
|
||||||
steps_iterator: I,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, I> Driver<'a, I>
|
|
||||||
where
|
|
||||||
I: Iterator<Item = (StepPath, Step)>,
|
|
||||||
{
|
|
||||||
// region:Constructors & Initialization
|
|
||||||
pub async fn new(
|
|
||||||
platform_information: &'a TestPlatformInformation<'a>,
|
|
||||||
test_definition: &'a TestDefinition<'a>,
|
|
||||||
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
|
|
||||||
cached_compiler: &CachedCompiler<'a>,
|
|
||||||
watcher_tx: UnboundedSender<WatcherEvent>,
|
|
||||||
await_transaction_inclusion: bool,
|
|
||||||
steps: I,
|
|
||||||
) -> Result<Self> {
|
|
||||||
let mut this = Driver {
|
|
||||||
driver_id: DRIVER_COUNT.fetch_add(1, Ordering::SeqCst),
|
|
||||||
platform_information,
|
|
||||||
resolver: platform_information
|
|
||||||
.node
|
|
||||||
.resolver()
|
|
||||||
.await
|
|
||||||
.context("Failed to create resolver")?,
|
|
||||||
test_definition,
|
|
||||||
private_key_allocator,
|
|
||||||
execution_state: ExecutionState::empty(),
|
|
||||||
steps_executed: 0,
|
|
||||||
steps_iterator: steps,
|
|
||||||
await_transaction_inclusion,
|
|
||||||
watcher_tx,
|
|
||||||
};
|
|
||||||
this.init_execution_state(cached_compiler)
|
|
||||||
.await
|
|
||||||
.context("Failed to initialize the execution state of the platform")?;
|
|
||||||
Ok(this)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn init_execution_state(&mut self, cached_compiler: &CachedCompiler<'a>) -> Result<()> {
|
|
||||||
let compiler_output = cached_compiler
|
|
||||||
.compile_contracts(
|
|
||||||
self.test_definition.metadata,
|
|
||||||
self.test_definition.metadata_file_path,
|
|
||||||
self.test_definition.mode.clone(),
|
|
||||||
None,
|
|
||||||
self.platform_information.compiler.as_ref(),
|
|
||||||
self.platform_information.platform,
|
|
||||||
&self.platform_information.reporter,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.inspect_err(|err| error!(?err, "Pre-linking compilation failed"))
|
|
||||||
.context("Failed to produce the pre-linking compiled contracts")?;
|
|
||||||
|
|
||||||
let deployer_address = self.test_definition.case.deployer_address();
|
|
||||||
|
|
||||||
let mut deployed_libraries = None::<HashMap<_, _>>;
|
|
||||||
let mut contract_sources = self
|
|
||||||
.test_definition
|
|
||||||
.metadata
|
|
||||||
.contract_sources()
|
|
||||||
.inspect_err(|err| error!(?err, "Failed to retrieve contract sources from metadata"))
|
|
||||||
.context("Failed to get the contract instances from the metadata file")?;
|
|
||||||
for library_instance in self
|
|
||||||
.test_definition
|
|
||||||
.metadata
|
|
||||||
.libraries
|
|
||||||
.iter()
|
|
||||||
.flatten()
|
|
||||||
.flat_map(|(_, map)| map.values())
|
|
||||||
{
|
|
||||||
debug!(%library_instance, "Deploying Library Instance");
|
|
||||||
|
|
||||||
let ContractPathAndIdent {
|
|
||||||
contract_source_path: library_source_path,
|
|
||||||
contract_ident: library_ident,
|
|
||||||
} = contract_sources
|
|
||||||
.remove(library_instance)
|
|
||||||
.context("Failed to get the contract sources of the contract instance")?;
|
|
||||||
|
|
||||||
let (code, abi) = compiler_output
|
|
||||||
.contracts
|
|
||||||
.get(&library_source_path)
|
|
||||||
.and_then(|contracts| contracts.get(library_ident.as_str()))
|
|
||||||
.context("Failed to get the code and abi for the instance")?;
|
|
||||||
|
|
||||||
let code = alloy::hex::decode(code)?;
|
|
||||||
|
|
||||||
let tx = TransactionBuilder::<Ethereum>::with_deploy_code(
|
|
||||||
TransactionRequest::default().from(deployer_address),
|
|
||||||
code,
|
|
||||||
);
|
|
||||||
let receipt = self
|
|
||||||
.execute_transaction(tx, None, Duration::from_secs(5 * 60))
|
|
||||||
.and_then(|(_, receipt_fut)| receipt_fut)
|
|
||||||
.await
|
|
||||||
.inspect_err(|err| {
|
|
||||||
error!(
|
|
||||||
?err,
|
|
||||||
%library_instance,
|
|
||||||
"Failed to deploy the library"
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
debug!(?library_instance, "Deployed library");
|
|
||||||
|
|
||||||
let library_address = receipt
|
|
||||||
.contract_address
|
|
||||||
.expect("Failed to deploy the library");
|
|
||||||
|
|
||||||
deployed_libraries.get_or_insert_default().insert(
|
|
||||||
library_instance.clone(),
|
|
||||||
(library_ident.clone(), library_address, abi.clone()),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
let compiler_output = cached_compiler
|
|
||||||
.compile_contracts(
|
|
||||||
self.test_definition.metadata,
|
|
||||||
self.test_definition.metadata_file_path,
|
|
||||||
self.test_definition.mode.clone(),
|
|
||||||
deployed_libraries.as_ref(),
|
|
||||||
self.platform_information.compiler.as_ref(),
|
|
||||||
self.platform_information.platform,
|
|
||||||
&self.platform_information.reporter,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.inspect_err(|err| error!(?err, "Post-linking compilation failed"))
|
|
||||||
.context("Failed to compile the post-link contracts")?;
|
|
||||||
|
|
||||||
for (contract_path, contract_name_to_info_mapping) in compiler_output.contracts.iter() {
|
|
||||||
for (contract_name, (contract_bytecode, _)) in contract_name_to_info_mapping.iter() {
|
|
||||||
let contract_bytecode = hex::decode(contract_bytecode)
|
|
||||||
.expect("Impossible for us to get an undecodable bytecode after linking");
|
|
||||||
|
|
||||||
self.platform_information
|
|
||||||
.reporter
|
|
||||||
.report_contract_information_event(
|
|
||||||
contract_path.to_path_buf(),
|
|
||||||
contract_name.clone(),
|
|
||||||
contract_bytecode.len(),
|
|
||||||
)
|
|
||||||
.expect("Should not fail");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
self.execution_state = ExecutionState::new(
|
|
||||||
compiler_output.contracts,
|
|
||||||
deployed_libraries.unwrap_or_default(),
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
// endregion:Constructors & Initialization
|
|
||||||
|
|
||||||
// region:Step Handling
|
|
||||||
pub async fn execute_all(mut self) -> Result<usize> {
|
|
||||||
while let Some(result) = self.execute_next_step().await {
|
|
||||||
result?
|
|
||||||
}
|
|
||||||
Ok(self.steps_executed)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn execute_next_step(&mut self) -> Option<Result<()>> {
|
|
||||||
let (step_path, step) = self.steps_iterator.next()?;
|
|
||||||
info!(%step_path, "Executing Step");
|
|
||||||
Some(
|
|
||||||
self.execute_step(&step_path, &step)
|
|
||||||
.await
|
|
||||||
.inspect(|_| info!(%step_path, "Step execution succeeded"))
|
|
||||||
.inspect_err(|err| error!(%step_path, ?err, "Step execution failed")),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(
|
|
||||||
level = "info",
|
|
||||||
skip_all,
|
|
||||||
fields(
|
|
||||||
driver_id = self.driver_id,
|
|
||||||
%step_path,
|
|
||||||
),
|
|
||||||
err(Debug),
|
|
||||||
)]
|
|
||||||
async fn execute_step(&mut self, step_path: &StepPath, step: &Step) -> Result<()> {
|
|
||||||
let steps_executed = match step {
|
|
||||||
Step::FunctionCall(step) => self
|
|
||||||
.execute_function_call(step_path, step.as_ref())
|
|
||||||
.await
|
|
||||||
.context("Function call step Failed"),
|
|
||||||
Step::Repeat(step) => self
|
|
||||||
.execute_repeat_step(step_path, step.as_ref())
|
|
||||||
.await
|
|
||||||
.context("Repetition Step Failed"),
|
|
||||||
Step::AllocateAccount(step) => self
|
|
||||||
.execute_account_allocation(step_path, step.as_ref())
|
|
||||||
.await
|
|
||||||
.context("Account Allocation Step Failed"),
|
|
||||||
// The following steps are disabled in the benchmarking driver.
|
|
||||||
Step::BalanceAssertion(..) | Step::StorageEmptyAssertion(..) => Ok(0),
|
|
||||||
}?;
|
|
||||||
self.steps_executed += steps_executed;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id))]
|
|
||||||
pub async fn execute_function_call(
|
|
||||||
&mut self,
|
|
||||||
step_path: &StepPath,
|
|
||||||
step: &FunctionCallStep,
|
|
||||||
) -> Result<usize> {
|
|
||||||
let deployment_receipts = self
|
|
||||||
.handle_function_call_contract_deployment(step_path, step)
|
|
||||||
.await
|
|
||||||
.context("Failed to deploy contracts for the function call step")?;
|
|
||||||
let transaction_hash = self
|
|
||||||
.handle_function_call_execution(step_path, step, deployment_receipts)
|
|
||||||
.await
|
|
||||||
.context("Failed to handle the function call execution")?;
|
|
||||||
self.handle_function_call_variable_assignment(step, transaction_hash)
|
|
||||||
.await
|
|
||||||
.context("Failed to handle function call variable assignment")?;
|
|
||||||
Ok(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_function_call_contract_deployment(
|
|
||||||
&mut self,
|
|
||||||
step_path: &StepPath,
|
|
||||||
step: &FunctionCallStep,
|
|
||||||
) -> Result<HashMap<ContractInstance, TransactionReceipt>> {
|
|
||||||
let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
|
|
||||||
for instance in step.find_all_contract_instances().into_iter() {
|
|
||||||
if !self
|
|
||||||
.execution_state
|
|
||||||
.deployed_contracts
|
|
||||||
.contains_key(&instance)
|
|
||||||
{
|
|
||||||
instances_we_must_deploy.entry(instance).or_insert(false);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if let Method::Deployer = step.method {
|
|
||||||
instances_we_must_deploy.swap_remove(&step.instance);
|
|
||||||
instances_we_must_deploy.insert(step.instance.clone(), true);
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut receipts = HashMap::new();
|
|
||||||
for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() {
|
|
||||||
let calldata = deploy_with_constructor_arguments.then_some(&step.calldata);
|
|
||||||
let value = deploy_with_constructor_arguments
|
|
||||||
.then_some(step.value)
|
|
||||||
.flatten();
|
|
||||||
|
|
||||||
let caller = {
|
|
||||||
let context = self.default_resolution_context();
|
|
||||||
step.caller
|
|
||||||
.resolve_address(self.resolver.as_ref(), context)
|
|
||||||
.await?
|
|
||||||
};
|
|
||||||
if let (_, _, Some(receipt)) = self
|
|
||||||
.get_or_deploy_contract_instance(
|
|
||||||
&instance,
|
|
||||||
caller,
|
|
||||||
calldata,
|
|
||||||
value,
|
|
||||||
Some(step_path),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.context("Failed to get or deploy contract instance during input execution")?
|
|
||||||
{
|
|
||||||
receipts.insert(instance.clone(), receipt);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(receipts)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_function_call_execution(
|
|
||||||
&mut self,
|
|
||||||
step_path: &StepPath,
|
|
||||||
step: &FunctionCallStep,
|
|
||||||
mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
|
|
||||||
) -> Result<TxHash> {
|
|
||||||
match step.method {
|
|
||||||
// This step was already executed when `handle_step` was called. We just need to
|
|
||||||
// lookup the transaction receipt in this case and continue on.
|
|
||||||
Method::Deployer => deployment_receipts
|
|
||||||
.remove(&step.instance)
|
|
||||||
.context("Failed to find deployment receipt for constructor call")
|
|
||||||
.map(|receipt| receipt.transaction_hash),
|
|
||||||
Method::Fallback | Method::FunctionName(_) => {
|
|
||||||
let tx = step
|
|
||||||
.as_transaction(self.resolver.as_ref(), self.default_resolution_context())
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let (tx_hash, receipt_future) = self
|
|
||||||
.execute_transaction(tx.clone(), Some(step_path), Duration::from_secs(30 * 60))
|
|
||||||
.await?;
|
|
||||||
if self.await_transaction_inclusion {
|
|
||||||
let receipt = receipt_future
|
|
||||||
.await
|
|
||||||
.context("Failed while waiting for transaction inclusion in block")?;
|
|
||||||
|
|
||||||
if !receipt.status() {
|
|
||||||
error!(
|
|
||||||
?tx,
|
|
||||||
tx.hash = %receipt.transaction_hash,
|
|
||||||
?receipt,
|
|
||||||
"Encountered a failing benchmark transaction"
|
|
||||||
);
|
|
||||||
bail!(
|
|
||||||
"Encountered a failing transaction in benchmarks: {}",
|
|
||||||
receipt.transaction_hash
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(tx_hash)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_function_call_call_frame_tracing(
|
|
||||||
&mut self,
|
|
||||||
tx_hash: TxHash,
|
|
||||||
) -> Result<CallFrame> {
|
|
||||||
self.platform_information
|
|
||||||
.node
|
|
||||||
.trace_transaction(
|
|
||||||
tx_hash,
|
|
||||||
GethDebugTracingOptions {
|
|
||||||
tracer: Some(GethDebugTracerType::BuiltInTracer(
|
|
||||||
GethDebugBuiltInTracerType::CallTracer,
|
|
||||||
)),
|
|
||||||
tracer_config: GethDebugTracerConfig(serde_json::json! {{
|
|
||||||
"onlyTopCall": true,
|
|
||||||
"withLog": false,
|
|
||||||
"withStorage": false,
|
|
||||||
"withMemory": false,
|
|
||||||
"withStack": false,
|
|
||||||
"withReturnData": true
|
|
||||||
}}),
|
|
||||||
..Default::default()
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.map(|trace| {
|
|
||||||
trace
|
|
||||||
.try_into_call_frame()
|
|
||||||
.expect("Impossible - we requested a callframe trace so we must get it back")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_function_call_variable_assignment(
|
|
||||||
&mut self,
|
|
||||||
step: &FunctionCallStep,
|
|
||||||
tx_hash: TxHash,
|
|
||||||
) -> Result<()> {
|
|
||||||
let Some(ref assignments) = step.variable_assignments else {
|
|
||||||
return Ok(());
|
|
||||||
};
|
|
||||||
|
|
||||||
// Handling the return data variable assignments.
|
|
||||||
let callframe = OnceCell::new();
|
|
||||||
for (variable_name, output_word) in assignments.return_data.iter().zip(
|
|
||||||
callframe
|
|
||||||
.get_or_try_init(|| self.handle_function_call_call_frame_tracing(tx_hash))
|
|
||||||
.await
|
|
||||||
.context("Failed to get the callframe trace for transaction")?
|
|
||||||
.output
|
|
||||||
.as_ref()
|
|
||||||
.unwrap_or_default()
|
|
||||||
.to_vec()
|
|
||||||
.chunks(32),
|
|
||||||
) {
|
|
||||||
let value = U256::from_be_slice(output_word);
|
|
||||||
self.execution_state
|
|
||||||
.variables
|
|
||||||
.insert(variable_name.clone(), value);
|
|
||||||
tracing::info!(
|
|
||||||
variable_name,
|
|
||||||
variable_value = hex::encode(value.to_be_bytes::<32>()),
|
|
||||||
"Assigned variable"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id), err(Debug))]
|
|
||||||
async fn execute_repeat_step(
|
|
||||||
&mut self,
|
|
||||||
step_path: &StepPath,
|
|
||||||
step: &RepeatStep,
|
|
||||||
) -> Result<usize> {
|
|
||||||
let tasks = (0..step.repeat)
|
|
||||||
.map(|_| Driver {
|
|
||||||
driver_id: DRIVER_COUNT.fetch_add(1, Ordering::SeqCst),
|
|
||||||
platform_information: self.platform_information,
|
|
||||||
resolver: self.resolver.clone(),
|
|
||||||
test_definition: self.test_definition,
|
|
||||||
private_key_allocator: self.private_key_allocator.clone(),
|
|
||||||
execution_state: self.execution_state.clone(),
|
|
||||||
steps_executed: 0,
|
|
||||||
steps_iterator: {
|
|
||||||
let steps = step
|
|
||||||
.steps
|
|
||||||
.iter()
|
|
||||||
.cloned()
|
|
||||||
.enumerate()
|
|
||||||
.map(|(step_idx, step)| {
|
|
||||||
let step_idx = StepIdx::new(step_idx);
|
|
||||||
let step_path = step_path.append(step_idx);
|
|
||||||
(step_path, step)
|
|
||||||
})
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
steps.into_iter()
|
|
||||||
},
|
|
||||||
await_transaction_inclusion: self.await_transaction_inclusion,
|
|
||||||
watcher_tx: self.watcher_tx.clone(),
|
|
||||||
})
|
|
||||||
.map(|driver| driver.execute_all());
|
|
||||||
|
|
||||||
// TODO: Determine how we want to know the `ignore_block_before` and if it's through the
|
|
||||||
// receipt and how this would impact the architecture and the possibility of us not waiting
|
|
||||||
// for receipts in the future.
|
|
||||||
self.watcher_tx
|
|
||||||
.send(WatcherEvent::RepetitionStartEvent {
|
|
||||||
ignore_block_before: 0,
|
|
||||||
})
|
|
||||||
.context("Failed to send message on the watcher's tx")?;
|
|
||||||
|
|
||||||
let res = futures::future::try_join_all(tasks)
|
|
||||||
.await
|
|
||||||
.context("Repetition execution failed")?;
|
|
||||||
Ok(res.into_iter().sum())
|
|
||||||
}

    #[instrument(level = "info", fields(driver_id = self.driver_id), skip_all, err(Debug))]
    pub async fn execute_account_allocation(
        &mut self,
        _: &StepPath,
        step: &AllocateAccountStep,
    ) -> Result<usize> {
        let Some(variable_name) = step.variable_name.strip_prefix("$VARIABLE:") else {
            bail!("Account allocation must start with $VARIABLE:");
        };

        let private_key = self
            .private_key_allocator
            .lock()
            .await
            .allocate()
            .context("Account allocation through the private key allocator failed")?;
        let account = private_key.address();
        let variable = U256::from_be_slice(account.0.as_slice());

        self.execution_state
            .variables
            .insert(variable_name.to_string(), variable);

        Ok(1)
    }
    // endregion:Step Handling

    // region:Contract Deployment
    #[instrument(
        level = "info",
        skip_all,
        fields(
            driver_id = self.driver_id,
            %contract_instance,
            %deployer
        ),
        err(Debug),
    )]
    async fn get_or_deploy_contract_instance(
        &mut self,
        contract_instance: &ContractInstance,
        deployer: Address,
        calldata: Option<&Calldata>,
        value: Option<EtherValue>,
        step_path: Option<&StepPath>,
    ) -> Result<(Address, JsonAbi, Option<TransactionReceipt>)> {
        if let Some((_, address, abi)) = self
            .execution_state
            .deployed_contracts
            .get(contract_instance)
        {
            info!(
                %address,
                "Contract instance already deployed."
            );
            Ok((*address, abi.clone(), None))
        } else {
            info!("Contract instance requires deployment.");
            let (address, abi, receipt) = self
                .deploy_contract(contract_instance, deployer, calldata, value, step_path)
                .await
                .context("Failed to deploy contract")?;
            info!(
                %address,
                "Contract instance has been deployed."
            );
            Ok((address, abi, Some(receipt)))
        }
    }
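
    // A minimal sketch of the memoization shape above, with a plain HashMap
    // standing in for `deployed_contracts` and `Address::ZERO` standing in
    // for an actual deployment; both stand-ins are illustrative only.
    #[allow(dead_code)]
    fn get_or_deploy_sketch(
        cache: &mut std::collections::HashMap<String, Address>,
        instance: &str,
    ) -> Address {
        match cache.get(instance) {
            // Deployed before: reuse the recorded address and skip the deployment.
            Some(address) => *address,
            // First use: "deploy", then record the address for later steps.
            None => {
                let address = Address::ZERO;
                cache.insert(instance.to_string(), address);
                address
            }
        }
    }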

    #[instrument(
        level = "info",
        skip_all,
        fields(
            driver_id = self.driver_id,
            %contract_instance,
            %deployer
        ),
        err(Debug),
    )]
    async fn deploy_contract(
        &mut self,
        contract_instance: &ContractInstance,
        deployer: Address,
        calldata: Option<&Calldata>,
        value: Option<EtherValue>,
        step_path: Option<&StepPath>,
    ) -> Result<(Address, JsonAbi, TransactionReceipt)> {
        let Some(ContractPathAndIdent {
            contract_source_path,
            contract_ident,
        }) = self
            .test_definition
            .metadata
            .contract_sources()?
            .remove(contract_instance)
        else {
            anyhow::bail!(
                "Contract source not found for instance {:?}",
                contract_instance
            )
        };

        let Some((code, abi)) = self
            .execution_state
            .compiled_contracts
            .get(&contract_source_path)
            .and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref()))
            .cloned()
        else {
            anyhow::bail!(
                "Failed to find information for contract {:?}",
                contract_instance
            )
        };

        let mut code = match alloy::hex::decode(&code) {
            Ok(code) => code,
            Err(error) => {
                tracing::error!(
                    ?error,
                    contract_source_path = contract_source_path.display().to_string(),
                    contract_ident = contract_ident.as_ref(),
                    "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking"
                );
                anyhow::bail!("Failed to hex-decode the byte code {}", error)
            }
        };

        if let Some(calldata) = calldata {
            let calldata = calldata
                .calldata(self.resolver.as_ref(), self.default_resolution_context())
                .await?;
            code.extend(calldata);
        }

        let tx = {
            let tx = TransactionRequest::default().from(deployer);
            let tx = match value {
                Some(ref value) => tx.value(value.into_inner()),
                _ => tx,
            };
            TransactionBuilder::<Ethereum>::with_deploy_code(tx, code)
        };

        let receipt = match self
            .execute_transaction(tx, step_path, Duration::from_secs(5 * 60))
            .and_then(|(_, receipt_fut)| receipt_fut)
            .await
        {
            Ok(receipt) => receipt,
            Err(error) => {
                tracing::error!(?error, "Contract deployment transaction failed.");
                return Err(error);
            }
        };

        let Some(address) = receipt.contract_address else {
            anyhow::bail!("Contract deployment didn't return an address");
        };
        tracing::info!(
            instance_name = ?contract_instance,
            instance_address = ?address,
            "Deployed contract"
        );
        self.platform_information
            .reporter
            .report_contract_deployed_event(contract_instance.clone(), address)?;

        self.execution_state.deployed_contracts.insert(
            contract_instance.clone(),
            (contract_ident, address, abi.clone()),
        );

        Ok((address, abi, receipt))
    }
    // endregion:Contract Deployment

    // region:Resolution & Resolver
    fn default_resolution_context(&self) -> ResolutionContext<'_> {
        ResolutionContext::default()
            .with_deployed_contracts(&self.execution_state.deployed_contracts)
            .with_variables(&self.execution_state.variables)
    }
    // endregion:Resolution & Resolver
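
    // A minimal sketch of what resolution against this context amounts to:
    // placeholders such as "$VARIABLE:name" are looked up in the maps the
    // context wraps. A plain HashMap stands in here; the real lookup goes
    // through the `ResolverApi` machinery in revive_dt_format.
    #[allow(dead_code)]
    fn resolve_variable_sketch(
        variables: &std::collections::HashMap<String, U256>,
        placeholder: &str,
    ) -> Option<U256> {
        placeholder
            .strip_prefix("$VARIABLE:")
            .and_then(|name| variables.get(name).copied())
    }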

    // region:Transaction Execution
    /// Executes the transaction on the driver's node with some custom waiting logic for the receipt.
    #[instrument(
        level = "info",
        skip_all,
        fields(
            driver_id = self.driver_id,
            transaction = ?transaction,
            transaction_hash = tracing::field::Empty
        ),
        err(Debug)
    )]
    async fn execute_transaction(
        &self,
        transaction: TransactionRequest,
        step_path: Option<&StepPath>,
        receipt_wait_duration: Duration,
    ) -> anyhow::Result<(TxHash, impl Future<Output = Result<TransactionReceipt>>)> {
        let node = self.platform_information.node;
        let provider = node.provider().await.context("Creating provider failed")?;

        let pending_transaction_builder = provider
            .send_transaction(transaction)
            .await
            .context("Failed to submit transaction")?;

        let transaction_hash = *pending_transaction_builder.tx_hash();
        let receipt_future = pending_transaction_builder
            .with_timeout(Some(receipt_wait_duration))
            .with_required_confirmations(2)
            .get_receipt()
            .map(|res| res.context("Failed to get the receipt of the transaction"));
        Span::current().record("transaction_hash", display(transaction_hash));

        info!("Submitted transaction");
        if let Some(step_path) = step_path {
            self.watcher_tx
                .send(WatcherEvent::SubmittedTransaction {
                    transaction_hash,
                    step_path: step_path.clone(),
                })
                .context("Failed to send the transaction hash to the watcher")?;
        };

        Ok((transaction_hash, receipt_future))
    }
    // endregion:Transaction Execution
}
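
// A minimal sketch of the submit-now/await-later shape `execute_transaction`
// returns: the hash is available immediately, while the receipt future can be
// awaited (or dropped) later by the caller. Plain stand-ins, not crate APIs.
#[allow(dead_code)]
fn submit_sketch() -> (u64, impl std::future::Future<Output = anyhow::Result<&'static str>>) {
    let hash = 42; // stands in for the transaction hash
    let receipt = async move {
        // The caller decides when, and whether, to await inclusion.
        tokio::time::sleep(std::time::Duration::from_millis(10)).await;
        Ok("receipt")
    };
    (hash, receipt)
}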

@@ -1,195 +0,0 @@
//! The main entry point for differential benchmarking.

use std::{collections::BTreeMap, sync::Arc};

use anyhow::Context as _;
use futures::{FutureExt, StreamExt};
use revive_dt_common::types::PrivateKeyAllocator;
use revive_dt_core::Platform;
use revive_dt_format::{
    corpus::Corpus,
    steps::{Step, StepIdx, StepPath},
};
use tokio::sync::Mutex;
use tracing::{Instrument, error, info, info_span, instrument, warn};

use revive_dt_config::{BenchmarkingContext, Context};
use revive_dt_report::Reporter;

use crate::{
    differential_benchmarks::{Driver, Watcher, WatcherEvent},
    helpers::{CachedCompiler, NodePool, create_test_definitions_stream},
};

/// Handles the differential benchmarks, executing them according to the information defined in
/// the context.
#[instrument(level = "info", err(Debug), skip_all)]
pub async fn handle_differential_benchmarks(
    mut context: BenchmarkingContext,
    reporter: Reporter,
) -> anyhow::Result<()> {
    // A bit of a hack, but we need to override the number of nodes specified through the CLI
    // since benchmarks can only be run on a single node. Perhaps in the future we'll have a
    // cleaner way to do this, but for the time being we need to override the CLI arguments.
    if context.concurrency_configuration.number_of_nodes != 1 {
        warn!(
            specified_number_of_nodes = context.concurrency_configuration.number_of_nodes,
            updated_number_of_nodes = 1,
            "Invalid number of nodes specified through the CLI. Benchmarks can only be run on a single node. Updated the arguments."
        );
        context.concurrency_configuration.number_of_nodes = 1;
    };
    let full_context = Context::Benchmark(Box::new(context.clone()));

    // Discover all of the metadata files that are defined in the context.
    let corpus = context
        .corpus_configuration
        .test_specifiers
        .clone()
        .into_iter()
        .try_fold(Corpus::default(), Corpus::with_test_specifier)
        .context("Failed to parse the test corpus")?;
    info!(
        len = corpus.metadata_file_count(),
        "Discovered metadata files"
    );

    // Discover the list of platforms that the tests should run on based on the context.
    let platforms = context
        .platforms
        .iter()
        .copied()
        .map(Into::<&dyn Platform>::into)
        .collect::<Vec<_>>();

    // Starting the nodes of the various platforms specified in the context. Note that we use the
    // node pool since it contains all of the code needed to spawn nodes from A to Z and is
    // therefore the preferred way for us to start nodes even when we're starting just a single
    // node. The added overhead from it is quite small (performance wise) since it's involved only
    // when we're creating the test definitions, but it might have other maintenance overhead as
    // it obscures the fact that only a single node is spawned.
    let platforms_and_nodes = {
        let mut map = BTreeMap::new();

        for platform in platforms.iter() {
            let platform_identifier = platform.platform_identifier();

            let node_pool = NodePool::new(full_context.clone(), *platform)
                .await
                .inspect_err(|err| {
                    error!(
                        ?err,
                        %platform_identifier,
                        "Failed to initialize the node pool for the platform."
                    )
                })
                .context("Failed to initialize the node pool")?;

            map.insert(platform_identifier, (*platform, node_pool));
        }

        map
    };
    info!("Spawned the platform nodes");

    // Preparing test definitions for the execution.
    let test_definitions = create_test_definitions_stream(
        &full_context,
        &corpus,
        &platforms_and_nodes,
        None,
        reporter.clone(),
    )
    .await
    .collect::<Vec<_>>()
    .await;
    info!(len = test_definitions.len(), "Created test definitions");

    // Creating the objects that will be shared between the various runs. The cached compiler is
    // the only one, at the current moment in time, that's safe to share between runs.
    let cached_compiler = CachedCompiler::new(
        context
            .working_directory
            .as_path()
            .join("compilation_cache"),
        context
            .compilation_configuration
            .invalidate_compilation_cache,
    )
    .await
    .map(Arc::new)
    .context("Failed to initialize cached compiler")?;

    // Note: we do not want to run all of the workloads concurrently on all platforms. Rather,
    // we'd like to run all of the workloads for one platform and then the next, sequentially, as
    // we'd like the effect of concurrency to be minimized while benchmarking.
    for platform in platforms.iter() {
        let platform_identifier = platform.platform_identifier();

        let span = info_span!("Benchmarking for the platform", %platform_identifier);
        let _guard = span.enter();

        for test_definition in test_definitions.iter() {
            let platform_information = &test_definition.platforms[&platform_identifier];

            let span = info_span!(
                "Executing workload",
                metadata_file_path = %test_definition.metadata_file_path.display(),
                case_idx = %test_definition.case_idx,
                mode = %test_definition.mode,
            );
            let _guard = span.enter();

            // Initializing all of the components required to execute this particular workload.
            let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
                context.wallet_configuration.highest_private_key_exclusive(),
            )));
            let (watcher, watcher_tx) = Watcher::new(
                platform_information
                    .node
                    .subscribe_to_full_blocks_information()
                    .await
                    .context("Failed to subscribe to full blocks information from the node")?,
                test_definition
                    .reporter
                    .execution_specific_reporter(0usize, platform_identifier),
            );
            let driver = Driver::new(
                platform_information,
                test_definition,
                private_key_allocator,
                cached_compiler.as_ref(),
                watcher_tx.clone(),
                context.await_transaction_inclusion,
                test_definition
                    .case
                    .steps_iterator_for_benchmarks(context.default_repetition_count)
                    .enumerate()
                    .map(|(step_idx, step)| -> (StepPath, Step) {
                        (StepPath::new(vec![StepIdx::new(step_idx)]), step)
                    }),
            )
            .await
            .context("Failed to create the benchmarks driver")?;

            futures::future::try_join(
                watcher.run(),
                driver
                    .execute_all()
                    .instrument(info_span!("Executing Benchmarks", %platform_identifier))
                    .inspect(|_| {
                        info!("All transactions submitted - driver completed execution");
                        watcher_tx
                            .send(WatcherEvent::AllTransactionsSubmitted)
                            .unwrap()
                    }),
            )
            .await
            .context("Failed to run the driver and executor")
            .inspect(|(_, steps_executed)| info!(steps_executed, "Workload Execution Succeeded"))
            .inspect_err(|err| error!(?err, "Workload Execution Failed"))?;
        }
    }

    Ok(())
}

@@ -1,43 +0,0 @@
use std::{collections::HashMap, path::PathBuf};

use alloy::{
    json_abi::JsonAbi,
    primitives::{Address, U256},
};

use revive_dt_format::metadata::{ContractIdent, ContractInstance};

/// The state associated with the test execution of one of the workloads.
#[derive(Clone)]
pub struct ExecutionState {
    /// The compiled contracts; these have been compiled and had their libraries linked, and are
    /// therefore ready to be deployed on demand.
    pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,

    /// A map of all of the deployed contracts and information about them.
    pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,

    /// This map stores the variables used for each one of the cases contained in the metadata file.
    pub variables: HashMap<String, U256>,
}

impl ExecutionState {
    pub fn new(
        compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
        deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
    ) -> Self {
        Self {
            compiled_contracts,
            deployed_contracts,
            variables: Default::default(),
        }
    }

    pub fn empty() -> Self {
        Self {
            compiled_contracts: Default::default(),
            deployed_contracts: Default::default(),
            variables: Default::default(),
        }
    }
}

@@ -1,9 +0,0 @@
mod driver;
mod entry_point;
mod execution_state;
mod watcher;

pub use driver::*;
pub use entry_point::*;
pub use execution_state::*;
pub use watcher::*;

@@ -1,218 +0,0 @@
use std::{
    collections::HashMap,
    pin::Pin,
    sync::Arc,
    time::{SystemTime, UNIX_EPOCH},
};

use alloy::primitives::{BlockNumber, TxHash};
use anyhow::Result;
use futures::{Stream, StreamExt};
use revive_dt_format::steps::StepPath;
use revive_dt_report::{ExecutionSpecificReporter, MinedBlockInformation, TransactionInformation};
use tokio::sync::{
    RwLock,
    mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
};
use tracing::{info, instrument};

/// This struct defines the watcher used in the benchmarks. A watcher is only valid for a single
/// workload and MUST NOT be re-used between workloads, since it holds important internal state
/// for that workload and is not designed for reuse.
pub struct Watcher {
    /// The receive side of the channel that all of the drivers and various other parts of the
    /// code send events to the watcher on.
    rx: UnboundedReceiver<WatcherEvent>,

    /// This is a stream of the blocks that were mined by the node. This is for a single platform
    /// and a single node from that platform.
    blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,

    /// The reporter used to send events to the report aggregator.
    reporter: ExecutionSpecificReporter,
}

impl Watcher {
    pub fn new(
        blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
        reporter: ExecutionSpecificReporter,
    ) -> (Self, UnboundedSender<WatcherEvent>) {
        let (tx, rx) = unbounded_channel::<WatcherEvent>();
        (
            Self {
                rx,
                blocks_stream,
                reporter,
            },
            tx,
        )
    }

    #[instrument(level = "info", skip_all)]
    pub async fn run(mut self) -> Result<()> {
        // The first event that the watcher receives must be a `RepetitionStartEvent` that informs
        // the watcher of the last block number that it should ignore and what the block number is
        // for the first important block that it should look for.
        let ignore_block_before = loop {
            let Some(WatcherEvent::RepetitionStartEvent {
                ignore_block_before,
            }) = self.rx.recv().await
            else {
                continue;
            };
            break ignore_block_before;
        };

        // This is the set of transaction hashes that the watcher should look for in the blocks.
        // The watcher will keep watching blocks until all of the transactions it was watching for
        // have been seen in the mined blocks.
        let watch_for_transaction_hashes =
            Arc::new(RwLock::new(HashMap::<TxHash, (StepPath, SystemTime)>::new()));

        // A boolean that keeps track of whether all of the transactions were submitted or if more
        // txs are expected to come through the receive side of the channel. We do not want to
        // rely on the channel closing alone; instead there is an explicit event, sent by the core
        // orchestrator, that informs the watcher that no further transactions are to be expected
        // and that it can safely ignore the channel.
        let all_transactions_submitted = Arc::new(RwLock::new(false));

        let watcher_event_watching_task = {
            let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
            let all_transactions_submitted = all_transactions_submitted.clone();
            async move {
                while let Some(watcher_event) = self.rx.recv().await {
                    match watcher_event {
                        // Subsequent repetition starts are ignored since certain workloads can
                        // contain nested repetitions and therefore there's no use in doing any
                        // action if the repetitions are nested.
                        WatcherEvent::RepetitionStartEvent { .. } => {}
                        WatcherEvent::SubmittedTransaction {
                            transaction_hash,
                            step_path,
                        } => {
                            watch_for_transaction_hashes
                                .write()
                                .await
                                .insert(transaction_hash, (step_path, SystemTime::now()));
                        }
                        WatcherEvent::AllTransactionsSubmitted => {
                            *all_transactions_submitted.write().await = true;
                            self.rx.close();
                            info!("Watcher's Events Watching Task Finished");
                            break;
                        }
                    }
                }
            }
        };
        let reporter = self.reporter.clone();
        let block_information_watching_task = {
            let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
            let all_transactions_submitted = all_transactions_submitted.clone();
            let mut blocks_information_stream = self.blocks_stream;
            async move {
                while let Some(mut block) = blocks_information_stream.next().await {
                    // If the block number is equal to or less than the last block before the
                    // repetition then we ignore it and continue on to the next block.
                    if block.ethereum_block_information.block_number <= ignore_block_before {
                        continue;
                    }
                    {
                        let watch_for_transaction_hashes =
                            watch_for_transaction_hashes.read().await;
                        for tx_hash in block.ethereum_block_information.transaction_hashes.iter() {
                            let Some((step_path, _)) = watch_for_transaction_hashes.get(tx_hash)
                            else {
                                continue;
                            };
                            *block.tx_counts.entry(step_path.clone()).or_default() += 1
                        }
                    }
                    reporter
                        .report_block_mined_event(block.clone())
                        .expect("Can't fail");

                    if *all_transactions_submitted.read().await
                        && watch_for_transaction_hashes.read().await.is_empty()
                    {
                        break;
                    }

                    // Remove all of the transaction hashes observed in this block from the txs we
                    // are currently watching for.
                    let mut watch_for_transaction_hashes =
                        watch_for_transaction_hashes.write().await;
                    let mut relevant_transactions_observed = 0;
                    for tx_hash in block.ethereum_block_information.transaction_hashes.iter() {
                        let Some((step_path, submission_time)) =
                            watch_for_transaction_hashes.remove(tx_hash)
                        else {
                            continue;
                        };
                        relevant_transactions_observed += 1;
                        let transaction_information = TransactionInformation {
                            transaction_hash: *tx_hash,
                            submission_timestamp: submission_time
                                .duration_since(UNIX_EPOCH)
                                .expect("Can't fail")
                                .as_secs() as _,
                            block_timestamp: block.ethereum_block_information.block_timestamp,
                            block_number: block.ethereum_block_information.block_number,
                        };
                        reporter
                            .report_step_transaction_information_event(
                                step_path,
                                transaction_information,
                            )
                            .expect("Can't fail")
                    }

                    info!(
                        block_number = block.ethereum_block_information.block_number,
                        block_tx_count = block.ethereum_block_information.transaction_hashes.len(),
                        relevant_transactions_observed,
                        remaining_transactions = watch_for_transaction_hashes.len(),
                        "Observed a block"
                    );
                }

                info!("Watcher's Block Watching Task Finished");
            }
        };

        let (_, _) =
            futures::future::join(watcher_event_watching_task, block_information_watching_task)
                .await;

        Ok(())
    }
}

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum WatcherEvent {
    /// Informs the watcher that it should begin watching for the blocks mined by the platforms.
    /// Before the watcher receives this event it will not be watching for the mined blocks. The
    /// reason behind this is that we do not want the initialization transactions (e.g., contract
    /// deployments) to be included in the overall TPS and GPS measurements, as these blocks will
    /// most likely contain only a single transaction used for initialization.
    RepetitionStartEvent {
        /// This is the block number of the last block seen before the repetition started. This is
        /// used to instruct the watcher to ignore all blocks prior to this block when it starts
        /// streaming the blocks.
        ignore_block_before: BlockNumber,
    },
    /// Informs the watcher that a transaction was submitted and that the watcher should watch for
    /// a transaction with this hash in the blocks that it watches.
    SubmittedTransaction {
        /// The hash of the submitted transaction.
        transaction_hash: TxHash,
        /// The step path of the step that the transaction belongs to.
        step_path: StepPath,
    },
    /// Informs the watcher that all of the transactions of this benchmark have been submitted,
    /// that it can expect to receive no further transaction hashes, and that it need not watch
    /// the channel any longer.
    AllTransactionsSubmitted,
}
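
// A minimal sketch of the handshake a driver performs against the watcher,
// built only from the types defined above; the `blocks` stream and `reporter`
// are assumed to come from the node and the report crate as in the entry point.
#[allow(dead_code)]
async fn watcher_handshake_sketch(
    blocks: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
    reporter: ExecutionSpecificReporter,
    transaction_hash: TxHash,
    step_path: StepPath,
) -> Result<()> {
    let (watcher, tx) = Watcher::new(blocks, reporter);
    // 1. Mark the start of the measured window; earlier blocks are ignored.
    tx.send(WatcherEvent::RepetitionStartEvent {
        ignore_block_before: 0,
    })
    .map_err(|error| anyhow::anyhow!("watcher channel closed: {error}"))?;
    // 2. Register every submitted transaction so it can be matched to a block.
    tx.send(WatcherEvent::SubmittedTransaction {
        transaction_hash,
        step_path,
    })
    .map_err(|error| anyhow::anyhow!("watcher channel closed: {error}"))?;
    // 3. Announce that no further hashes will arrive, then wait for the watcher.
    tx.send(WatcherEvent::AllTransactionsSubmitted)
        .map_err(|error| anyhow::anyhow!("watcher channel closed: {error}"))?;
    watcher.run().await
}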

File diff suppressed because it is too large

@@ -1,371 +0,0 @@
//! The main entry point into differential testing.

use std::{
    collections::{BTreeMap, BTreeSet},
    io::{BufWriter, Write, stderr},
    sync::Arc,
    time::{Duration, Instant},
};

use ansi_term::{ANSIStrings, Color};
use anyhow::Context as _;
use futures::{FutureExt, StreamExt};
use revive_dt_common::{cached_fs::read_to_string, types::PrivateKeyAllocator};
use revive_dt_core::Platform;
use revive_dt_format::corpus::Corpus;
use tokio::sync::{Mutex, RwLock, Semaphore};
use tracing::{Instrument, error, info, info_span, instrument};

use revive_dt_config::{Context, OutputFormat, TestExecutionContext};
use revive_dt_report::{Reporter, ReporterEvent, TestCaseStatus};

use crate::{
    differential_tests::Driver,
    helpers::{CachedCompiler, NodePool, create_test_definitions_stream},
};

/// Handles the differential testing, executing it according to the information defined in the
/// context.
#[instrument(level = "info", err(Debug), skip_all)]
pub async fn handle_differential_tests(
    context: TestExecutionContext,
    reporter: Reporter,
) -> anyhow::Result<()> {
    let reporter_clone = reporter.clone();

    // Discover all of the metadata files that are defined in the context.
    let corpus = context
        .corpus_configuration
        .test_specifiers
        .clone()
        .into_iter()
        .try_fold(Corpus::default(), Corpus::with_test_specifier)
        .context("Failed to parse the test corpus")?;
    info!(
        len = corpus.metadata_file_count(),
        "Discovered metadata files"
    );

    // Discover the list of platforms that the tests should run on based on the context.
    let platforms = context
        .platforms
        .iter()
        .copied()
        .map(Into::<&dyn Platform>::into)
        .collect::<Vec<_>>();

    // Starting the nodes of the various platforms specified in the context.
    let platforms_and_nodes = {
        let mut map = BTreeMap::new();

        for platform in platforms.iter() {
            let platform_identifier = platform.platform_identifier();

            let context = Context::Test(Box::new(context.clone()));
            let node_pool = NodePool::new(context, *platform)
                .await
                .inspect_err(|err| {
                    error!(
                        ?err,
                        %platform_identifier,
                        "Failed to initialize the node pool for the platform."
                    )
                })
                .context("Failed to initialize the node pool")?;

            map.insert(platform_identifier, (*platform, node_pool));
        }

        map
    };
    info!("Spawned the platform nodes");

    // Preparing test definitions.
    let only_execute_failed_tests = match context.ignore_success_configuration.path.as_ref() {
        Some(path) => {
            let report = read_to_string(path)
                .context("Failed to read the report file to ignore the succeeding test cases")?;
            Some(serde_json::from_str(&report).context("Failed to deserialize report")?)
        }
        None => None,
    };
    let full_context = Context::Test(Box::new(context.clone()));
    let test_definitions = create_test_definitions_stream(
        &full_context,
        &corpus,
        &platforms_and_nodes,
        only_execute_failed_tests.as_ref(),
        reporter.clone(),
    )
    .await
    .collect::<Vec<_>>()
    .await;
    info!(len = test_definitions.len(), "Created test definitions");

    // Creating everything else required for the driver to run.
    let cached_compiler = CachedCompiler::new(
        context
            .working_directory
            .as_path()
            .join("compilation_cache"),
        context
            .compilation_configuration
            .invalidate_compilation_cache,
    )
    .await
    .map(Arc::new)
    .context("Failed to initialize cached compiler")?;
    let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
        context.wallet_configuration.highest_private_key_exclusive(),
    )));

    // Creating the driver and executing all of the steps.
    let semaphore = context
        .concurrency_configuration
        .concurrency_limit()
        .map(Semaphore::new)
        .map(Arc::new);
    let running_task_list = Arc::new(RwLock::new(BTreeSet::<usize>::new()));
    let driver_task = futures::future::join_all(test_definitions.iter().enumerate().map(
        |(test_id, test_definition)| {
            let running_task_list = running_task_list.clone();
            let semaphore = semaphore.clone();

            let private_key_allocator = private_key_allocator.clone();
            let cached_compiler = cached_compiler.clone();
            let mode = test_definition.mode.clone();
            let span = info_span!(
                "Executing Test Case",
                test_id,
                metadata_file_path = %test_definition.metadata_file_path.display(),
                case_idx = %test_definition.case_idx,
                mode = %mode,
            );
            async move {
                let permit = match semaphore.as_ref() {
                    Some(semaphore) => Some(semaphore.acquire().await.expect("Can't fail")),
                    None => None,
                };

                running_task_list.write().await.insert(test_id);
                let driver = match Driver::new_root(
                    test_definition,
                    private_key_allocator,
                    &cached_compiler,
                )
                .await
                {
                    Ok(driver) => driver,
                    Err(error) => {
                        test_definition
                            .reporter
                            .report_test_failed_event(format!("{error:#}"))
                            .expect("Can't fail");
                        error!("Test Case Failed");
                        drop(permit);
                        running_task_list.write().await.remove(&test_id);
                        return;
                    }
                };
                info!("Created the driver for the test case");

                match driver.execute_all().await {
                    Ok(steps_executed) => test_definition
                        .reporter
                        .report_test_succeeded_event(steps_executed)
                        .expect("Can't fail"),
                    Err(error) => {
                        test_definition
                            .reporter
                            .report_test_failed_event(format!("{error:#}"))
                            .expect("Can't fail");
                        error!("Test Case Failed");
                    }
                };
                info!("Finished the execution of the test case");
                drop(permit);
                running_task_list.write().await.remove(&test_id);
            }
            .instrument(span)
        },
    ))
    .inspect(|_| {
        info!("Finished executing all test cases");
        reporter_clone
            .report_completion_event()
            .expect("Can't fail")
    });
    let cli_reporting_task = start_cli_reporting_task(context.output_format, reporter);

    tokio::task::spawn(async move {
        loop {
            let remaining_tasks = running_task_list.read().await;
            info!(
                count = remaining_tasks.len(),
                ?remaining_tasks,
                "Remaining Tests"
            );
            drop(remaining_tasks);
            tokio::time::sleep(Duration::from_secs(10)).await
        }
    });

    futures::future::join(driver_task, cli_reporting_task).await;

    Ok(())
}
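
// A minimal sketch of the optional concurrency gate used above: when no limit
// is configured the permit is simply `None` and the work runs unbounded.
// `bounded_work` is an illustrative name, not part of the crate.
#[allow(dead_code)]
async fn bounded_work(semaphore: Option<Arc<Semaphore>>) {
    let _permit = match semaphore.as_ref() {
        Some(semaphore) => Some(semaphore.acquire().await.expect("semaphore never closed")),
        None => None,
    };
    // ... the gated work runs here while `_permit` is held ...
}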

#[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
async fn start_cli_reporting_task(output_format: OutputFormat, reporter: Reporter) {
    let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
    drop(reporter);

    let start = Instant::now();

    let mut global_success_count = 0;
    let mut global_failure_count = 0;
    let mut global_ignore_count = 0;

    let mut buf = BufWriter::new(stderr());
    while let Ok(event) = aggregator_events_rx.recv().await {
        let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
            metadata_file_path,
            mode,
            case_status,
        } = event
        else {
            continue;
        };

        match output_format {
            OutputFormat::Legacy => {
                let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
                for (case_idx, case_status) in case_status.into_iter() {
                    let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
                    let _ = match case_status {
                        TestCaseStatus::Succeeded { steps_executed } => {
                            global_success_count += 1;
                            writeln!(
                                buf,
                                "{}",
                                ANSIStrings(&[
                                    Color::Green.bold().paint("Case Succeeded"),
                                    Color::Green
                                        .paint(format!(" - Steps Executed: {steps_executed}")),
                                ])
                            )
                        }
                        TestCaseStatus::Failed { reason } => {
                            global_failure_count += 1;
                            writeln!(
                                buf,
                                "{}",
                                ANSIStrings(&[
                                    Color::Red.bold().paint("Case Failed"),
                                    Color::Red.paint(format!(" - Reason: {}", reason.trim())),
                                ])
                            )
                        }
                        TestCaseStatus::Ignored { reason, .. } => {
                            global_ignore_count += 1;
                            writeln!(
                                buf,
                                "{}",
                                ANSIStrings(&[
                                    Color::Yellow.bold().paint("Case Ignored"),
                                    Color::Yellow.paint(format!(" - Reason: {}", reason.trim())),
                                ])
                            )
                        }
                    };
                }
                let _ = writeln!(buf);
            }
            OutputFormat::CargoTestLike => {
                writeln!(
                    buf,
                    "\t{} {} - {}\n",
                    Color::Green.paint("Running"),
                    metadata_file_path.display(),
                    mode
                )
                .unwrap();

                let mut success_count = 0;
                let mut failure_count = 0;
                let mut ignored_count = 0;
                writeln!(buf, "running {} tests", case_status.len()).unwrap();
                for (case_idx, case_result) in case_status.iter() {
                    let status = match case_result {
                        TestCaseStatus::Succeeded { .. } => {
                            success_count += 1;
                            global_success_count += 1;
                            Color::Green.paint("ok")
                        }
                        TestCaseStatus::Failed { reason } => {
                            failure_count += 1;
                            global_failure_count += 1;
                            Color::Red.paint(format!("FAILED, {reason}"))
                        }
                        TestCaseStatus::Ignored { reason, .. } => {
                            ignored_count += 1;
                            global_ignore_count += 1;
                            Color::Yellow.paint(format!("ignored, {reason:?}"))
                        }
                    };
                    writeln!(buf, "test case_idx_{} ... {}", case_idx, status).unwrap();
                }
                writeln!(buf).unwrap();

                let status = if failure_count > 0 {
                    Color::Red.paint("FAILED")
                } else {
                    Color::Green.paint("ok")
                };
                writeln!(
                    buf,
                    "test result: {}. {} passed; {} failed; {} ignored",
                    status, success_count, failure_count, ignored_count,
                )
                .unwrap();
                writeln!(buf).unwrap();

                if aggregator_events_rx.is_empty() {
                    buf = tokio::task::spawn_blocking(move || {
                        buf.flush().unwrap();
                        buf
                    })
                    .await
                    .unwrap();
                }
            }
        }
    }
    info!("Aggregator Broadcast Channel Closed");

    // Summary at the end.
    match output_format {
        OutputFormat::Legacy => {
            writeln!(
                buf,
                "{} cases: {} cases succeeded, {} cases failed in {} seconds",
                global_success_count + global_failure_count + global_ignore_count,
                Color::Green.paint(global_success_count.to_string()),
                Color::Red.paint(global_failure_count.to_string()),
                start.elapsed().as_secs()
            )
            .unwrap();
        }
        OutputFormat::CargoTestLike => {
            writeln!(
                buf,
                "run finished. {} passed; {} failed; {} ignored; finished in {}s",
                global_success_count,
                global_failure_count,
                global_ignore_count,
                start.elapsed().as_secs()
            )
            .unwrap();
        }
    }
}

@@ -1,35 +0,0 @@
use std::{collections::HashMap, path::PathBuf};

use alloy::{
    json_abi::JsonAbi,
    primitives::{Address, U256},
};

use revive_dt_format::metadata::{ContractIdent, ContractInstance};

/// The state associated with the test execution of one of the tests.
#[derive(Clone)]
pub struct ExecutionState {
    /// The compiled contracts; these have been compiled and had their libraries linked, and are
    /// therefore ready to be deployed on demand.
    pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,

    /// A map of all of the deployed contracts and information about them.
    pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,

    /// This map stores the variables used for each one of the cases contained in the metadata file.
    pub variables: HashMap<String, U256>,
}

impl ExecutionState {
    pub fn new(
        compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
        deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
    ) -> Self {
        Self {
            compiled_contracts,
            deployed_contracts,
            variables: Default::default(),
        }
    }
}

@@ -1,11 +0,0 @@
//! This module contains all of the code responsible for performing differential tests, including
//! the driver implementation, state implementation, and the core logic that allows for tests to
//! be executed.

mod driver;
mod entry_point;
mod execution_state;

pub use driver::*;
pub use entry_point::*;
pub use execution_state::*;

@@ -0,0 +1,900 @@
//! The test driver handles the compilation and execution of the test cases.

use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;

use alloy::consensus::EMPTY_ROOT_HASH;
use alloy::hex;
use alloy::json_abi::JsonAbi;
use alloy::network::{Ethereum, TransactionBuilder};
use alloy::primitives::{TxHash, U256};
use alloy::rpc::types::TransactionReceipt;
use alloy::rpc::types::trace::geth::{
    CallFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType,
    GethDebugTracingOptions, GethTrace, PreStateConfig,
};
use alloy::{
    primitives::Address,
    rpc::types::{TransactionRequest, trace::geth::DiffMode},
};
use anyhow::{Context as _, bail};
use futures::{TryStreamExt, future::try_join_all};
use indexmap::IndexMap;
use revive_dt_common::types::{PlatformIdentifier, PrivateKeyAllocator};
use revive_dt_format::traits::{ResolutionContext, ResolverApi};
use revive_dt_report::ExecutionSpecificReporter;
use semver::Version;

use revive_dt_format::case::Case;
use revive_dt_format::metadata::{ContractIdent, ContractInstance, ContractPathAndIdent};
use revive_dt_format::steps::{
    BalanceAssertionStep, Calldata, EtherValue, Expected, ExpectedOutput, FunctionCallStep, Method,
    StepIdx, StepPath, StorageEmptyAssertionStep,
};
use revive_dt_format::{metadata::Metadata, steps::Step};
use revive_dt_node_interaction::EthereumNode;
use tokio::sync::Mutex;
use tokio::try_join;
use tracing::{Instrument, info, info_span, instrument};

#[derive(Clone)]
pub struct CaseState {
    /// A map of all of the compiled contracts for the given metadata file.
    compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,

    /// This map stores the contract deployments for this case.
    deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,

    /// This map stores the variables used for each one of the cases contained in the metadata
    /// file.
    variables: HashMap<String, U256>,

    /// Stores the compiler version used for the current case.
    compiler_version: Version,

    /// The execution reporter.
    execution_reporter: ExecutionSpecificReporter,

    /// The private key allocator used for this case state. This is an Arc Mutex to allow for the
    /// state to be cloned and for all of the clones to refer to the same allocator.
    private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
}

impl CaseState {
    pub fn new(
        compiler_version: Version,
        compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
        deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
        execution_reporter: ExecutionSpecificReporter,
        private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
    ) -> Self {
        Self {
            compiled_contracts,
            deployed_contracts,
            variables: Default::default(),
            compiler_version,
            execution_reporter,
            private_key_allocator,
        }
    }

    pub async fn handle_step(
        &mut self,
        metadata: &Metadata,
        step: &Step,
        step_path: &StepPath,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<StepOutput> {
        match step {
            Step::FunctionCall(input) => {
                let (receipt, geth_trace, diff_mode) = self
                    .handle_input(metadata, input, node)
                    .await
                    .context("Failed to handle function call step")?;
                Ok(StepOutput::FunctionCall(receipt, geth_trace, diff_mode))
            }
            Step::BalanceAssertion(balance_assertion) => {
                self.handle_balance_assertion(metadata, balance_assertion, node)
                    .await
                    .context("Failed to handle balance assertion step")?;
                Ok(StepOutput::BalanceAssertion)
            }
            Step::StorageEmptyAssertion(storage_empty) => {
                self.handle_storage_empty(metadata, storage_empty, node)
                    .await
                    .context("Failed to handle storage empty assertion step")?;
                Ok(StepOutput::StorageEmptyAssertion)
            }
            Step::Repeat(repetition_step) => {
                self.handle_repeat(
                    metadata,
                    repetition_step.repeat,
                    &repetition_step.steps,
                    step_path,
                    node,
                )
                .await
                .context("Failed to handle the repetition step")?;
                Ok(StepOutput::Repetition)
            }
            Step::AllocateAccount(account_allocation) => {
                self.handle_account_allocation(account_allocation.variable_name.as_str())
                    .await
                    .context("Failed to allocate account")?;
                Ok(StepOutput::AccountAllocation)
            }
        }
        .inspect(|_| info!("Step Succeeded"))
    }

    #[instrument(level = "info", name = "Handling Input", skip_all)]
    pub async fn handle_input(
        &mut self,
        metadata: &Metadata,
        input: &FunctionCallStep,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> {
        let resolver = node.resolver().await?;

        let deployment_receipts = self
            .handle_input_contract_deployment(metadata, input, node)
            .await
            .context("Failed during contract deployment phase of input handling")?;
        let execution_receipt = self
            .handle_input_execution(input, deployment_receipts, node)
            .await
            .context("Failed during transaction execution phase of input handling")?;
        let tracing_result = self
            .handle_input_call_frame_tracing(execution_receipt.transaction_hash, node)
            .await
            .context("Failed during callframe tracing phase of input handling")?;
        self.handle_input_variable_assignment(input, &tracing_result)
            .context("Failed to assign variables from callframe output")?;
        let (_, (geth_trace, diff_mode)) = try_join!(
            self.handle_input_expectations(
                input,
                &execution_receipt,
                resolver.as_ref(),
                &tracing_result
            ),
            self.handle_input_diff(execution_receipt.transaction_hash, node)
        )
        .context("Failed while evaluating expectations and diffs in parallel")?;
        Ok((execution_receipt, geth_trace, diff_mode))
    }

    #[instrument(level = "info", name = "Handling Balance Assertion", skip_all)]
    pub async fn handle_balance_assertion(
        &mut self,
        metadata: &Metadata,
        balance_assertion: &BalanceAssertionStep,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<()> {
        self.handle_balance_assertion_contract_deployment(metadata, balance_assertion, node)
            .await
            .context("Failed to deploy contract for balance assertion")?;
        self.handle_balance_assertion_execution(balance_assertion, node)
            .await
            .context("Failed to execute balance assertion")?;
        Ok(())
    }

    #[instrument(level = "info", name = "Handling Storage Assertion", skip_all)]
    pub async fn handle_storage_empty(
        &mut self,
        metadata: &Metadata,
        storage_empty: &StorageEmptyAssertionStep,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<()> {
        self.handle_storage_empty_assertion_contract_deployment(metadata, storage_empty, node)
            .await
            .context("Failed to deploy contract for storage empty assertion")?;
        self.handle_storage_empty_assertion_execution(storage_empty, node)
            .await
            .context("Failed to execute storage empty assertion")?;
        Ok(())
    }

    #[instrument(level = "info", name = "Handling Repetition", skip_all)]
    pub async fn handle_repeat(
        &mut self,
        metadata: &Metadata,
        repetitions: usize,
        steps: &[Step],
        step_path: &StepPath,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<()> {
        let tasks = (0..repetitions).map(|_| {
            let mut state = self.clone();
            async move {
                for (step_idx, step) in steps.iter().enumerate() {
                    let step_path = step_path.append(step_idx);
                    state.handle_step(metadata, step, &step_path, node).await?;
                }
                Ok::<(), anyhow::Error>(())
            }
        });
        try_join_all(tasks).await?;
        Ok(())
    }
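
    // A minimal sketch of the fan-out used above: every repetition gets its
    // own clone of the case state and the clones run concurrently, failing
    // fast if any repetition errors. The body here is illustrative only.
    #[allow(dead_code)]
    async fn fan_out_sketch(repetitions: usize) -> anyhow::Result<()> {
        let tasks = (0..repetitions).map(|repetition| async move {
            // Each clone works independently; shared pieces (e.g. the private
            // key allocator) sit behind an Arc so all clones stay in sync.
            info!(repetition, "running one repetition");
            Ok::<(), anyhow::Error>(())
        });
        try_join_all(tasks).await?;
        Ok(())
    }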

    #[instrument(level = "info", name = "Handling Account Allocation", skip_all)]
    pub async fn handle_account_allocation(&mut self, variable_name: &str) -> anyhow::Result<()> {
        let Some(variable_name) = variable_name.strip_prefix("$VARIABLE:") else {
            bail!("Account allocation must start with $VARIABLE:");
        };

        let private_key = self.private_key_allocator.lock().await.allocate()?;
        let account = private_key.address();
        let variable = U256::from_be_slice(account.0.as_slice());

        self.variables.insert(variable_name.to_string(), variable);

        Ok(())
    }

    /// Handles the contract deployment for a given input, performing it if it needs to be
    /// performed.
    #[instrument(level = "info", skip_all)]
    async fn handle_input_contract_deployment(
        &mut self,
        metadata: &Metadata,
        input: &FunctionCallStep,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<HashMap<ContractInstance, TransactionReceipt>> {
        let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
        for instance in input.find_all_contract_instances().into_iter() {
            if !self.deployed_contracts.contains_key(&instance) {
                instances_we_must_deploy.entry(instance).or_insert(false);
            }
        }
        if let Method::Deployer = input.method {
            instances_we_must_deploy.swap_remove(&input.instance);
            instances_we_must_deploy.insert(input.instance.clone(), true);
        }

        let mut receipts = HashMap::new();
        for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() {
            let calldata = deploy_with_constructor_arguments.then_some(&input.calldata);
            let value = deploy_with_constructor_arguments
                .then_some(input.value)
                .flatten();

            let caller = {
                let context = self.default_resolution_context();
                let resolver = node.resolver().await?;
                input
                    .caller
                    .resolve_address(resolver.as_ref(), context)
                    .await?
            };
            if let (_, _, Some(receipt)) = self
                .get_or_deploy_contract_instance(&instance, metadata, caller, calldata, value, node)
                .await
                .context("Failed to get or deploy contract instance during input execution")?
            {
                receipts.insert(instance.clone(), receipt);
            }
        }

        Ok(receipts)
    }
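
    // A minimal sketch of the move-to-back trick above: `swap_remove` followed
    // by `insert` pushes a key to the end of an IndexMap's iteration order, so
    // the deployer-called instance is deployed last, with its constructor
    // arguments. The map contents here are illustrative only.
    #[allow(dead_code)]
    fn move_to_back_sketch(map: &mut IndexMap<String, bool>, key: &str) {
        map.swap_remove(key);
        map.insert(key.to_string(), true);
    }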

    /// Handles the execution of the input in terms of the calls that need to be made.
    #[instrument(level = "info", skip_all)]
    async fn handle_input_execution(
        &mut self,
        input: &FunctionCallStep,
        mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<TransactionReceipt> {
        match input.method {
            // This input was already executed when `handle_input` was called. We just need to
            // look up the transaction receipt in this case and continue on.
            Method::Deployer => deployment_receipts
                .remove(&input.instance)
                .context("Failed to find deployment receipt for constructor call"),
            Method::Fallback | Method::FunctionName(_) => {
                let resolver = node.resolver().await?;
                let tx = match input
                    .legacy_transaction(resolver.as_ref(), self.default_resolution_context())
                    .await
                {
                    Ok(tx) => tx,
                    Err(err) => {
                        return Err(err);
                    }
                };

                match node.execute_transaction(tx).await {
                    Ok(receipt) => Ok(receipt),
                    Err(err) => Err(err),
                }
            }
        }
    }

    #[instrument(level = "info", skip_all)]
    async fn handle_input_call_frame_tracing(
        &self,
        tx_hash: TxHash,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<CallFrame> {
        node.trace_transaction(
            tx_hash,
            GethDebugTracingOptions {
                tracer: Some(GethDebugTracerType::BuiltInTracer(
                    GethDebugBuiltInTracerType::CallTracer,
                )),
                tracer_config: GethDebugTracerConfig(serde_json::json! {{
                    "onlyTopCall": true,
                    "withLog": false,
                    "withStorage": false,
                    "withMemory": false,
                    "withStack": false,
                    "withReturnData": true
                }}),
                ..Default::default()
            },
        )
        .await
        .map(|trace| {
            trace
                .try_into_call_frame()
                .expect("Impossible - we requested a callframe trace so we must get it back")
        })
    }

    #[instrument(level = "info", skip_all)]
    fn handle_input_variable_assignment(
        &mut self,
        input: &FunctionCallStep,
        tracing_result: &CallFrame,
    ) -> anyhow::Result<()> {
        let Some(ref assignments) = input.variable_assignments else {
            return Ok(());
        };

        // Handling the return data variable assignments.
        for (variable_name, output_word) in assignments.return_data.iter().zip(
            tracing_result
                .output
                .as_ref()
                .unwrap_or_default()
                .to_vec()
                .chunks(32),
        ) {
            let value = U256::from_be_slice(output_word);
            self.variables.insert(variable_name.clone(), value);
            tracing::info!(
                variable_name,
                variable_value = hex::encode(value.to_be_bytes::<32>()),
                "Assigned variable"
            );
        }

        Ok(())
    }
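
    // Illustrative sketch (not in the original source): the assignment loop above
    // pairs declared variable names with the traced return data split into
    // 32-byte ABI words. This standalone helper shows the same chunking; a short
    // trailing chunk is left-padded with zeroes, matching a big-endian read like
    // `U256::from_be_slice`.
    fn return_data_words(output: &[u8]) -> Vec<[u8; 32]> {
        output
            .chunks(32)
            .map(|chunk| {
                let mut word = [0u8; 32];
                word[32 - chunk.len()..].copy_from_slice(chunk);
                word
            })
            .collect()
    }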

    #[instrument(level = "info", skip_all)]
    async fn handle_input_expectations(
        &self,
        input: &FunctionCallStep,
        execution_receipt: &TransactionReceipt,
        resolver: &(impl ResolverApi + ?Sized),
        tracing_result: &CallFrame,
    ) -> anyhow::Result<()> {
        // Resolving the `input.expected` into a series of expectations that we can then assert on.
        let mut expectations = match input {
            FunctionCallStep {
                expected: Some(Expected::Calldata(calldata)),
                ..
            } => vec![ExpectedOutput::new().with_calldata(calldata.clone())],
            FunctionCallStep {
                expected: Some(Expected::Expected(expected)),
                ..
            } => vec![expected.clone()],
            FunctionCallStep {
                expected: Some(Expected::ExpectedMany(expected)),
                ..
            } => expected.clone(),
            FunctionCallStep { expected: None, .. } => vec![ExpectedOutput::new().with_success()],
        };

        // This is a special case that we have to support separately on its own. If it's a call to
        // the deployer method, then the tests assert that it "returns" the address of the
        // contract. Deployments do not return the address of the contract but the runtime code of
        // the contract, so this assertion would always fail. We therefore replace it with an
        // assertion that the deployment succeeded.
        if let Method::Deployer = &input.method {
            for expectation in expectations.iter_mut() {
                expectation.return_data = None;
            }
        }

        futures::stream::iter(expectations.into_iter().map(Ok))
            .try_for_each_concurrent(None, |expectation| async move {
                self.handle_input_expectation_item(
                    execution_receipt,
                    resolver,
                    expectation,
                    tracing_result,
                )
                .await
            })
            .await
    }
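
    // Illustrative sketch (not in the original source): the expectation fan-out
    // above turns a `Vec` into a stream and checks every item concurrently,
    // short-circuiting on the first error. The same pattern, reduced to plain
    // integers:
    async fn check_all_even(values: Vec<u64>) -> anyhow::Result<()> {
        use futures::TryStreamExt;
        futures::stream::iter(values.into_iter().map(Ok))
            .try_for_each_concurrent(None, |value| async move {
                // Each item is verified independently; any `Err` aborts the rest.
                anyhow::ensure!(value % 2 == 0, "odd value {value}");
                Ok(())
            })
            .await
    }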

    #[instrument(level = "info", skip_all)]
    async fn handle_input_expectation_item(
        &self,
        execution_receipt: &TransactionReceipt,
        resolver: &(impl ResolverApi + ?Sized),
        expectation: ExpectedOutput,
        tracing_result: &CallFrame,
    ) -> anyhow::Result<()> {
        if let Some(ref version_requirement) = expectation.compiler_version {
            if !version_requirement.matches(&self.compiler_version) {
                return Ok(());
            }
        }

        let resolution_context = self
            .default_resolution_context()
            .with_block_number(execution_receipt.block_number.as_ref())
            .with_transaction_hash(&execution_receipt.transaction_hash);

        // Handling the receipt status assertion.
        let expected = !expectation.exception;
        let actual = execution_receipt.status();
        if actual != expected {
            tracing::error!(
                expected,
                actual,
                ?execution_receipt,
                ?tracing_result,
                "Transaction status assertion failed"
            );
            anyhow::bail!(
                "Transaction status assertion failed - Expected {expected} but got {actual}",
            );
        }

        // Handling the calldata assertion.
        if let Some(ref expected_calldata) = expectation.return_data {
            let expected = expected_calldata;
            let actual = &tracing_result.output.as_ref().unwrap_or_default();
            if !expected
                .is_equivalent(actual, resolver, resolution_context)
                .await
                .context("Failed to resolve calldata equivalence for return data assertion")?
            {
                tracing::error!(
                    ?execution_receipt,
                    ?expected,
                    %actual,
                    "Calldata assertion failed"
                );
                anyhow::bail!("Calldata assertion failed - Expected {expected:?} but got {actual}",);
            }
        }

        // Handling the events assertion.
        if let Some(ref expected_events) = expectation.events {
            // Handling the event count assertion.
            let expected = expected_events.len();
            let actual = execution_receipt.logs().len();
            if actual != expected {
                tracing::error!(expected, actual, "Event count assertion failed",);
                anyhow::bail!(
                    "Event count assertion failed - Expected {expected} but got {actual}",
                );
            }

            // Handling the per-event assertions.
            for (event_idx, (expected_event, actual_event)) in expected_events
                .iter()
                .zip(execution_receipt.logs())
                .enumerate()
            {
                // Handling the emitter assertion.
                if let Some(ref expected_address) = expected_event.address {
                    let expected = expected_address
                        .resolve_address(resolver, resolution_context)
                        .await?;
                    let actual = actual_event.address();
                    if actual != expected {
                        tracing::error!(
                            event_idx,
                            %expected,
                            %actual,
                            "Event emitter assertion failed",
                        );
                        anyhow::bail!(
                            "Event emitter assertion failed - Expected {expected} but got {actual}",
                        );
                    }
                }

                // Handling the topics assertion.
                for (expected, actual) in expected_event
                    .topics
                    .as_slice()
                    .iter()
                    .zip(actual_event.topics())
                {
                    let expected = Calldata::new_compound([expected]);
                    if !expected
                        .is_equivalent(&actual.0, resolver, resolution_context)
                        .await
                        .context("Failed to resolve event topic equivalence")?
                    {
                        tracing::error!(
                            event_idx,
                            ?execution_receipt,
                            ?expected,
                            ?actual,
                            "Event topics assertion failed",
                        );
                        anyhow::bail!(
                            "Event topics assertion failed - Expected {expected:?} but got {actual:?}",
                        );
                    }
                }

                // Handling the values assertion.
                let expected = &expected_event.values;
                let actual = &actual_event.data().data;
                if !expected
                    .is_equivalent(&actual.0, resolver, resolution_context)
                    .await
                    .context("Failed to resolve event value equivalence")?
                {
                    tracing::error!(
                        event_idx,
                        ?execution_receipt,
                        ?expected,
                        ?actual,
                        "Event value assertion failed",
                    );
                    anyhow::bail!(
                        "Event value assertion failed - Expected {expected:?} but got {actual:?}",
                    );
                }
            }
        }

        Ok(())
    }
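
    // Illustrative sketch (not in the original source): the event checks above
    // only zip expected against actual logs after the count assertion has
    // passed, so no event is silently skipped; `enumerate` supplies the
    // `event_idx` used in the error reports.
    fn paired_events<'a, E, A>(
        expected: &'a [E],
        actual: &'a [A],
    ) -> impl Iterator<Item = (usize, (&'a E, &'a A))> {
        expected.iter().zip(actual.iter()).enumerate()
    }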

    #[instrument(level = "info", skip_all)]
    async fn handle_input_diff(
        &self,
        tx_hash: TxHash,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<(GethTrace, DiffMode)> {
        let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
            diff_mode: Some(true),
            disable_code: None,
            disable_storage: None,
        });

        let trace = node
            .trace_transaction(tx_hash, trace_options)
            .await
            .context("Failed to obtain geth prestate tracer output")?;
        let diff = node
            .state_diff(tx_hash)
            .await
            .context("Failed to obtain state diff for transaction")?;

        Ok((trace, diff))
    }

    #[instrument(level = "info", skip_all)]
    pub async fn handle_balance_assertion_contract_deployment(
        &mut self,
        metadata: &Metadata,
        balance_assertion: &BalanceAssertionStep,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<()> {
        let Some(address) = balance_assertion.address.as_resolvable_address() else {
            return Ok(());
        };
        let Some(instance) = address.strip_suffix(".address").map(ContractInstance::new) else {
            return Ok(());
        };

        self.get_or_deploy_contract_instance(
            &instance,
            metadata,
            FunctionCallStep::default_caller_address(),
            None,
            None,
            node,
        )
        .await?;
        Ok(())
    }

    #[instrument(level = "info", skip_all)]
    pub async fn handle_balance_assertion_execution(
        &mut self,
        BalanceAssertionStep {
            address,
            expected_balance: amount,
            ..
        }: &BalanceAssertionStep,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<()> {
        let resolver = node.resolver().await?;
        let address = address
            .resolve_address(resolver.as_ref(), self.default_resolution_context())
            .await?;

        let balance = node.balance_of(address).await?;

        let expected = *amount;
        let actual = balance;
        if expected != actual {
            tracing::error!(%expected, %actual, %address, "Balance assertion failed");
            anyhow::bail!(
                "Balance assertion failed - Expected {} but got {} for {} resolved to {}",
                expected,
                actual,
                address,
                address,
            )
        }

        Ok(())
    }

    #[instrument(level = "info", skip_all)]
    pub async fn handle_storage_empty_assertion_contract_deployment(
        &mut self,
        metadata: &Metadata,
        storage_empty_assertion: &StorageEmptyAssertionStep,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<()> {
        let Some(address) = storage_empty_assertion.address.as_resolvable_address() else {
            return Ok(());
        };
        let Some(instance) = address.strip_suffix(".address").map(ContractInstance::new) else {
            return Ok(());
        };

        self.get_or_deploy_contract_instance(
            &instance,
            metadata,
            FunctionCallStep::default_caller_address(),
            None,
            None,
            node,
        )
        .await?;
        Ok(())
    }

    #[instrument(level = "info", skip_all)]
    pub async fn handle_storage_empty_assertion_execution(
        &mut self,
        StorageEmptyAssertionStep {
            address,
            is_storage_empty,
            ..
        }: &StorageEmptyAssertionStep,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<()> {
        let resolver = node.resolver().await?;
        let address = address
            .resolve_address(resolver.as_ref(), self.default_resolution_context())
            .await?;

        let storage = node.latest_state_proof(address, Default::default()).await?;
        let is_empty = storage.storage_hash == EMPTY_ROOT_HASH;

        let expected = is_storage_empty;
        let actual = is_empty;

        if *expected != actual {
            tracing::error!(%expected, %actual, %address, "Storage Empty Assertion failed");
            anyhow::bail!(
                "Storage Empty Assertion failed - Expected {} but got {} for {} resolved to {}",
                expected,
                actual,
                address,
                address,
            )
        }

        Ok(())
    }

    /// Gets the information of a deployed contract or library from the state. If it's found not
    /// to be deployed yet, it will be deployed first.
    ///
    /// If a [`CaseIdx`] is not specified then this contract instance address will be stored in
    /// the cross-case deployed contracts address mapping.
    #[allow(clippy::too_many_arguments)]
    pub async fn get_or_deploy_contract_instance(
        &mut self,
        contract_instance: &ContractInstance,
        metadata: &Metadata,
        deployer: Address,
        calldata: Option<&Calldata>,
        value: Option<EtherValue>,
        node: &dyn EthereumNode,
    ) -> anyhow::Result<(Address, JsonAbi, Option<TransactionReceipt>)> {
        if let Some((_, address, abi)) = self.deployed_contracts.get(contract_instance) {
            return Ok((*address, abi.clone(), None));
        }

        let Some(ContractPathAndIdent {
            contract_source_path,
            contract_ident,
        }) = metadata.contract_sources()?.remove(contract_instance)
        else {
            anyhow::bail!(
                "Contract source not found for instance {:?}",
                contract_instance
            )
        };

        let Some((code, abi)) = self
            .compiled_contracts
            .get(&contract_source_path)
            .and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref()))
            .cloned()
        else {
            anyhow::bail!(
                "Failed to find information for contract {:?}",
                contract_instance
            )
        };

        let mut code = match alloy::hex::decode(&code) {
            Ok(code) => code,
            Err(error) => {
                tracing::error!(
                    ?error,
                    contract_source_path = contract_source_path.display().to_string(),
                    contract_ident = contract_ident.as_ref(),
                    "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking"
                );
                anyhow::bail!("Failed to hex-decode the byte code {}", error)
            }
        };

        if let Some(calldata) = calldata {
            let resolver = node.resolver().await?;
            let calldata = calldata
                .calldata(resolver.as_ref(), self.default_resolution_context())
                .await?;
            code.extend(calldata);
        }

        let tx = {
            let tx = TransactionRequest::default().from(deployer);
            let tx = match value {
                Some(ref value) => tx.value(value.into_inner()),
                _ => tx,
            };
            TransactionBuilder::<Ethereum>::with_deploy_code(tx, code)
        };

        let receipt = match node.execute_transaction(tx).await {
            Ok(receipt) => receipt,
            Err(error) => {
                tracing::error!(?error, "Contract deployment transaction failed.");
                return Err(error);
            }
        };

        let Some(address) = receipt.contract_address else {
            anyhow::bail!("Contract deployment didn't return an address");
        };
        tracing::info!(
            instance_name = ?contract_instance,
            instance_address = ?address,
            "Deployed contract"
        );
        self.execution_reporter
            .report_contract_deployed_event(contract_instance.clone(), address)?;

        self.deployed_contracts.insert(
            contract_instance.clone(),
            (contract_ident, address, abi.clone()),
        );

        Ok((address, abi, Some(receipt)))
    }
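
    // Illustrative sketch (not in the original source): a deployment transaction
    // carries the creation bytecode with the ABI-encoded constructor arguments
    // appended to it (no function selector), which is what `code.extend(calldata)`
    // above builds before `with_deploy_code` is applied.
    fn deploy_payload(creation_code: &[u8], constructor_args: Option<&[u8]>) -> Vec<u8> {
        let mut payload = creation_code.to_vec();
        if let Some(args) = constructor_args {
            payload.extend_from_slice(args);
        }
        payload
    }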

    fn default_resolution_context(&self) -> ResolutionContext<'_> {
        ResolutionContext::default()
            .with_deployed_contracts(&self.deployed_contracts)
            .with_variables(&self.variables)
    }
}

pub struct CaseDriver<'a> {
    metadata: &'a Metadata,
    case: &'a Case,
    platform_state: Vec<(&'a dyn EthereumNode, PlatformIdentifier, CaseState)>,
}

impl<'a> CaseDriver<'a> {
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        metadata: &'a Metadata,
        case: &'a Case,
        platform_state: Vec<(&'a dyn EthereumNode, PlatformIdentifier, CaseState)>,
    ) -> CaseDriver<'a> {
        Self {
            metadata,
            case,
            platform_state,
        }
    }

    #[instrument(level = "info", name = "Executing Case", skip_all)]
    pub async fn execute(&mut self) -> anyhow::Result<usize> {
        let mut steps_executed = 0;
        for (step_idx, step) in self
            .case
            .steps_iterator()
            .enumerate()
            .map(|(idx, v)| (StepIdx::new(idx), v))
        {
            let metadata = self.metadata;
            let step_futures =
                self.platform_state
                    .iter_mut()
                    .map(|(node, platform_id, case_state)| {
                        let platform_id = *platform_id;
                        let node_ref = *node;
                        let step = step.clone();
                        let span = info_span!(
                            "Handling Step",
                            %step_idx,
                            platform = %platform_id,
                        );
                        async move {
                            let step_path = StepPath::from_iterator([step_idx]);
                            case_state
                                .handle_step(metadata, &step, &step_path, node_ref)
                                .await
                                .map_err(|e| (platform_id, e))
                        }
                        .instrument(span)
                    });

            match try_join_all(step_futures).await {
                Ok(_outputs) => {
                    steps_executed += 1;
                }
                Err((platform_id, error)) => {
                    tracing::error!(
                        %step_idx,
                        platform = %platform_id,
                        ?error,
                        "Step failed on platform",
                    );
                    return Err(error);
                }
            }
        }

        Ok(steps_executed)
    }
}
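
// Illustrative sketch (not in the original source): `CaseDriver::execute` above
// awaits one step on every platform in parallel and aborts the case on the first
// failure, tagging the error with the platform it came from. The same control
// flow, reduced to string identifiers:
async fn run_step_on_all(platforms: Vec<String>) -> Result<(), (String, anyhow::Error)> {
    futures::future::try_join_all(platforms.into_iter().map(|platform| async move {
        // Stand-in for `case_state.handle_step(...)`.
        if platform.is_empty() {
            Err((platform.clone(), anyhow::anyhow!("empty platform identifier")))
        } else {
            Ok(())
        }
    }))
    .await
    .map(|_| ())
}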

#[derive(Clone, Debug)]
#[allow(clippy::large_enum_variant)]
pub enum StepOutput {
    FunctionCall(TransactionReceipt, GethTrace, DiffMode),
    BalanceAssertion,
    StorageEmptyAssertion,
    Repetition,
    AccountAllocation,
}
@@ -1,7 +0,0 @@
mod cached_compiler;
mod pool;
mod test;

pub use cached_compiler::*;
pub use pool::*;
pub use test::*;
@@ -1,349 +0,0 @@
use std::collections::BTreeMap;
use std::sync::Arc;
use std::{borrow::Cow, path::Path};

use futures::{Stream, StreamExt, stream};
use indexmap::{IndexMap, indexmap};
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_config::Context;
use revive_dt_format::corpus::Corpus;
use serde_json::{Value, json};

use revive_dt_compiler::Mode;
use revive_dt_compiler::SolidityCompiler;
use revive_dt_format::{
    case::{Case, CaseIdx},
    metadata::MetadataFile,
};
use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{ExecutionSpecificReporter, Report, Reporter, TestCaseStatus};
use revive_dt_report::{TestSpecificReporter, TestSpecifier};
use tracing::{debug, error, info};

use crate::Platform;
use crate::helpers::NodePool;

pub async fn create_test_definitions_stream<'a>(
    // This is only required for creating the compiler objects and is not used anywhere else in
    // the function.
    context: &Context,
    corpus: &'a Corpus,
    platforms_and_nodes: &'a BTreeMap<PlatformIdentifier, (&dyn Platform, NodePool)>,
    only_execute_failed_tests: Option<&Report>,
    reporter: Reporter,
) -> impl Stream<Item = TestDefinition<'a>> {
    let cloned_reporter = reporter.clone();
    stream::iter(
        corpus
            .cases_iterator()
            .inspect(move |(metadata_file, ..)| {
                cloned_reporter
                    .report_metadata_file_discovery_event(
                        metadata_file.metadata_file_path.clone(),
                        metadata_file.content.clone(),
                    )
                    .unwrap();
            })
            .map(move |(metadata_file, case_idx, case, mode)| {
                let reporter = reporter.clone();

                (
                    metadata_file,
                    case_idx,
                    case,
                    mode.clone(),
                    reporter.test_specific_reporter(Arc::new(TestSpecifier {
                        solc_mode: mode.as_ref().clone(),
                        metadata_file_path: metadata_file.metadata_file_path.clone(),
                        case_idx: CaseIdx::new(case_idx),
                    })),
                )
            })
            // Inform the reporter of each one of the test cases that were discovered and that we
            // expect to run.
            .inspect(|(_, _, _, _, reporter)| {
                reporter
                    .report_test_case_discovery_event()
                    .expect("Can't fail");
            }),
    )
    // Creating the TestDefinition objects from all of the various objects we have and creating
    // their required dependencies (e.g., the compiler).
    .filter_map(
        move |(metadata_file, case_idx, case, mode, reporter)| async move {
            let mut platforms = BTreeMap::new();
            for (platform, node_pool) in platforms_and_nodes.values() {
                let node = node_pool.round_robbin();
                let compiler = platform
                    .new_compiler(context.clone(), mode.version.clone().map(Into::into))
                    .await
                    .inspect_err(|err| {
                        error!(
                            ?err,
                            platform_identifier = %platform.platform_identifier(),
                            "Failed to instantiate the compiler"
                        )
                    })
                    .ok()?;

                reporter
                    .report_node_assigned_event(
                        node.id(),
                        platform.platform_identifier(),
                        node.connection_string(),
                    )
                    .expect("Can't fail");

                let reporter =
                    reporter.execution_specific_reporter(node.id(), platform.platform_identifier());

                platforms.insert(
                    platform.platform_identifier(),
                    TestPlatformInformation {
                        platform: *platform,
                        node,
                        compiler,
                        reporter,
                    },
                );
            }

            Some(TestDefinition {
                /* Metadata file information */
                metadata: metadata_file,
                metadata_file_path: metadata_file.metadata_file_path.as_path(),

                /* Mode Information */
                mode: mode.clone(),

                /* Case Information */
                case_idx: CaseIdx::new(case_idx),
                case,

                /* Platform and Node Assignment Information */
                platforms,

                /* Reporter */
                reporter,
            })
        },
    )
    // Filter out the test cases which are incompatible or that can't run in the current setup.
    .filter_map(move |test| async move {
        match test.check_compatibility(only_execute_failed_tests) {
            Ok(()) => Some(test),
            Err((reason, additional_information)) => {
                debug!(
                    metadata_file_path = %test.metadata.metadata_file_path.display(),
                    case_idx = %test.case_idx,
                    mode = %test.mode,
                    reason,
                    additional_information =
                        serde_json::to_string(&additional_information).unwrap(),
                    "Ignoring Test Case"
                );
                test.reporter
                    .report_test_ignored_event(
                        reason.to_string(),
                        additional_information
                            .into_iter()
                            .map(|(k, v)| (k.into(), v))
                            .collect::<IndexMap<_, _>>(),
                    )
                    .expect("Can't fail");
                None
            }
        }
    })
    .inspect(|test| {
        info!(
            metadata_file_path = %test.metadata_file_path.display(),
            case_idx = %test.case_idx,
            mode = %test.mode,
            "Created a test case definition"
        );
    })
}

/// This is a full description of a differential test to run alongside the full metadata file, the
/// specific case to be tested, the platforms that the tests should run on, the specific nodes of
/// these platforms that they should run on, the compilers to use, and everything else needed to
/// make it a complete description.
pub struct TestDefinition<'a> {
    /* Metadata file information */
    pub metadata: &'a MetadataFile,
    pub metadata_file_path: &'a Path,

    /* Mode Information */
    pub mode: Cow<'a, Mode>,

    /* Case Information */
    pub case_idx: CaseIdx,
    pub case: &'a Case,

    /* Platform and Node Assignment Information */
    pub platforms: BTreeMap<PlatformIdentifier, TestPlatformInformation<'a>>,

    /* Reporter */
    pub reporter: TestSpecificReporter,
}

impl<'a> TestDefinition<'a> {
    /// Checks if this test can be run with the current configuration.
    pub fn check_compatibility(
        &self,
        only_execute_failed_tests: Option<&Report>,
    ) -> TestCheckFunctionResult {
        self.check_metadata_file_ignored()?;
        self.check_case_file_ignored()?;
        self.check_target_compatibility()?;
        self.check_evm_version_compatibility()?;
        self.check_compiler_compatibility()?;
        self.check_ignore_succeeded(only_execute_failed_tests)?;
        Ok(())
    }

    /// Checks if the metadata file is ignored or not.
    fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult {
        if self.metadata.ignore.is_some_and(|ignore| ignore) {
            Err(("Metadata file is ignored.", indexmap! {}))
        } else {
            Ok(())
        }
    }

    /// Checks if the case file is ignored or not.
    fn check_case_file_ignored(&self) -> TestCheckFunctionResult {
        if self.case.ignore.is_some_and(|ignore| ignore) {
            Err(("Case is ignored.", indexmap! {}))
        } else {
            Ok(())
        }
    }

    /// Checks if the platforms all support the desired targets in the metadata file.
    fn check_target_compatibility(&self) -> TestCheckFunctionResult {
        let mut error_map = indexmap! {
            "test_desired_targets" => json!(self.metadata.targets.as_ref()),
        };
        let mut is_allowed = true;
        for (_, platform_information) in self.platforms.iter() {
            let is_allowed_for_platform = match self.metadata.targets.as_ref() {
                None => true,
                Some(required_vm_identifiers) => {
                    required_vm_identifiers.contains(&platform_information.platform.vm_identifier())
                }
            };
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform_information.platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }

        if is_allowed {
            Ok(())
        } else {
            Err((
                "One of the platforms does not support the targets allowed by the test.",
                error_map,
            ))
        }
    }

    /// Checks for the compatibility of the EVM version with the platforms specified.
    fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult {
        let Some(evm_version_requirement) = self.metadata.required_evm_version else {
            return Ok(());
        };

        let mut error_map = indexmap! {
            "test_desired_evm_version" => json!(self.metadata.required_evm_version),
        };
        let mut is_allowed = true;
        for (_, platform_information) in self.platforms.iter() {
            let is_allowed_for_platform =
                evm_version_requirement.matches(&platform_information.node.evm_version());
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform_information.platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }

        if is_allowed {
            Ok(())
        } else {
            Err((
                "EVM version is incompatible with the platforms specified",
                error_map,
            ))
        }
    }

    /// Checks if the platforms' compilers support the mode that the test is for.
    fn check_compiler_compatibility(&self) -> TestCheckFunctionResult {
        let mut error_map = indexmap! {
            "test_desired_evm_version" => json!(self.metadata.required_evm_version),
        };
        let mut is_allowed = true;
        for (_, platform_information) in self.platforms.iter() {
            let is_allowed_for_platform = platform_information
                .compiler
                .supports_mode(self.mode.optimize_setting, self.mode.pipeline);
            is_allowed &= is_allowed_for_platform;
            error_map.insert(
                platform_information.platform.platform_identifier().into(),
                json!(is_allowed_for_platform),
            );
        }

        if is_allowed {
            Ok(())
        } else {
            Err((
                "The compilers of the provided platforms do not support this mode.",
                error_map,
            ))
        }
    }

    /// Checks if the test case should be executed or not based on the passed report and whether
    /// the user has instructed the tool to ignore the already succeeding test cases.
    fn check_ignore_succeeded(
        &self,
        only_execute_failed_tests: Option<&Report>,
    ) -> TestCheckFunctionResult {
        let Some(report) = only_execute_failed_tests else {
            return Ok(());
        };

        let test_case_status = report
            .execution_information
            .get(&(self.metadata_file_path.to_path_buf().into()))
            .and_then(|obj| obj.case_reports.get(&self.case_idx))
            .and_then(|obj| obj.mode_execution_reports.get(&self.mode))
            .and_then(|obj| obj.status.as_ref());

        match test_case_status {
            Some(TestCaseStatus::Failed { .. }) => Ok(()),
            Some(TestCaseStatus::Ignored { .. }) => Err((
                "Ignored since it was ignored in a previous run",
                indexmap! {},
            )),
            Some(TestCaseStatus::Succeeded { .. }) => {
                Err(("Ignored since it succeeded in a prior run", indexmap! {}))
            }
            None => Ok(()),
        }
    }
}

pub struct TestPlatformInformation<'a> {
    pub platform: &'a dyn Platform,
    pub node: &'a dyn EthereumNode,
    pub compiler: Box<dyn SolidityCompiler>,
    pub reporter: ExecutionSpecificReporter,
}

type TestCheckFunctionResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>;
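
// Illustrative sketch (not in the original source): every compatibility check
// above returns `TestCheckFunctionResult` - either `Ok(())` or a static reason
// plus a JSON map of per-platform details - which is what lets
// `check_compatibility` chain them with the `?` operator. A minimal check in
// that shape:
fn check_name_is_set(
    name: &str,
) -> Result<(), (&'static str, indexmap::IndexMap<&'static str, serde_json::Value>)> {
    if name.is_empty() {
        Err((
            "Name is not set.",
            indexmap::indexmap! { "name" => serde_json::json!(name) },
        ))
    } else {
        Ok(())
    }
}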
+124 -328
@@ -14,16 +14,13 @@ use revive_dt_common::types::*;
 use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc};
 use revive_dt_config::*;
 use revive_dt_node::{
-    Node,
-    node_implementations::{
-        geth::GethNode, lighthouse_geth::LighthouseGethNode,
-        polkadot_omni_node::PolkadotOmnichainNode, substrate::SubstrateNode,
-        zombienet::ZombienetNode,
-    },
+    Node, geth::GethNode, lighthouse_geth::LighthouseGethNode, substrate::SubstrateNode,
 };
 use revive_dt_node_interaction::EthereumNode;
 use tracing::info;
 
+pub mod driver;
+
 /// A trait that describes the interface for the platforms that are supported by the tool.
 #[allow(clippy::type_complexity)]
 pub trait Platform {
@@ -62,9 +59,6 @@ pub trait Platform {
         context: Context,
         version: Option<VersionOrRequirement>,
     ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>>;
-
-    /// Exports the genesis/chainspec for the node.
-    fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value>;
 }
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
@@ -94,8 +88,7 @@ impl Platform for GethEvmSolcPlatform {
         let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
         let genesis = genesis_configuration.genesis()?.clone();
         Ok(thread::spawn(move || {
-            let use_fallback_gas_filler = matches!(context, Context::Test(..));
-            let node = GethNode::new(context, use_fallback_gas_filler);
+            let node = GethNode::new(context);
             let node = spawn_node::<GethNode>(node, genesis)?;
             Ok(Box::new(node) as Box<_>)
         }))
@@ -111,15 +104,6 @@ impl Platform for GethEvmSolcPlatform {
             compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
         })
     }
-
-    fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
-        let genesis = AsRef::<GenesisConfiguration>::as_ref(&context).genesis()?;
-        let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
-
-        let node_genesis = GethNode::node_genesis(genesis.clone(), &wallet);
-        serde_json::to_value(node_genesis)
-            .context("Failed to convert node genesis to a serde_value")
-    }
 }
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
@@ -149,8 +133,7 @@ impl Platform for LighthouseGethEvmSolcPlatform {
         let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
         let genesis = genesis_configuration.genesis()?.clone();
         Ok(thread::spawn(move || {
-            let use_fallback_gas_filler = matches!(context, Context::Test(..));
-            let node = LighthouseGethNode::new(context, use_fallback_gas_filler);
+            let node = LighthouseGethNode::new(context);
             let node = spawn_node::<LighthouseGethNode>(node, genesis)?;
             Ok(Box::new(node) as Box<_>)
         }))
@@ -166,14 +149,109 @@ impl Platform for LighthouseGethEvmSolcPlatform {
             compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
         })
     }
-
-    fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
-        let genesis = AsRef::<GenesisConfiguration>::as_ref(&context).genesis()?;
-        let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
-
-        let node_genesis = LighthouseGethNode::node_genesis(genesis.clone(), &wallet);
-        serde_json::to_value(node_genesis)
-            .context("Failed to convert node genesis to a serde_value")
-    }
 }
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
+pub struct KitchensinkPolkavmResolcPlatform;
+
+impl Platform for KitchensinkPolkavmResolcPlatform {
+    fn platform_identifier(&self) -> PlatformIdentifier {
+        PlatformIdentifier::KitchensinkPolkavmResolc
+    }
+
+    fn node_identifier(&self) -> NodeIdentifier {
+        NodeIdentifier::Kitchensink
+    }
+
+    fn vm_identifier(&self) -> VmIdentifier {
+        VmIdentifier::PolkaVM
+    }
+
+    fn compiler_identifier(&self) -> CompilerIdentifier {
+        CompilerIdentifier::Resolc
+    }
+
+    fn new_node(
+        &self,
+        context: Context,
+    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
+        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
+        let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
+            .path
+            .clone();
+        let genesis = genesis_configuration.genesis()?.clone();
+        Ok(thread::spawn(move || {
+            let node = SubstrateNode::new(
+                kitchensink_path,
+                SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
+                context,
+            );
+            let node = spawn_node(node, genesis)?;
+            Ok(Box::new(node) as Box<_>)
+        }))
+    }
+
+    fn new_compiler(
+        &self,
+        context: Context,
+        version: Option<VersionOrRequirement>,
+    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
+        Box::pin(async move {
+            let compiler = Resolc::new(context, version).await;
+            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
+        })
+    }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
+pub struct KitchensinkRevmSolcPlatform;
+
+impl Platform for KitchensinkRevmSolcPlatform {
+    fn platform_identifier(&self) -> PlatformIdentifier {
+        PlatformIdentifier::KitchensinkRevmSolc
+    }
+
+    fn node_identifier(&self) -> NodeIdentifier {
+        NodeIdentifier::Kitchensink
+    }
+
+    fn vm_identifier(&self) -> VmIdentifier {
+        VmIdentifier::Evm
+    }
+
+    fn compiler_identifier(&self) -> CompilerIdentifier {
+        CompilerIdentifier::Solc
+    }
+
+    fn new_node(
+        &self,
+        context: Context,
+    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
+        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
+        let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
+            .path
+            .clone();
+        let genesis = genesis_configuration.genesis()?.clone();
+        Ok(thread::spawn(move || {
+            let node = SubstrateNode::new(
+                kitchensink_path,
+                SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
+                context,
+            );
+            let node = spawn_node(node, genesis)?;
+            Ok(Box::new(node) as Box<_>)
+        }))
+    }
+
+    fn new_compiler(
+        &self,
+        context: Context,
+        version: Option<VersionOrRequirement>,
+    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
+        Box::pin(async move {
+            let compiler = Solc::new(context, version).await;
+            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
+        })
+    }
+}
@@ -202,23 +280,15 @@ impl Platform for ReviveDevNodePolkavmResolcPlatform {
         context: Context,
     ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
         let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
-        let revive_dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
-
-        let revive_dev_node_path = revive_dev_node_configuration.path.clone();
-        let revive_dev_node_consensus = revive_dev_node_configuration.consensus.clone();
-
-        let eth_rpc_connection_strings = revive_dev_node_configuration.existing_rpc_url.clone();
-
+        let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
+            .path
+            .clone();
         let genesis = genesis_configuration.genesis()?.clone();
         Ok(thread::spawn(move || {
-            let use_fallback_gas_filler = matches!(context, Context::Test(..));
             let node = SubstrateNode::new(
                 revive_dev_node_path,
                 SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
-                Some(revive_dev_node_consensus),
                 context,
-                &eth_rpc_connection_strings,
-                use_fallback_gas_filler,
             );
             let node = spawn_node(node, genesis)?;
             Ok(Box::new(node) as Box<_>)
@@ -235,16 +305,6 @@ impl Platform for ReviveDevNodePolkavmResolcPlatform {
             compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
         })
     }
-
-    fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
-        let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
-            .path
-            .as_path();
-        let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
-        let export_chainspec_command = SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND;
-
-        SubstrateNode::node_genesis(revive_dev_node_path, export_chainspec_command, &wallet)
-    }
 }
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
@@ -272,23 +332,15 @@ impl Platform for ReviveDevNodeRevmSolcPlatform {
         context: Context,
     ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
         let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
-        let revive_dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
-
-        let revive_dev_node_path = revive_dev_node_configuration.path.clone();
-        let revive_dev_node_consensus = revive_dev_node_configuration.consensus.clone();
-
-        let eth_rpc_connection_strings = revive_dev_node_configuration.existing_rpc_url.clone();
-
+        let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
+            .path
+            .clone();
         let genesis = genesis_configuration.genesis()?.clone();
         Ok(thread::spawn(move || {
-            let use_fallback_gas_filler = matches!(context, Context::Test(..));
             let node = SubstrateNode::new(
                 revive_dev_node_path,
                 SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
-                Some(revive_dev_node_consensus),
                 context,
-                &eth_rpc_connection_strings,
-                use_fallback_gas_filler,
             );
             let node = spawn_node(node, genesis)?;
             Ok(Box::new(node) as Box<_>)
@@ -305,254 +357,6 @@ impl Platform for ReviveDevNodeRevmSolcPlatform {
             compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
        })
     }
-
-    fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
-        let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
-            .path
-            .as_path();
-        let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
-        let export_chainspec_command = SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND;
-
-        SubstrateNode::node_genesis(revive_dev_node_path, export_chainspec_command, &wallet)
-    }
-}
-
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
-pub struct ZombienetPolkavmResolcPlatform;
-
-impl Platform for ZombienetPolkavmResolcPlatform {
-    fn platform_identifier(&self) -> PlatformIdentifier {
-        PlatformIdentifier::ZombienetPolkavmResolc
-    }
-
-    fn node_identifier(&self) -> NodeIdentifier {
-        NodeIdentifier::Zombienet
-    }
-
-    fn vm_identifier(&self) -> VmIdentifier {
-        VmIdentifier::PolkaVM
-    }
-
-    fn compiler_identifier(&self) -> CompilerIdentifier {
-        CompilerIdentifier::Resolc
-    }
-
-    fn new_node(
-        &self,
-        context: Context,
-    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
-        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
-        let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
-            .path
-            .clone();
-        let genesis = genesis_configuration.genesis()?.clone();
-        Ok(thread::spawn(move || {
-            let use_fallback_gas_filler = matches!(context, Context::Test(..));
-            let node =
-                ZombienetNode::new(polkadot_parachain_path, context, use_fallback_gas_filler);
-            let node = spawn_node(node, genesis)?;
-            Ok(Box::new(node) as Box<_>)
-        }))
-    }
-
-    fn new_compiler(
-        &self,
-        context: Context,
-        version: Option<VersionOrRequirement>,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
-        Box::pin(async move {
-            let compiler = Resolc::new(context, version).await;
-            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
-        })
-    }
-
-    fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
-        let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
-            .path
-            .as_path();
-        let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
-
-        ZombienetNode::node_genesis(polkadot_parachain_path, &wallet)
-    }
-}
-
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
-pub struct ZombienetRevmSolcPlatform;
-
-impl Platform for ZombienetRevmSolcPlatform {
-    fn platform_identifier(&self) -> PlatformIdentifier {
-        PlatformIdentifier::ZombienetRevmSolc
-    }
-
-    fn node_identifier(&self) -> NodeIdentifier {
-        NodeIdentifier::Zombienet
-    }
-
-    fn vm_identifier(&self) -> VmIdentifier {
-        VmIdentifier::Evm
-    }
-
-    fn compiler_identifier(&self) -> CompilerIdentifier {
-        CompilerIdentifier::Solc
-    }
-
-    fn new_node(
-        &self,
-        context: Context,
-    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
-        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
-        let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
-            .path
-            .clone();
-        let genesis = genesis_configuration.genesis()?.clone();
-        Ok(thread::spawn(move || {
-            let use_fallback_gas_filler = matches!(context, Context::Test(..));
-            let node =
-                ZombienetNode::new(polkadot_parachain_path, context, use_fallback_gas_filler);
-            let node = spawn_node(node, genesis)?;
-            Ok(Box::new(node) as Box<_>)
-        }))
-    }
-
-    fn new_compiler(
-        &self,
-        context: Context,
-        version: Option<VersionOrRequirement>,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
-        Box::pin(async move {
-            let compiler = Solc::new(context, version).await;
-            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
-        })
-    }
-
-    fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
-        let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
-            .path
-            .as_path();
-        let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
-
-        ZombienetNode::node_genesis(polkadot_parachain_path, &wallet)
-    }
-}
-
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
-pub struct PolkadotOmniNodePolkavmResolcPlatform;
-
-impl Platform for PolkadotOmniNodePolkavmResolcPlatform {
-    fn platform_identifier(&self) -> PlatformIdentifier {
-        PlatformIdentifier::PolkadotOmniNodePolkavmResolc
-    }
-
-    fn node_identifier(&self) -> NodeIdentifier {
-        NodeIdentifier::PolkadotOmniNode
-    }
-
-    fn vm_identifier(&self) -> VmIdentifier {
-        VmIdentifier::PolkaVM
-    }
-
-    fn compiler_identifier(&self) -> CompilerIdentifier {
-        CompilerIdentifier::Resolc
-    }
-
-    fn new_node(
-        &self,
-        context: Context,
-    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
-        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
-        let genesis = genesis_configuration.genesis()?.clone();
-        Ok(thread::spawn(move || {
-            let use_fallback_gas_filler = matches!(context, Context::Test(..));
-            let node = PolkadotOmnichainNode::new(context, use_fallback_gas_filler);
-            let node = spawn_node(node, genesis)?;
-            Ok(Box::new(node) as Box<_>)
-        }))
-    }
-
-    fn new_compiler(
-        &self,
-        context: Context,
-        version: Option<VersionOrRequirement>,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
-        Box::pin(async move {
-            let compiler = Resolc::new(context, version).await;
-            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
-        })
-    }
-
-    fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
-        let polkadot_omnichain_node_configuration =
-            AsRef::<PolkadotOmnichainNodeConfiguration>::as_ref(&context);
-        let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
-
-        PolkadotOmnichainNode::node_genesis(
-            &wallet,
-            polkadot_omnichain_node_configuration
-                .chain_spec_path
-                .as_ref()
-                .context("No WASM runtime path found in the polkadot-omni-node configuration")?,
-        )
-    }
-}
-
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
-pub struct PolkadotOmniNodeRevmSolcPlatform;
-
-impl Platform for PolkadotOmniNodeRevmSolcPlatform {
-    fn platform_identifier(&self) -> PlatformIdentifier {
-        PlatformIdentifier::PolkadotOmniNodeRevmSolc
-    }
-
-    fn node_identifier(&self) -> NodeIdentifier {
-        NodeIdentifier::PolkadotOmniNode
-    }
-
-    fn vm_identifier(&self) -> VmIdentifier {
-        VmIdentifier::Evm
-    }
-
-    fn compiler_identifier(&self) -> CompilerIdentifier {
-        CompilerIdentifier::Solc
-    }
-
-    fn new_node(
-        &self,
-        context: Context,
-    ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
-        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
-        let genesis = genesis_configuration.genesis()?.clone();
-        Ok(thread::spawn(move || {
-            let use_fallback_gas_filler = matches!(context, Context::Test(..));
-            let node = PolkadotOmnichainNode::new(context, use_fallback_gas_filler);
-            let node = spawn_node(node, genesis)?;
-            Ok(Box::new(node) as Box<_>)
-        }))
-    }
-
-    fn new_compiler(
-        &self,
-        context: Context,
-        version: Option<VersionOrRequirement>,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
-        Box::pin(async move {
-            let compiler = Solc::new(context, version).await;
-            compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
-        })
-    }
-
-    fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
-        let polkadot_omnichain_node_configuration =
-            AsRef::<PolkadotOmnichainNodeConfiguration>::as_ref(&context);
-        let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
-
-        PolkadotOmnichainNode::node_genesis(
-            &wallet,
-            polkadot_omnichain_node_configuration
-                .chain_spec_path
-                .as_ref()
-                .context("No WASM runtime path found in the polkadot-omni-node configuration")?,
-        )
-    }
 }
 
 impl From<PlatformIdentifier> for Box<dyn Platform> {
@@ -562,22 +366,18 @@ impl From<PlatformIdentifier> for Box<dyn Platform> {
             PlatformIdentifier::LighthouseGethEvmSolc => {
                 Box::new(LighthouseGethEvmSolcPlatform) as Box<_>
             }
+            PlatformIdentifier::KitchensinkPolkavmResolc => {
+                Box::new(KitchensinkPolkavmResolcPlatform) as Box<_>
+            }
+            PlatformIdentifier::KitchensinkRevmSolc => {
+                Box::new(KitchensinkRevmSolcPlatform) as Box<_>
+            }
             PlatformIdentifier::ReviveDevNodePolkavmResolc => {
                 Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_>
             }
             PlatformIdentifier::ReviveDevNodeRevmSolc => {
                 Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>
             }
-            PlatformIdentifier::ZombienetPolkavmResolc => {
-                Box::new(ZombienetPolkavmResolcPlatform) as Box<_>
-            }
-            PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>,
-            PlatformIdentifier::PolkadotOmniNodePolkavmResolc => {
-                Box::new(PolkadotOmniNodePolkavmResolcPlatform) as Box<_>
-            }
-            PlatformIdentifier::PolkadotOmniNodeRevmSolc => {
-                Box::new(PolkadotOmniNodeRevmSolcPlatform) as Box<_>
-            }
         }
     }
 }
@@ -589,22 +389,18 @@ impl From<PlatformIdentifier> for &dyn Platform {
             PlatformIdentifier::LighthouseGethEvmSolc => {
                 &LighthouseGethEvmSolcPlatform as &dyn Platform
             }
+            PlatformIdentifier::KitchensinkPolkavmResolc => {
+                &KitchensinkPolkavmResolcPlatform as &dyn Platform
+            }
+            PlatformIdentifier::KitchensinkRevmSolc => {
+                &KitchensinkRevmSolcPlatform as &dyn Platform
|
||||||
|
}
|
||||||
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
|
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
|
||||||
&ReviveDevNodePolkavmResolcPlatform as &dyn Platform
|
&ReviveDevNodePolkavmResolcPlatform as &dyn Platform
|
||||||
}
|
}
|
||||||
PlatformIdentifier::ReviveDevNodeRevmSolc => {
|
PlatformIdentifier::ReviveDevNodeRevmSolc => {
|
||||||
&ReviveDevNodeRevmSolcPlatform as &dyn Platform
|
&ReviveDevNodeRevmSolcPlatform as &dyn Platform
|
||||||
}
|
}
|
||||||
PlatformIdentifier::ZombienetPolkavmResolc => {
|
|
||||||
&ZombienetPolkavmResolcPlatform as &dyn Platform
|
|
||||||
}
|
|
||||||
PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform,
|
|
||||||
PlatformIdentifier::PolkadotOmniNodePolkavmResolc => {
|
|
||||||
&PolkadotOmniNodePolkavmResolcPlatform as &dyn Platform
|
|
||||||
}
|
|
||||||
PlatformIdentifier::PolkadotOmniNodeRevmSolc => {
|
|
||||||
&PolkadotOmniNodeRevmSolcPlatform as &dyn Platform
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
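Both `From` impls above make a platform reachable from its bare identifier. A minimal usage sketch, assuming `PlatformIdentifier` derives `PartialEq` and `Debug` (its definition is not part of this diff):

    // Hypothetical usage of the conversions above; not part of the diff itself.
    let platform: &dyn Platform = PlatformIdentifier::ReviveDevNodePolkavmResolc.into();
    assert_eq!(
        platform.platform_identifier(),
        PlatformIdentifier::ReviveDevNodePolkavmResolc
    );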
+742 -86
@@ -1,22 +1,55 @@
-mod differential_benchmarks;
-mod differential_tests;
-mod helpers;
+mod cached_compiler;
+mod pool;
 
-use anyhow::{Context as _, bail};
+use std::{
+    borrow::Cow,
+    collections::{BTreeSet, HashMap},
+    io::{BufWriter, Write, stderr},
+    path::Path,
+    sync::Arc,
+    time::Instant,
+};
+
+use alloy::{
+    network::{Ethereum, TransactionBuilder},
+    rpc::types::TransactionRequest,
+};
+use anyhow::Context as _;
 use clap::Parser;
-use revive_dt_report::{ReportAggregator, TestCaseStatus};
+use futures::stream;
+use futures::{Stream, StreamExt};
+use indexmap::{IndexMap, indexmap};
+use revive_dt_node_interaction::EthereumNode;
+use revive_dt_report::{
+    ExecutionSpecificReporter, ReportAggregator, Reporter, ReporterEvent, TestCaseStatus,
+    TestSpecificReporter, TestSpecifier,
+};
 use schemars::schema_for;
-use tracing::{info, level_filters::LevelFilter};
+use serde_json::{Value, json};
+use tokio::sync::Mutex;
+use tracing::{debug, error, info, info_span, instrument};
 use tracing_subscriber::{EnvFilter, FmtSubscriber};
 
-use revive_dt_config::Context;
-use revive_dt_core::Platform;
-use revive_dt_format::metadata::Metadata;
-
-use crate::{
-    differential_benchmarks::handle_differential_benchmarks,
-    differential_tests::handle_differential_tests,
-};
+use revive_dt_common::{
+    iterators::EitherIter,
+    types::{Mode, PrivateKeyAllocator},
+};
+use revive_dt_compiler::SolidityCompiler;
+use revive_dt_config::{Context, *};
+use revive_dt_core::{
+    Platform,
+    driver::{CaseDriver, CaseState},
+};
+use revive_dt_format::{
+    case::{Case, CaseIdx},
+    corpus::Corpus,
+    metadata::{ContractPathAndIdent, Metadata, MetadataFile},
+    mode::ParsedMode,
+    steps::{FunctionCallStep, Step},
+};
+
+use crate::cached_compiler::CachedCompiler;
+use crate::pool::NodePool;
 fn main() -> anyhow::Result<()> {
     let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()

@@ -31,97 +64,720 @@ fn main() -> anyhow::Result<()> {
         .with_writer(writer)
         .with_thread_ids(false)
         .with_thread_names(false)
-        .with_env_filter(
-            EnvFilter::builder()
-                .with_default_directive(LevelFilter::OFF.into())
-                .from_env_lossy(),
-        )
+        .with_env_filter(EnvFilter::from_default_env())
         .with_ansi(false)
         .pretty()
         .finish();
     tracing::subscriber::set_global_default(subscriber)?;
     info!("Differential testing tool is starting");
 
-    let mut context = Context::try_parse()?;
-    context.update_for_profile();
+    let context = Context::try_parse()?;
 
     let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task();
 
     match context {
-        Context::Test(context) => tokio::runtime::Builder::new_multi_thread()
-            .worker_threads(context.concurrency_configuration.number_of_threads)
-            .enable_all()
-            .build()
-            .expect("Failed building the Runtime")
-            .block_on(async move {
-                let differential_tests_handling_task =
-                    handle_differential_tests(*context, reporter);
-
-                let (_, report) = futures::future::try_join(
-                    differential_tests_handling_task,
-                    report_aggregator_task,
-                )
-                .await?;
-
-                let contains_failure = report
-                    .execution_information
-                    .values()
-                    .flat_map(|values| values.case_reports.values())
-                    .flat_map(|values| values.mode_execution_reports.values())
-                    .any(|report| matches!(report.status, Some(TestCaseStatus::Failed { .. })));
-
-                if contains_failure {
-                    bail!("Some tests failed")
-                }
-
-                Ok(())
-            }),
-        Context::Benchmark(context) => tokio::runtime::Builder::new_multi_thread()
-            .worker_threads(context.concurrency_configuration.number_of_threads)
-            .enable_all()
-            .build()
-            .expect("Failed building the Runtime")
-            .block_on(async move {
-                let differential_benchmarks_handling_task =
-                    handle_differential_benchmarks(*context, reporter);
-
-                let (_, report) = futures::future::try_join(
-                    differential_benchmarks_handling_task,
-                    report_aggregator_task,
-                )
-                .await?;
-
-                let contains_failure = report
-                    .execution_information
-                    .values()
-                    .flat_map(|values| values.case_reports.values())
-                    .flat_map(|values| values.mode_execution_reports.values())
-                    .any(|report| matches!(report.status, Some(TestCaseStatus::Failed { .. })));
-
-                if contains_failure {
-                    bail!("Some benchmarks failed")
-                }
-
-                Ok(())
-            }),
-        Context::ExportGenesis(ref export_genesis_context) => {
-            let platform = Into::<&dyn Platform>::into(export_genesis_context.platform);
-            let genesis = platform.export_genesis(context)?;
-            let genesis_json = serde_json::to_string_pretty(&genesis)
-                .context("Failed to serialize the genesis to JSON")?;
-            println!("{genesis_json}");
-
-            Ok(())
-        }
+        Context::ExecuteTests(context) => {
+            let tests = collect_corpora(&context)
+                .context("Failed to collect corpus files from provided arguments")?
+                .into_iter()
+                .inspect(|(corpus, _)| {
+                    reporter
+                        .report_corpus_file_discovery_event(corpus.clone())
+                        .expect("Can't fail")
+                })
+                .flat_map(|(_, files)| files.into_iter())
+                .inspect(|metadata_file| {
+                    reporter
+                        .report_metadata_file_discovery_event(
+                            metadata_file.metadata_file_path.clone(),
+                            metadata_file.content.clone(),
+                        )
+                        .expect("Can't fail")
+                })
+                .collect::<Vec<_>>();
+
+            tokio::runtime::Builder::new_multi_thread()
+                .worker_threads(context.concurrency_configuration.number_of_threads)
+                .enable_all()
+                .build()
+                .expect("Failed building the Runtime")
+                .block_on(async move {
+                    execute_corpus(*context, &tests, reporter, report_aggregator_task)
+                        .await
+                        .context("Failed to execute corpus")
+                })
+        }
         Context::ExportJsonSchema => {
             let schema = schema_for!(Metadata);
-            println!(
-                "{}",
-                serde_json::to_string_pretty(&schema)
-                    .context("Failed to export the JSON schema")?
-            );
-
+            println!("{}", serde_json::to_string_pretty(&schema).unwrap());
             Ok(())
         }
     }
 }
#[instrument(level = "debug", name = "Collecting Corpora", skip_all)]
|
||||||
|
fn collect_corpora(
|
||||||
|
context: &TestExecutionContext,
|
||||||
|
) -> anyhow::Result<HashMap<Corpus, Vec<MetadataFile>>> {
|
||||||
|
let mut corpora = HashMap::new();
|
||||||
|
|
||||||
|
for path in &context.corpus {
|
||||||
|
let span = info_span!("Processing corpus file", path = %path.display());
|
||||||
|
let _guard = span.enter();
|
||||||
|
|
||||||
|
let corpus = Corpus::try_from_path(path)?;
|
||||||
|
info!(
|
||||||
|
name = corpus.name(),
|
||||||
|
number_of_contained_paths = corpus.path_count(),
|
||||||
|
"Deserialized corpus file"
|
||||||
|
);
|
||||||
|
let tests = corpus.enumerate_tests();
|
||||||
|
corpora.insert(corpus, tests);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(corpora)
|
||||||
|
}
|
||||||
|
|
||||||
|
+async fn run_driver(
+    context: TestExecutionContext,
+    metadata_files: &[MetadataFile],
+    reporter: Reporter,
+    report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
+    platforms: Vec<&dyn Platform>,
+) -> anyhow::Result<()> {
+    let mut nodes = Vec::<(&dyn Platform, NodePool)>::new();
+    for platform in platforms.into_iter() {
+        let pool = NodePool::new(Context::ExecuteTests(Box::new(context.clone())), platform)
+            .inspect_err(|err| {
+                error!(
+                    ?err,
+                    platform_identifier = %platform.platform_identifier(),
+                    "Failed to initialize the node pool for the platform."
+                )
+            })
+            .context("Failed to initialize the node pool")?;
+        nodes.push((platform, pool));
+    }
+
+    let tests_stream = tests_stream(
+        &context,
+        metadata_files.iter(),
+        nodes.as_slice(),
+        reporter.clone(),
+    )
+    .await;
+    let driver_task = start_driver_task(&context, tests_stream)
+        .await
+        .context("Failed to start driver task")?;
+    let cli_reporting_task = start_cli_reporting_task(reporter);
+
+    let (_, _, rtn) = tokio::join!(cli_reporting_task, driver_task, report_aggregator_task);
+    rtn?;
+
+    Ok(())
+}
+
+async fn tests_stream<'a>(
+    args: &TestExecutionContext,
+    metadata_files: impl IntoIterator<Item = &'a MetadataFile> + Clone,
+    nodes: &'a [(&dyn Platform, NodePool)],
+    reporter: Reporter,
+) -> impl Stream<Item = Test<'a>> {
+    let tests = metadata_files
+        .into_iter()
+        .flat_map(|metadata_file| {
+            metadata_file
+                .cases
+                .iter()
+                .enumerate()
+                .map(move |(case_idx, case)| (metadata_file, case_idx, case))
+        })
+        // Flatten over the modes, prefer the case modes over the metadata file modes.
+        .flat_map(|(metadata_file, case_idx, case)| {
+            let reporter = reporter.clone();
+
+            let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
+            let modes = match modes {
+                Some(modes) => EitherIter::A(
+                    ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned),
+                ),
+                None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
+            };
+
+            modes.into_iter().map(move |mode| {
+                (
+                    metadata_file,
+                    case_idx,
+                    case,
+                    mode.clone(),
+                    reporter.test_specific_reporter(Arc::new(TestSpecifier {
+                        solc_mode: mode.as_ref().clone(),
+                        metadata_file_path: metadata_file.metadata_file_path.clone(),
+                        case_idx: CaseIdx::new(case_idx),
+                    })),
+                )
+            })
+        })
+        .collect::<Vec<_>>();
+
+    // Note: before we do any kind of filtering or process the iterator in any way, we need to
+    // inform the report aggregator of all of the cases that were found as it keeps a state of the
+    // test cases for its internal use.
+    for (_, _, _, _, reporter) in tests.iter() {
+        reporter
+            .report_test_case_discovery_event()
+            .expect("Can't fail")
+    }
+
+    stream::iter(tests.into_iter())
+        .filter_map(
+            move |(metadata_file, case_idx, case, mode, reporter)| async move {
+                let mut platforms = Vec::new();
+                for (platform, node_pool) in nodes.iter() {
+                    let node = node_pool.round_robbin();
+                    let compiler = platform
+                        .new_compiler(
+                            Context::ExecuteTests(Box::new(args.clone())),
+                            mode.version.clone().map(Into::into),
+                        )
+                        .await
+                        .inspect_err(|err| {
+                            error!(
+                                ?err,
+                                platform_identifier = %platform.platform_identifier(),
+                                "Failed to instantiate the compiler"
+                            )
+                        })
+                        .ok()?;
+
+                    let reporter = reporter
+                        .execution_specific_reporter(node.id(), platform.platform_identifier());
+                    platforms.push((*platform, node, compiler, reporter));
+                }
+
+                Some(Test {
+                    metadata: metadata_file,
+                    metadata_file_path: metadata_file.metadata_file_path.as_path(),
+                    mode: mode.clone(),
+                    case_idx: CaseIdx::new(case_idx),
+                    case,
+                    platforms,
+                    reporter,
+                })
+            },
+        )
+        .filter_map(move |test| async move {
+            match test.check_compatibility() {
+                Ok(()) => Some(test),
+                Err((reason, additional_information)) => {
+                    debug!(
+                        metadata_file_path = %test.metadata.metadata_file_path.display(),
+                        case_idx = %test.case_idx,
+                        mode = %test.mode,
+                        reason,
+                        additional_information =
+                            serde_json::to_string(&additional_information).unwrap(),
+                        "Ignoring Test Case"
+                    );
+                    test.reporter
+                        .report_test_ignored_event(
+                            reason.to_string(),
+                            additional_information
+                                .into_iter()
+                                .map(|(k, v)| (k.into(), v))
+                                .collect::<IndexMap<_, _>>(),
+                        )
+                        .expect("Can't fail");
+                    None
+                }
+            }
+        })
+}
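`EitherIter` above lets the two `match` arms return different concrete iterator types behind a single type. A generic sketch of the pattern it presumably implements (this is not the crate's actual definition):

    enum Either<A, B> {
        A(A),
        B(B),
    }

    impl<A: Iterator, B: Iterator<Item = A::Item>> Iterator for Either<A, B> {
        type Item = A::Item;
        // Delegates to whichever branch this value holds, so both arms of a
        // `match` can be unified without boxing.
        fn next(&mut self) -> Option<Self::Item> {
            match self {
                Either::A(a) => a.next(),
                Either::B(b) => b.next(),
            }
        }
    }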
+async fn start_driver_task<'a>(
+    context: &TestExecutionContext,
+    tests: impl Stream<Item = Test<'a>>,
+) -> anyhow::Result<impl Future<Output = ()>> {
+    info!("Starting driver task");
+
+    let cached_compiler = Arc::new(
+        CachedCompiler::new(
+            context
+                .working_directory
+                .as_path()
+                .join("compilation_cache"),
+            context
+                .compilation_configuration
+                .invalidate_compilation_cache,
+        )
+        .await
+        .context("Failed to initialize cached compiler")?,
+    );
+
+    Ok(tests.for_each_concurrent(
+        context.concurrency_configuration.concurrency_limit(),
+        move |test| {
+            let cached_compiler = cached_compiler.clone();
+
+            async move {
+                for (platform, node, _, _) in test.platforms.iter() {
+                    test.reporter
+                        .report_node_assigned_event(
+                            node.id(),
+                            platform.platform_identifier(),
+                            node.connection_string(),
+                        )
+                        .expect("Can't fail");
+                }
+
+                let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
+                    context.wallet_configuration.highest_private_key_exclusive(),
+                )));
+
+                let reporter = test.reporter.clone();
+                let result =
+                    handle_case_driver(&test, cached_compiler, private_key_allocator).await;
+
+                match result {
+                    Ok(steps_executed) => reporter
+                        .report_test_succeeded_event(steps_executed)
+                        .expect("Can't fail"),
+                    Err(error) => reporter
+                        .report_test_failed_event(format!("{error:#}"))
+                        .expect("Can't fail"),
+                }
+            }
+        },
+    ))
+}
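The driver bounds parallelism with `for_each_concurrent` rather than spawning one task per test. A self-contained sketch of that pattern (the limit and the workload are illustrative, not taken from this repository; assumes tokio with the `macros` and `rt-multi-thread` features):

    use futures::{stream, StreamExt};

    #[tokio::main]
    async fn main() {
        // At most 4 items are in flight at once; the rest of the stream is
        // pulled lazily as slots free up.
        stream::iter(0..16)
            .for_each_concurrent(Some(4), |i| async move {
                println!("running test {i}");
            })
            .await;
    }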
+#[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
+async fn start_cli_reporting_task(reporter: Reporter) {
+    let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
+    drop(reporter);
+
+    let start = Instant::now();
+
+    const GREEN: &str = "\x1B[32m";
+    const RED: &str = "\x1B[31m";
+    const GREY: &str = "\x1B[90m";
+    const COLOR_RESET: &str = "\x1B[0m";
+    const BOLD: &str = "\x1B[1m";
+    const BOLD_RESET: &str = "\x1B[22m";
+
+    let mut number_of_successes = 0;
+    let mut number_of_failures = 0;
+
+    let mut buf = BufWriter::new(stderr());
+    while let Ok(event) = aggregator_events_rx.recv().await {
+        let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
+            metadata_file_path,
+            mode,
+            case_status,
+        } = event
+        else {
+            continue;
+        };
+
+        let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
+        for (case_idx, case_status) in case_status.into_iter() {
+            let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
+            let _ = match case_status {
+                TestCaseStatus::Succeeded { steps_executed } => {
+                    number_of_successes += 1;
+                    writeln!(
+                        buf,
+                        "{}{}Case Succeeded{} - Steps Executed: {}{}",
+                        GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET
+                    )
+                }
+                TestCaseStatus::Failed { reason } => {
+                    number_of_failures += 1;
+                    writeln!(
+                        buf,
+                        "{}{}Case Failed{} - Reason: {}{}",
+                        RED,
+                        BOLD,
+                        BOLD_RESET,
+                        reason.trim(),
+                        COLOR_RESET,
+                    )
+                }
+                TestCaseStatus::Ignored { reason, .. } => writeln!(
+                    buf,
+                    "{}{}Case Ignored{} - Reason: {}{}",
+                    GREY,
+                    BOLD,
+                    BOLD_RESET,
+                    reason.trim(),
+                    COLOR_RESET,
+                ),
+            };
+        }
+        let _ = writeln!(buf);
+    }
+
+    // Summary at the end.
+    let _ = writeln!(
+        buf,
+        "{} cases: {}{}{} cases succeeded, {}{}{} cases failed in {} seconds",
+        number_of_successes + number_of_failures,
+        GREEN,
+        number_of_successes,
+        COLOR_RESET,
+        RED,
+        number_of_failures,
+        COLOR_RESET,
+        start.elapsed().as_secs()
+    );
+}
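The `GREEN`/`BOLD` constants in the reporting task are plain ANSI SGR escape sequences, so the summary renders in any ANSI-capable terminal. For example, this standalone line prints a bold green "ok" and then resets both attributes:

    fn main() {
        // 32 = green foreground, 1 = bold, 22 = bold off, 0 = full reset.
        println!("\x1B[32m\x1B[1mok\x1B[22m\x1B[0m");
    }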
+#[allow(clippy::too_many_arguments)]
+#[instrument(
+    level = "info",
+    name = "Handling Case",
+    skip_all,
+    fields(
+        metadata_file_path = %test.metadata.relative_path().display(),
+        mode = %test.mode,
+        case_idx = %test.case_idx,
+        case_name = test.case.name.as_deref().unwrap_or("Unnamed Case"),
+    )
+)]
+async fn handle_case_driver<'a>(
+    test: &Test<'a>,
+    cached_compiler: Arc<CachedCompiler<'a>>,
+    private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
+) -> anyhow::Result<usize> {
+    let platform_state = stream::iter(test.platforms.iter())
+        // Compiling the pre-link contracts.
+        .filter_map(|(platform, node, compiler, reporter)| {
+            let cached_compiler = cached_compiler.clone();
+
+            async move {
+                let compiler_output = cached_compiler
+                    .compile_contracts(
+                        test.metadata,
+                        test.metadata_file_path,
+                        test.mode.clone(),
+                        None,
+                        compiler.as_ref(),
+                        *platform,
+                        reporter,
+                    )
+                    .await
+                    .inspect_err(|err| {
+                        error!(
+                            ?err,
+                            platform_identifier = %platform.platform_identifier(),
+                            "Pre-linking compilation failed"
+                        )
+                    })
+                    .ok()?;
+                Some((test, platform, node, compiler, reporter, compiler_output))
+            }
+        })
+        // Deploying the libraries for the platform.
+        .filter_map(
+            |(test, platform, node, compiler, reporter, compiler_output)| async move {
+                let mut deployed_libraries = None::<HashMap<_, _>>;
+                let mut contract_sources = test
+                    .metadata
+                    .contract_sources()
+                    .inspect_err(|err| {
+                        error!(
+                            ?err,
+                            platform_identifier = %platform.platform_identifier(),
+                            "Failed to retrieve contract sources from metadata"
+                        )
+                    })
+                    .ok()?;
+                for library_instance in test
+                    .metadata
+                    .libraries
+                    .iter()
+                    .flatten()
+                    .flat_map(|(_, map)| map.values())
+                {
+                    debug!(%library_instance, "Deploying Library Instance");
+
+                    let ContractPathAndIdent {
+                        contract_source_path: library_source_path,
+                        contract_ident: library_ident,
+                    } = contract_sources.remove(library_instance)?;
+
+                    let (code, abi) = compiler_output
+                        .contracts
+                        .get(&library_source_path)
+                        .and_then(|contracts| contracts.get(library_ident.as_str()))?;
+
+                    let code = alloy::hex::decode(code).ok()?;
+
+                    // Getting the deployer address from the cases themselves. This is to ensure
+                    // that we're doing the deployments from different accounts and therefore we're
+                    // not slowed down by the nonce.
+                    let deployer_address = test
+                        .case
+                        .steps
+                        .iter()
+                        .filter_map(|step| match step {
+                            Step::FunctionCall(input) => input.caller.as_address().copied(),
+                            Step::BalanceAssertion(..) => None,
+                            Step::StorageEmptyAssertion(..) => None,
+                            Step::Repeat(..) => None,
+                            Step::AllocateAccount(..) => None,
+                        })
+                        .next()
+                        .unwrap_or(FunctionCallStep::default_caller_address());
+                    let tx = TransactionBuilder::<Ethereum>::with_deploy_code(
+                        TransactionRequest::default().from(deployer_address),
+                        code,
+                    );
+                    let receipt = node
+                        .execute_transaction(tx)
+                        .await
+                        .inspect_err(|err| {
+                            error!(
+                                ?err,
+                                %library_instance,
+                                platform_identifier = %platform.platform_identifier(),
+                                "Failed to deploy the library"
+                            )
+                        })
+                        .ok()?;
+
+                    debug!(
+                        ?library_instance,
+                        platform_identifier = %platform.platform_identifier(),
+                        "Deployed library"
+                    );
+
+                    let library_address = receipt.contract_address?;
+
+                    deployed_libraries.get_or_insert_default().insert(
+                        library_instance.clone(),
+                        (library_ident.clone(), library_address, abi.clone()),
+                    );
+                }
+
+                Some((
+                    test,
+                    platform,
+                    node,
+                    compiler,
+                    reporter,
+                    compiler_output,
+                    deployed_libraries,
+                ))
+            },
+        )
+        // Compiling the post-link contracts.
+        .filter_map(
+            |(test, platform, node, compiler, reporter, _, deployed_libraries)| {
+                let cached_compiler = cached_compiler.clone();
+                let private_key_allocator = private_key_allocator.clone();
+
+                async move {
+                    let compiler_output = cached_compiler
+                        .compile_contracts(
+                            test.metadata,
+                            test.metadata_file_path,
+                            test.mode.clone(),
+                            deployed_libraries.as_ref(),
+                            compiler.as_ref(),
+                            *platform,
+                            reporter,
+                        )
+                        .await
+                        .inspect_err(|err| {
+                            error!(
+                                ?err,
+                                platform_identifier = %platform.platform_identifier(),
+                                "Post-linking compilation failed"
+                            )
+                        })
+                        .ok()?;
+
+                    let case_state = CaseState::new(
+                        compiler.version().clone(),
+                        compiler_output.contracts,
+                        deployed_libraries.unwrap_or_default(),
+                        reporter.clone(),
+                        private_key_allocator,
+                    );
+
+                    Some((*node, platform.platform_identifier(), case_state))
+                }
+            },
+        )
+        // Collect
+        .collect::<Vec<_>>()
+        .await;
+
+    let mut driver = CaseDriver::new(test.metadata, test.case, platform_state);
+    driver
+        .execute()
+        .await
+        .inspect(|steps_executed| info!(steps_executed, "Case succeeded"))
+}
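`deployed_libraries` starts as `None` and only materialises a map on the first successful library deployment, via the standard library's `Option::get_or_insert_default`. A minimal sketch of that idiom:

    use std::collections::HashMap;

    fn main() {
        let mut deployed: Option<HashMap<String, u32>> = None;
        // Lazily creates the empty map on first use, then inserts into it.
        deployed.get_or_insert_default().insert("library".into(), 1);
        assert!(deployed.is_some());
    }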
+async fn execute_corpus(
+    context: TestExecutionContext,
+    tests: &[MetadataFile],
+    reporter: Reporter,
+    report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
+) -> anyhow::Result<()> {
+    let platforms = context
+        .platforms
+        .iter()
+        .copied()
+        .collect::<BTreeSet<_>>()
+        .into_iter()
+        .map(Into::<&dyn Platform>::into)
+        .collect::<Vec<_>>();
+
+    run_driver(context, tests, reporter, report_aggregator_task, platforms).await?;
+
+    Ok(())
+}
+/// This represents a single "test"; a mode, path and collection of cases.
+#[allow(clippy::type_complexity)]
+struct Test<'a> {
+    metadata: &'a MetadataFile,
+    metadata_file_path: &'a Path,
+    mode: Cow<'a, Mode>,
+    case_idx: CaseIdx,
+    case: &'a Case,
+    platforms: Vec<(
+        &'a dyn Platform,
+        &'a dyn EthereumNode,
+        Box<dyn SolidityCompiler>,
+        ExecutionSpecificReporter,
+    )>,
+    reporter: TestSpecificReporter,
+}
+
+impl<'a> Test<'a> {
+    /// Checks if this test can be run with the current configuration.
+    pub fn check_compatibility(&self) -> TestCheckFunctionResult {
+        self.check_metadata_file_ignored()?;
+        self.check_case_file_ignored()?;
+        self.check_target_compatibility()?;
+        self.check_evm_version_compatibility()?;
+        self.check_compiler_compatibility()?;
+        Ok(())
+    }
+
+    /// Checks if the metadata file is ignored or not.
+    fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult {
+        if self.metadata.ignore.is_some_and(|ignore| ignore) {
+            Err(("Metadata file is ignored.", indexmap! {}))
+        } else {
+            Ok(())
+        }
+    }
+
+    /// Checks if the case file is ignored or not.
+    fn check_case_file_ignored(&self) -> TestCheckFunctionResult {
+        if self.case.ignore.is_some_and(|ignore| ignore) {
+            Err(("Case is ignored.", indexmap! {}))
+        } else {
+            Ok(())
+        }
+    }
+
+    /// Checks if the platforms all support the desired targets in the metadata file.
+    fn check_target_compatibility(&self) -> TestCheckFunctionResult {
+        let mut error_map = indexmap! {
+            "test_desired_targets" => json!(self.metadata.targets.as_ref()),
+        };
+        let mut is_allowed = true;
+        for (platform, ..) in self.platforms.iter() {
+            let is_allowed_for_platform = match self.metadata.targets.as_ref() {
+                None => true,
+                Some(targets) => {
+                    let mut target_matches = false;
+                    for target in targets.iter() {
+                        if &platform.vm_identifier() == target {
+                            target_matches = true;
+                            break;
+                        }
+                    }
+                    target_matches
+                }
+            };
+            is_allowed &= is_allowed_for_platform;
+            error_map.insert(
+                platform.platform_identifier().into(),
+                json!(is_allowed_for_platform),
+            );
+        }
+
+        if is_allowed {
+            Ok(())
+        } else {
+            Err((
+                "One of the platforms does not support the targets allowed by the test.",
+                error_map,
+            ))
+        }
+    }
+
+    /// Checks the compatibility of the EVM version with the platforms specified.
+    fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult {
+        let Some(evm_version_requirement) = self.metadata.required_evm_version else {
+            return Ok(());
+        };
+
+        let mut error_map = indexmap! {
+            "test_desired_evm_version" => json!(self.metadata.required_evm_version),
+        };
+        let mut is_allowed = true;
+        for (platform, node, ..) in self.platforms.iter() {
+            let is_allowed_for_platform = evm_version_requirement.matches(&node.evm_version());
+            is_allowed &= is_allowed_for_platform;
+            error_map.insert(
+                platform.platform_identifier().into(),
+                json!(is_allowed_for_platform),
+            );
+        }
+
+        if is_allowed {
+            Ok(())
+        } else {
+            Err((
+                "EVM version is incompatible for the platforms specified",
+                error_map,
+            ))
+        }
+    }
+
+    /// Checks if the platforms' compilers support the mode that the test is for.
+    fn check_compiler_compatibility(&self) -> TestCheckFunctionResult {
+        let mut error_map = indexmap! {
+            "test_desired_evm_version" => json!(self.metadata.required_evm_version),
+        };
+        let mut is_allowed = true;
+        for (platform, _, compiler, ..) in self.platforms.iter() {
+            let is_allowed_for_platform =
+                compiler.supports_mode(self.mode.optimize_setting, self.mode.pipeline);
+            is_allowed &= is_allowed_for_platform;
+            error_map.insert(
+                platform.platform_identifier().into(),
+                json!(is_allowed_for_platform),
+            );
+        }
+
+        if is_allowed {
+            Ok(())
+        } else {
+            Err((
+                "The compilers for the provided platforms do not support this mode.",
+                error_map,
+            ))
+        }
+    }
+}
+
+type TestCheckFunctionResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>;
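The target scan in `check_target_compatibility` uses a manual `for` loop with `break`; the same predicate can be phrased with `Iterator::any`. A behaviour-preserving sketch with generic names (not a change made in this diff):

    // Sketch: `None` means "no target restriction", otherwise any match allows it.
    fn target_allowed<T: PartialEq>(targets: Option<&Vec<T>>, vm: &T) -> bool {
        match targets {
            None => true,
            Some(targets) => targets.iter().any(|target| target == vm),
        }
    }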
@@ -16,7 +16,7 @@ pub struct NodePool {
 
 impl NodePool {
     /// Create a new Pool. This will start as many nodes as there are workers in `config`.
-    pub async fn new(context: Context, platform: &dyn Platform) -> anyhow::Result<Self> {
+    pub fn new(context: Context, platform: &dyn Platform) -> anyhow::Result<Self> {
         let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
         let nodes = concurrency_configuration.number_of_nodes;
 
@@ -33,18 +33,11 @@ impl NodePool {
                     .join()
                     .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))
                     .context("Failed to join node spawn thread")?
+                    .map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))
                     .context("Node failed to spawn")?,
             );
         }
 
-        let pre_transactions_tasks = nodes
-            .iter_mut()
-            .map(|node| node.pre_transactions())
-            .collect::<Vec<_>>();
-        futures::future::try_join_all(pre_transactions_tasks)
-            .await
-            .context("Failed to run the pre-transactions task")?;
-
         Ok(Self {
             nodes,
             next: Default::default(),
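The pool hands out nodes in rotation via `round_robbin` (the identifier as spelled in the code). Its field types are not visible in this hunk; assuming `next` is an `AtomicUsize` (consistent with `next: Default::default()`), a round-robin pick can look like this sketch:

    use std::sync::atomic::{AtomicUsize, Ordering};

    struct Pool<T> {
        nodes: Vec<T>,
        next: AtomicUsize,
    }

    impl<T> Pool<T> {
        // Wraps around the node list; Relaxed suffices because only the
        // rotation order, not any synchronization, depends on the counter.
        // Assumes a non-empty pool (the modulo would panic otherwise).
        fn round_robin(&self) -> &T {
            let idx = self.next.fetch_add(1, Ordering::Relaxed) % self.nodes.len();
            &self.nodes[idx]
        }
    }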
@@ -14,14 +14,16 @@ revive-dt-common = { workspace = true }
 revive-common = { workspace = true }
 
 alloy = { workspace = true }
+alloy-primitives = { workspace = true }
+alloy-sol-types = { workspace = true }
 anyhow = { workspace = true }
 futures = { workspace = true }
+regex = { workspace = true }
 tracing = { workspace = true }
 schemars = { workspace = true }
 semver = { workspace = true }
 serde = { workspace = true, features = ["derive"] }
 serde_json = { workspace = true }
-itertools = { workspace = true }
 
 [dev-dependencies]
 tokio = { workspace = true }
@@ -1,13 +1,12 @@
-use alloy::primitives::Address;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
 
-use revive_dt_common::{
-    macros::define_wrapper_type,
-    types::{Mode, ParsedMode},
-};
+use revive_dt_common::{macros::define_wrapper_type, types::Mode};
 
-use crate::steps::*;
+use crate::{
+    mode::ParsedMode,
+    steps::{Expected, RepeatStep, Step},
+};
 
 #[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)]
 pub struct Case {

@@ -108,20 +107,6 @@ impl Case {
             None => Mode::all().cloned().collect(),
         }
     }
-
-    pub fn deployer_address(&self) -> Address {
-        self.steps
-            .iter()
-            .filter_map(|step| match step {
-                Step::FunctionCall(input) => input.caller.as_address().copied(),
-                Step::BalanceAssertion(..) => None,
-                Step::StorageEmptyAssertion(..) => None,
-                Step::Repeat(..) => None,
-                Step::AllocateAccount(..) => None,
-            })
-            .next()
-            .unwrap_or(FunctionCallStep::default_caller_address())
-    }
 }
 
 define_wrapper_type!(
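The removed `deployer_address` helper survives inline in `handle_case_driver` earlier in this diff. Its `filter_map(...).next()` scan can equally be written with `find_map`; a behaviour-equivalent sketch reusing the crate's own types:

    pub fn deployer_address(&self) -> Address {
        // First FunctionCall step's caller wins; otherwise fall back to the default.
        self.steps
            .iter()
            .find_map(|step| match step {
                Step::FunctionCall(input) => input.caller.as_address().copied(),
                _ => None,
            })
            .unwrap_or(FunctionCallStep::default_caller_address())
    }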
+79 -148
@@ -1,167 +1,56 @@
-use std::{
-    borrow::Cow,
-    collections::HashMap,
-    path::{Path, PathBuf},
-};
-
-use itertools::Itertools;
-use revive_dt_common::{
-    iterators::{EitherIter, FilesWithExtensionIterator},
-    types::{Mode, ParsedMode, ParsedTestSpecifier},
-};
-use tracing::{debug, warn};
-
-use crate::{
-    case::{Case, CaseIdx},
-    metadata::{Metadata, MetadataFile},
-};
-
-#[derive(Default)]
-pub struct Corpus {
-    test_specifiers: HashMap<ParsedTestSpecifier, Vec<PathBuf>>,
-    metadata_files: HashMap<PathBuf, MetadataFile>,
-}
-
-impl Corpus {
-    pub fn new() -> Self {
-        Default::default()
-    }
-
-    pub fn with_test_specifier(
-        mut self,
-        test_specifier: ParsedTestSpecifier,
-    ) -> anyhow::Result<Self> {
-        match &test_specifier {
-            ParsedTestSpecifier::FileOrDirectory {
-                metadata_or_directory_file_path: metadata_file_path,
-            }
-            | ParsedTestSpecifier::Case {
-                metadata_file_path, ..
-            }
-            | ParsedTestSpecifier::CaseWithMode {
-                metadata_file_path, ..
-            } => {
-                let metadata_files = enumerate_metadata_files(metadata_file_path);
-                self.test_specifiers.insert(
-                    test_specifier,
-                    metadata_files
-                        .iter()
-                        .map(|metadata_file| metadata_file.metadata_file_path.clone())
-                        .collect(),
-                );
-                for metadata_file in metadata_files.into_iter() {
-                    self.metadata_files
-                        .insert(metadata_file.metadata_file_path.clone(), metadata_file);
-                }
-            }
-        };
-
-        Ok(self)
-    }
-
-    pub fn cases_iterator(
-        &self,
-    ) -> impl Iterator<Item = (&'_ MetadataFile, CaseIdx, &'_ Case, Cow<'_, Mode>)> + '_ {
-        let mut iterator = Box::new(std::iter::empty())
-            as Box<dyn Iterator<Item = (&'_ MetadataFile, CaseIdx, &'_ Case, Cow<'_, Mode>)> + '_>;
-
-        for (test_specifier, metadata_file_paths) in self.test_specifiers.iter() {
-            for metadata_file_path in metadata_file_paths {
-                let metadata_file = self
-                    .metadata_files
-                    .get(metadata_file_path)
-                    .expect("Must succeed");
-
-                match test_specifier {
-                    ParsedTestSpecifier::FileOrDirectory { .. } => {
-                        for (case_idx, case) in metadata_file.cases.iter().enumerate() {
-                            let case_idx = CaseIdx::new(case_idx);
-
-                            let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
-                            let modes = match modes {
-                                Some(modes) => EitherIter::A(
-                                    ParsedMode::many_to_modes(modes.iter())
-                                        .map(Cow::<'static, _>::Owned),
-                                ),
-                                None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
-                            };
-
-                            iterator = Box::new(
-                                iterator.chain(
-                                    modes
-                                        .into_iter()
-                                        .map(move |mode| (metadata_file, case_idx, case, mode)),
-                                ),
-                            )
-                        }
-                    }
-                    ParsedTestSpecifier::Case { case_idx, .. } => {
-                        let Some(case) = metadata_file.cases.get(*case_idx) else {
-                            warn!(
-                                test_specifier = %test_specifier,
-                                metadata_file_path = %metadata_file_path.display(),
-                                case_idx = case_idx,
-                                case_count = metadata_file.cases.len(),
-                                "Specified case not found in metadata file"
-                            );
-                            continue;
-                        };
-                        let case_idx = CaseIdx::new(*case_idx);
-
-                        let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
-                        let modes = match modes {
-                            Some(modes) => EitherIter::A(
-                                ParsedMode::many_to_modes(modes.iter())
-                                    .map(Cow::<'static, Mode>::Owned),
-                            ),
-                            None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
-                        };
-
-                        iterator = Box::new(
-                            iterator.chain(
-                                modes
-                                    .into_iter()
-                                    .map(move |mode| (metadata_file, case_idx, case, mode)),
-                            ),
-                        )
-                    }
-                    ParsedTestSpecifier::CaseWithMode { case_idx, mode, .. } => {
-                        let Some(case) = metadata_file.cases.get(*case_idx) else {
-                            warn!(
-                                test_specifier = %test_specifier,
-                                metadata_file_path = %metadata_file_path.display(),
-                                case_idx = case_idx,
-                                case_count = metadata_file.cases.len(),
-                                "Specified case not found in metadata file"
-                            );
-                            continue;
-                        };
-                        let case_idx = CaseIdx::new(*case_idx);
-
-                        let mode = Cow::Borrowed(mode);
-                        iterator = Box::new(iterator.chain(std::iter::once((
-                            metadata_file,
-                            case_idx,
-                            case,
-                            mode,
-                        ))))
-                    }
-                }
-            }
-        }
-
-        iterator.unique_by(|item| (&item.0.metadata_file_path, item.1, item.3.clone()))
-    }
-
-    pub fn metadata_file_count(&self) -> usize {
-        self.metadata_files.len()
-    }
-}
-
-fn enumerate_metadata_files(path: impl AsRef<Path>) -> Vec<MetadataFile> {
-    let root_path = path.as_ref();
-    let mut tests = if !root_path.is_dir() {
-        Box::new(std::iter::once(root_path.to_path_buf())) as Box<dyn Iterator<Item = _>>
-    } else {
+use std::{
+    fs::File,
+    path::{Path, PathBuf},
+};
+
+use revive_dt_common::iterators::FilesWithExtensionIterator;
+use serde::{Deserialize, Serialize};
+use tracing::{debug, info};
+
+use crate::metadata::{Metadata, MetadataFile};
+use anyhow::Context as _;
+
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum Corpus {
+    SinglePath { name: String, path: PathBuf },
+    MultiplePaths { name: String, paths: Vec<PathBuf> },
+}
+
+impl Corpus {
+    pub fn try_from_path(file_path: impl AsRef<Path>) -> anyhow::Result<Self> {
+        let mut corpus = File::open(file_path.as_ref())
+            .map_err(anyhow::Error::from)
+            .and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))
+            .with_context(|| {
+                format!(
+                    "Failed to open and deserialize corpus file at {}",
+                    file_path.as_ref().display()
+                )
+            })?;
+
+        let corpus_directory = file_path
+            .as_ref()
+            .canonicalize()
+            .context("Failed to canonicalize the path to the corpus file")?
+            .parent()
+            .context("Corpus file has no parent")?
+            .to_path_buf();
+
+        for path in corpus.paths_iter_mut() {
+            *path = corpus_directory.join(path.as_path())
+        }
+
+        Ok(corpus)
+    }
+
+    pub fn enumerate_tests(&self) -> Vec<MetadataFile> {
+        let mut tests = self
+            .paths_iter()
+            .flat_map(|root_path| {
+                if !root_path.is_dir() {
+                    Box::new(std::iter::once(root_path.to_path_buf()))
+                        as Box<dyn Iterator<Item = _>>
+                } else {

@@ -171,6 +60,7 @@ fn enumerate_metadata_files(path: impl AsRef<Path>) -> Vec<MetadataFile> {
                 )
             }
             .map(move |metadata_file_path| (root_path, metadata_file_path))
+            })
             .filter_map(|(root_path, metadata_file_path)| {
                 Metadata::try_from_file(&metadata_file_path)
                     .or_else(|| {

@@ -196,5 +86,46 @@ fn enumerate_metadata_files(path: impl AsRef<Path>) -> Vec<MetadataFile> {
             .collect::<Vec<_>>();
         tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
         tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
+        info!(
+            len = tests.len(),
+            corpus_name = self.name(),
+            "Found tests in Corpus"
+        );
         tests
+    }
+
+    pub fn name(&self) -> &str {
+        match self {
+            Corpus::SinglePath { name, .. } | Corpus::MultiplePaths { name, .. } => name.as_str(),
+        }
+    }
+
+    pub fn paths_iter(&self) -> impl Iterator<Item = &Path> {
+        match self {
+            Corpus::SinglePath { path, .. } => {
+                Box::new(std::iter::once(path.as_path())) as Box<dyn Iterator<Item = _>>
+            }
+            Corpus::MultiplePaths { paths, .. } => {
+                Box::new(paths.iter().map(|path| path.as_path())) as Box<dyn Iterator<Item = _>>
+            }
+        }
+    }
+
+    pub fn paths_iter_mut(&mut self) -> impl Iterator<Item = &mut PathBuf> {
+        match self {
+            Corpus::SinglePath { path, .. } => {
+                Box::new(std::iter::once(path)) as Box<dyn Iterator<Item = _>>
+            }
+            Corpus::MultiplePaths { paths, .. } => {
+                Box::new(paths.iter_mut()) as Box<dyn Iterator<Item = _>>
+            }
+        }
+    }
+
+    pub fn path_count(&self) -> usize {
+        match self {
+            Corpus::SinglePath { .. } => 1,
+            Corpus::MultiplePaths { paths, .. } => paths.len(),
+        }
+    }
 }
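Because `Corpus` is `#[serde(untagged)]`, either JSON shape deserializes into the matching variant. A hedged round-trip sketch (the names and file paths are illustrative only):

    fn load() -> anyhow::Result<()> {
        // Single-path corpus:
        let single: Corpus =
            serde_json::from_str(r#"{ "name": "erc20", "path": "tests/erc20" }"#)?;
        assert!(matches!(single, Corpus::SinglePath { .. }));
        // Multi-path corpus:
        let multi: Corpus = serde_json::from_str(
            r#"{ "name": "all", "paths": ["tests/erc20", "tests/vm"] }"#,
        )?;
        assert!(matches!(multi, Corpus::MultiplePaths { .. }));
        Ok(())
    }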
@@ -3,5 +3,6 @@
 pub mod case;
 pub mod corpus;
 pub mod metadata;
+pub mod mode;
 pub mod steps;
 pub mod traits;
@@ -16,11 +16,11 @@ use revive_dt_common::{
     cached_fs::read_to_string,
     iterators::FilesWithExtensionIterator,
     macros::define_wrapper_type,
-    types::{Mode, ParsedMode, VmIdentifier},
+    types::{Mode, VmIdentifier},
 };
 use tracing::error;
 
-use crate::case::Case;
+use crate::{case::Case, mode::ParsedMode};
 
 pub const METADATA_FILE_EXTENSION: &str = "json";
 pub const SOLIDITY_CASE_FILE_EXTENSION: &str = "sol";
@@ -0,0 +1,257 @@
|
|||||||
|
use anyhow::Context as _;
|
||||||
|
use regex::Regex;
|
||||||
|
use revive_dt_common::iterators::EitherIter;
|
||||||
|
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
|
||||||
|
use schemars::JsonSchema;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::collections::HashSet;
|
||||||
|
use std::fmt::Display;
|
||||||
|
use std::str::FromStr;
|
||||||
|
use std::sync::LazyLock;
|
||||||
|
|
||||||
|
/// This represents a mode that has been parsed from test metadata.
|
||||||
|
///
|
||||||
|
/// Mode strings can take the following form (in pseudo-regex):
|
||||||
|
///
|
||||||
|
/// ```text
|
||||||
|
/// [YEILV][+-]? (M[0123sz])? <semver>?
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
|
||||||
|
#[serde(try_from = "String", into = "String")]
|
||||||
|
pub struct ParsedMode {
|
||||||
|
pub pipeline: Option<ModePipeline>,
|
||||||
|
pub optimize_flag: Option<bool>,
|
||||||
|
pub optimize_setting: Option<ModeOptimizerSetting>,
|
||||||
|
pub version: Option<semver::VersionReq>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for ParsedMode {
|
||||||
|
type Err = anyhow::Error;
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
static REGEX: LazyLock<Regex> = LazyLock::new(|| {
|
||||||
|
Regex::new(r"(?x)
|
||||||
|
^
|
||||||
|
(?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use eg Y, E+, E-
|
||||||
|
\s*
|
||||||
|
(?P<optimize_setting>M[a-zA-Z0-9])? # Optimize setting eg M0, Ms, Mz
|
||||||
|
\s*
|
||||||
|
(?P<version>[>=<]*\d+(?:\.\d+)*)? # Optional semver version eg >=0.8.0, 0.7, <0.8
|
||||||
|
$
|
||||||
|
").unwrap()
|
||||||
|
});
|
||||||
|
|
||||||
|
let Some(caps) = REGEX.captures(s) else {
|
||||||
|
anyhow::bail!("Cannot parse mode '{s}' from string");
|
||||||
|
};
|
||||||
|
|
||||||
|
let pipeline = match caps.name("pipeline") {
|
||||||
|
Some(m) => Some(
|
||||||
|
ModePipeline::from_str(m.as_str())
|
||||||
|
.context("Failed to parse mode pipeline from string")?,
|
||||||
|
),
|
||||||
|
None => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
|
||||||
|
|
||||||
|
let optimize_setting = match caps.name("optimize_setting") {
|
||||||
|
            Some(m) => Some(
                ModeOptimizerSetting::from_str(m.as_str())
                    .context("Failed to parse optimizer setting from string")?,
            ),
            None => None,
        };

        let version = match caps.name("version") {
            Some(m) => Some(
                semver::VersionReq::parse(m.as_str())
                    .map_err(|e| {
                        anyhow::anyhow!(
                            "Cannot parse the version requirement '{}': {e}",
                            m.as_str()
                        )
                    })
                    .context("Failed to parse semver requirement from mode string")?,
            ),
            None => None,
        };

        Ok(ParsedMode {
            pipeline,
            optimize_flag,
            optimize_setting,
            version,
        })
    }
}

impl Display for ParsedMode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut has_written = false;

        if let Some(pipeline) = self.pipeline {
            pipeline.fmt(f)?;
            if let Some(optimize_flag) = self.optimize_flag {
                f.write_str(if optimize_flag { "+" } else { "-" })?;
            }
            has_written = true;
        }

        if let Some(optimize_setting) = self.optimize_setting {
            if has_written {
                f.write_str(" ")?;
            }
            optimize_setting.fmt(f)?;
            has_written = true;
        }

        if let Some(version) = &self.version {
            if has_written {
                f.write_str(" ")?;
            }
            version.fmt(f)?;
        }

        Ok(())
    }
}

impl From<ParsedMode> for String {
    fn from(parsed_mode: ParsedMode) -> Self {
        parsed_mode.to_string()
    }
}

impl TryFrom<String> for ParsedMode {
    type Error = anyhow::Error;

    fn try_from(value: String) -> Result<Self, Self::Error> {
        ParsedMode::from_str(&value)
    }
}

impl ParsedMode {
    /// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
    pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
        let pipeline_iter = self.pipeline.as_ref().map_or_else(
            || EitherIter::A(ModePipeline::test_cases()),
            |p| EitherIter::B(std::iter::once(*p)),
        );

        let optimize_flag_setting = self.optimize_flag.map(|flag| {
            if flag {
                ModeOptimizerSetting::M3
            } else {
                ModeOptimizerSetting::M0
            }
        });

        let optimize_flag_iter = match optimize_flag_setting {
            Some(setting) => EitherIter::A(std::iter::once(setting)),
            None => EitherIter::B(ModeOptimizerSetting::test_cases()),
        };

        let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
            || EitherIter::A(optimize_flag_iter),
            |s| EitherIter::B(std::iter::once(*s)),
        );

        pipeline_iter.flat_map(move |pipeline| {
            optimize_settings_iter
                .clone()
                .map(move |optimize_setting| Mode {
                    pipeline,
                    optimize_setting,
                    version: self.version.clone(),
                })
        })
    }

    /// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
    /// This avoids any duplicate entries.
    pub fn many_to_modes<'a>(
        parsed: impl Iterator<Item = &'a ParsedMode>,
    ) -> impl Iterator<Item = Mode> {
        let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
        modes.into_iter()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parsed_mode_from_str() {
        let strings = vec![
            ("Mz", "Mz"),
            ("Y", "Y"),
            ("Y+", "Y+"),
            ("Y-", "Y-"),
            ("E", "E"),
            ("E+", "E+"),
            ("E-", "E-"),
            ("Y M0", "Y M0"),
            ("Y M1", "Y M1"),
            ("Y M2", "Y M2"),
            ("Y M3", "Y M3"),
            ("Y Ms", "Y Ms"),
            ("Y Mz", "Y Mz"),
            ("E M0", "E M0"),
            ("E M1", "E M1"),
            ("E M2", "E M2"),
            ("E M3", "E M3"),
            ("E Ms", "E Ms"),
            ("E Mz", "E Mz"),
            // When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
            ("Y 0.8.0", "Y ^0.8.0"),
            ("E+ 0.8.0", "E+ ^0.8.0"),
            ("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
            ("E Mz <0.7.0", "E Mz <0.7.0"),
            // We can parse +- _and_ M1/M2 but the latter takes priority.
            ("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
            ("E- M2 0.7.0", "E- M2 ^0.7.0"),
            // We don't see this in the wild but it is parsed.
            ("<=0.8", "<=0.8"),
        ];

        for (actual, expected) in strings {
            let parsed = ParsedMode::from_str(actual)
                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
            assert_eq!(
                expected,
                parsed.to_string(),
                "Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
            );
        }
    }

    #[test]
    fn test_parsed_mode_to_test_modes() {
        let strings = vec![
            ("Mz", vec!["Y Mz", "E Mz"]),
            ("Y", vec!["Y M0", "Y M3"]),
            ("E", vec!["E M0", "E M3"]),
            ("Y+", vec!["Y M3"]),
            ("Y-", vec!["Y M0"]),
            ("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
            (
                "<=0.8",
                vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
            ),
        ];

        for (actual, expected) in strings {
            let parsed = ParsedMode::from_str(actual)
                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
            let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
            let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();

            assert_eq!(
                expected_set, actual_set,
                "Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
            );
        }
    }
}
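Taken together, the `FromStr`, `Display`, and `to_modes` implementations above give mode strings a round-trippable grammar: an optional pipeline (`Y` or `E`), an optional `+`/`-` optimizer shorthand or explicit `M0`-`M3`/`Ms`/`Mz` setting, and an optional semver requirement, with any unspecified part expanded to all of its test cases. A minimal usage sketch, assuming only the `ParsedMode` and `Mode` types defined above (the `expand` helper itself is hypothetical):

// Hypothetical driver: parse a mode string and list the concrete modes it
// expands to, e.g. "Y" yields "Y M0" and "Y M3".
fn expand(input: &str) -> anyhow::Result<Vec<String>> {
    use std::str::FromStr as _;

    let parsed = ParsedMode::from_str(input)?;
    Ok(parsed.to_modes().map(|mode| mode.to_string()).collect())
}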
+13 -22
@@ -1,7 +1,5 @@
 use std::{collections::HashMap, fmt::Display, str::FromStr};
 
-use alloy::hex::ToHexExt;
-use alloy::primitives::{FixedBytes, utils::parse_units};
 use alloy::{
     eips::BlockNumberOrTag,
     json_abi::Function,
@@ -9,6 +7,7 @@ use alloy::{
     primitives::{Address, Bytes, U256},
     rpc::types::TransactionRequest,
 };
+use alloy_primitives::{FixedBytes, utils::parse_units};
 use anyhow::Context as _;
 use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, stream};
 use schemars::JsonSchema;
@@ -46,12 +45,12 @@ pub enum Step {
 }
 
 define_wrapper_type!(
-    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
    pub struct StepIdx(usize) impl Display, FromStr;
 );
 
 define_wrapper_type!(
-    #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+    #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
     #[serde(try_from = "String", into = "String")]
     pub struct StepPath(Vec<StepIdx>);
 );
@@ -538,7 +537,7 @@ impl FunctionCallStep {
     }
 
     /// Parse this input into a legacy transaction.
-    pub async fn as_transaction(
+    pub async fn legacy_transaction(
         &self,
         resolver: &(impl ResolverApi + ?Sized),
         context: ResolutionContext<'_>,
@@ -687,8 +686,8 @@ impl Calldata {
             Calldata::Compound(items) => {
                 stream::iter(items.iter().zip(other.chunks(32)))
                     .map(|(this, other)| async move {
-                        // The MatterLabs format supports wildcards and therefore we also need to
-                        // support them.
+                        // The matterlabs format supports wildcards and therefore we
+                        // also need to support them.
                         if this.as_ref() == "*" {
                             return Ok::<_, anyhow::Error>(true);
                         }
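The wildcard branch above short-circuits before any resolver work happens: a literal "*" entry in the expected compound calldata matches whatever 32-byte word sits in the actual calldata. A standalone sketch of that rule (the `word_matches` name is invented, and real non-wildcard entries go through the resolver rather than a plain string comparison):

// Simplified model of the wildcard rule: "*" matches any word; other
// entries are compared as U256 values (here via a decimal rendering).
fn word_matches(expected: &str, actual_word: [u8; 32]) -> bool {
    if expected == "*" {
        return true; // wildcard short-circuit
    }
    expected == alloy::primitives::U256::from_be_bytes(actual_word).to_string()
}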
@@ -706,7 +705,6 @@ impl Calldata {
                         .await
                         .context("Failed to resolve calldata item during equivalence check")?;
                         let other = U256::from_be_slice(&other);
-
                         Ok(this == other)
                     })
                     .buffered(0xFF)
@@ -719,7 +717,7 @@ impl Calldata {
 }
 
 impl CalldataItem {
-    #[instrument(level = "info", skip_all, err(Debug))]
+    #[instrument(level = "info", skip_all, err)]
     async fn resolve(
         &self,
         resolver: &(impl ResolverApi + ?Sized),
@@ -770,14 +768,7 @@ impl CalldataItem {
         match stack.as_slice() {
             // Empty stack means that we got an empty compound calldata which we resolve to zero.
             [] => Ok(U256::ZERO),
-            [CalldataToken::Item(item)] => {
-                tracing::debug!(
-                    original_item = ?self,
-                    resolved_item = item.to_be_bytes::<32>().encode_hex(),
-                    "Resolution Done"
-                );
-                Ok(*item)
-            }
+            [CalldataToken::Item(item)] => Ok(*item),
             _ => Err(anyhow::anyhow!(
                 "Invalid calldata arithmetic operation - Invalid stack"
             )),
@@ -907,7 +898,7 @@ impl<T: AsRef<str>> CalldataToken<T> {
             let block_hash = resolver
                 .block_hash(desired_block_number.into())
                 .await
-                .context(format!("Failed to resolve the block hash of block number {desired_block_number}"))?;
+                .context("Failed to resolve block hash for desired block number")?;
 
             Ok(U256::from_be_bytes(block_hash.0))
         } else if item == Self::BLOCK_NUMBER_VARIABLE {
@@ -968,9 +959,9 @@ impl<'de> Deserialize<'de> for EtherValue {
 #[cfg(test)]
 mod tests {
 
-    use alloy::primitives::{BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, address};
-    use alloy::sol_types::SolValue;
     use alloy::{eips::BlockNumberOrTag, json_abi::JsonAbi};
+    use alloy_primitives::{BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, address};
+    use alloy_sol_types::SolValue;
     use std::{collections::HashMap, pin::Pin};
 
     use super::*;
@@ -1124,7 +1115,7 @@ mod tests {
         let encoded = input.encoded_input(&resolver, context).await.unwrap();
         assert!(encoded.0.starts_with(&selector));
 
-        type T = (alloy::primitives::Address,);
+        type T = (alloy_primitives::Address,);
         let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap();
         assert_eq!(
             decoded.0,
@@ -1171,7 +1162,7 @@ mod tests {
         let encoded = input.encoded_input(&resolver, context).await.unwrap();
         assert!(encoded.0.starts_with(&selector));
 
-        type T = (alloy::primitives::Address,);
+        type T = (alloy_primitives::Address,);
         let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap();
         assert_eq!(
             decoded.0,
@@ -3,8 +3,8 @@ use std::pin::Pin;
 
 use alloy::eips::BlockNumberOrTag;
 use alloy::json_abi::JsonAbi;
-use alloy::primitives::TxHash;
 use alloy::primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, U256};
+use alloy_primitives::TxHash;
 use anyhow::Result;
 
 use crate::metadata::{ContractIdent, ContractInstance};
@@ -12,11 +12,9 @@ rust-version.workspace = true
 revive-common = { workspace = true }
 
 revive-dt-format = { workspace = true }
-revive-dt-report = { workspace = true }
 
 alloy = { workspace = true }
 anyhow = { workspace = true }
-futures = { workspace = true }
 
 [lints]
 workspace = true
@@ -3,39 +3,22 @@
 use std::pin::Pin;
 use std::sync::Arc;
 
-use alloy::network::Ethereum;
 use alloy::primitives::{Address, StorageKey, TxHash, U256};
-use alloy::providers::DynProvider;
 use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
 use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
 use anyhow::Result;
 
-use futures::Stream;
 use revive_common::EVMVersion;
 use revive_dt_format::traits::ResolverApi;
-use revive_dt_report::MinedBlockInformation;
 
 /// An interface for all interactions with Ethereum compatible nodes.
 #[allow(clippy::type_complexity)]
 pub trait EthereumNode {
-    /// A function to run post spawning the nodes and before any transactions are run on the node.
-    fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>>;
-
     fn id(&self) -> usize;
 
     /// Returns the nodes connection string.
     fn connection_string(&self) -> &str;
 
-    fn submit_transaction(
-        &self,
-        transaction: TransactionRequest,
-    ) -> Pin<Box<dyn Future<Output = Result<TxHash>> + '_>>;
-
-    fn get_receipt(
-        &self,
-        tx_hash: TxHash,
-    ) -> Pin<Box<dyn Future<Output = Result<TransactionReceipt>> + '_>>;
-
     /// Execute the [TransactionRequest] and return a [TransactionReceipt].
     fn execute_transaction(
         &self,
@@ -67,17 +50,4 @@ pub trait EthereumNode {
 
     /// Returns the EVM version of the node.
     fn evm_version(&self) -> EVMVersion;
-
-    /// Returns a stream of the blocks that were mined by the node.
-    fn subscribe_to_full_blocks_information(
-        &self,
-    ) -> Pin<
-        Box<
-            dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
-                + '_,
-        >,
-    >;
-
-    fn provider(&self)
-    -> Pin<Box<dyn Future<Output = anyhow::Result<DynProvider<Ethereum>>> + '_>>;
 }
@@ -11,9 +11,7 @@ rust-version.workspace = true
 [dependencies]
 anyhow = { workspace = true }
 alloy = { workspace = true }
-futures = { workspace = true }
 tracing = { workspace = true }
-tower = { workspace = true }
 tokio = { workspace = true }
 
 revive-common = { workspace = true }
@@ -21,7 +19,6 @@ revive-dt-common = { workspace = true }
 revive-dt-config = { workspace = true }
 revive-dt-format = { workspace = true }
 revive-dt-node-interaction = { workspace = true }
-revive-dt-report = { workspace = true }
 
 serde = { workspace = true }
 serde_json = { workspace = true }
@@ -30,8 +27,6 @@ serde_yaml_ng = { workspace = true }
 
 sp-core = { workspace = true }
 sp-runtime = { workspace = true }
-subxt = { workspace = true }
-zombienet-sdk = { workspace = true }
 
 [dev-dependencies]
 temp-dir = { workspace = true }
@@ -0,0 +1,78 @@
+use alloy::{
+    network::{Network, TransactionBuilder},
+    providers::{
+        Provider, SendableTx,
+        fillers::{GasFiller, TxFiller},
+    },
+    transports::TransportResult,
+};
+
+#[derive(Clone, Debug)]
+pub struct FallbackGasFiller {
+    inner: GasFiller,
+    default_gas_limit: u64,
+    default_max_fee_per_gas: u128,
+    default_priority_fee: u128,
+}
+
+impl FallbackGasFiller {
+    pub fn new(
+        default_gas_limit: u64,
+        default_max_fee_per_gas: u128,
+        default_priority_fee: u128,
+    ) -> Self {
+        Self {
+            inner: GasFiller,
+            default_gas_limit,
+            default_max_fee_per_gas,
+            default_priority_fee,
+        }
+    }
+}
+
+impl<N> TxFiller<N> for FallbackGasFiller
+where
+    N: Network,
+{
+    type Fillable = Option<<GasFiller as TxFiller<N>>::Fillable>;
+
+    fn status(
+        &self,
+        tx: &<N as Network>::TransactionRequest,
+    ) -> alloy::providers::fillers::FillerControlFlow {
+        <GasFiller as TxFiller<N>>::status(&self.inner, tx)
+    }
+
+    fn fill_sync(&self, _: &mut alloy::providers::SendableTx<N>) {}
+
+    async fn prepare<P: Provider<N>>(
+        &self,
+        provider: &P,
+        tx: &<N as Network>::TransactionRequest,
+    ) -> TransportResult<Self::Fillable> {
+        // Try to fetch GasFiller's "fillable" (gas_price, base_fee, estimate_gas, ...)
+        // If it errors (i.e. tx would revert under eth_estimateGas), swallow it.
+        match self.inner.prepare(provider, tx).await {
+            Ok(fill) => Ok(Some(fill)),
+            Err(_) => Ok(None),
+        }
+    }
+
+    async fn fill(
+        &self,
+        fillable: Self::Fillable,
+        mut tx: alloy::providers::SendableTx<N>,
+    ) -> TransportResult<SendableTx<N>> {
+        if let Some(fill) = fillable {
+            // our inner GasFiller succeeded - use it
+            self.inner.fill(fill, tx).await
+        } else {
+            if let Some(builder) = tx.as_mut_builder() {
+                builder.set_gas_limit(self.default_gas_limit);
+                builder.set_max_fee_per_gas(self.default_max_fee_per_gas);
+                builder.set_max_priority_fee_per_gas(self.default_priority_fee);
+            }
+            Ok(tx)
+        }
+    }
+}
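The filler above only intervenes when the wrapped `GasFiller` fails; successful estimations pass through untouched. A sketch of how it is wired into a provider stack, mirroring the `ProviderBuilder` usage that appears later in this diff (the endpoint URL is illustrative):

use alloy::providers::{Provider, ProviderBuilder};

// If eth_estimateGas fails (e.g. the transaction would revert), fall back
// to a fixed 25M gas limit and 1 gwei fee caps instead of failing the fill.
async fn example_provider() -> anyhow::Result<impl Provider> {
    Ok(ProviderBuilder::new()
        .disable_recommended_fillers()
        .filler(FallbackGasFiller::new(25_000_000, 1_000_000_000, 1_000_000_000))
        .connect("ws://127.0.0.1:8546") // illustrative endpoint
        .await?)
}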
@@ -1,10 +1,5 @@
-use alloy::primitives::ChainId;
-
 /// This constant defines how much Wei accounts are pre-seeded with in genesis.
 ///
 /// Note: After changing this number, check that the tests for substrate work as we encountered
 /// some issues with different values of the initial balance on substrate.
 pub const INITIAL_BALANCE: u128 = 10u128.pow(37);
-
-/// The chain id used for all of the chains spawned by the framework.
-pub const CHAIN_ID: ChainId = 420420420;
@@ -3,6 +3,7 @@
 use std::{
     fs::{File, create_dir_all, remove_dir_all},
     io::Read,
+    ops::ControlFlow,
     path::PathBuf,
     pin::Pin,
     process::{Command, Stdio},
@@ -19,34 +20,32 @@ use alloy::{
     network::{Ethereum, EthereumWallet, NetworkWallet},
     primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
     providers::{
-        Provider,
+        Provider, ProviderBuilder,
         ext::DebugApi,
-        fillers::{CachedNonceManager, ChainIdFiller, NonceFiller},
+        fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller},
     },
     rpc::types::{
-        EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
-        trace::geth::{
-            DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame,
-        },
+        EIP1186AccountProofResponse, TransactionRequest,
+        trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
     },
 };
 use anyhow::Context as _;
-use futures::{FutureExt, Stream, StreamExt};
 use revive_common::EVMVersion;
-use tokio::sync::OnceCell;
-use tracing::{error, instrument};
+use tracing::{Instrument, instrument};
 
-use revive_dt_common::fs::clear_directory;
+use revive_dt_common::{
+    fs::clear_directory,
+    futures::{PollingWaitBehavior, poll},
+};
 use revive_dt_config::*;
 use revive_dt_format::traits::ResolverApi;
 use revive_dt_node_interaction::EthereumNode;
-use revive_dt_report::{EthereumMinedBlockInformation, MinedBlockInformation};
 
 use crate::{
     Node,
-    constants::{CHAIN_ID, INITIAL_BALANCE},
-    helpers::{Process, ProcessReadinessWaitBehavior},
-    provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
+    common::FallbackGasFiller,
+    constants::INITIAL_BALANCE,
+    process::{Process, ProcessReadinessWaitBehavior},
 };
 
 static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
@@ -71,8 +70,7 @@ pub struct GethNode {
     start_timeout: Duration,
     wallet: Arc<EthereumWallet>,
     nonce_manager: CachedNonceManager,
-    provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
-    use_fallback_gas_filler: bool,
+    chain_id_filler: ChainIdFiller,
 }
 
 impl GethNode {
@@ -86,12 +84,17 @@ impl GethNode {
     const READY_MARKER: &str = "IPC endpoint opened";
     const ERROR_MARKER: &str = "Fatal:";
 
+    const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
+    const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";
+
+    const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60);
+    const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60);
+
     pub fn new(
         context: impl AsRef<WorkingDirectoryConfiguration>
             + AsRef<WalletConfiguration>
             + AsRef<GethConfiguration>
             + Clone,
-        use_fallback_gas_filler: bool,
     ) -> Self {
         let working_directory_configuration =
             AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
@@ -116,15 +119,14 @@ impl GethNode {
             handle: None,
             start_timeout: geth_configuration.start_timeout_ms,
             wallet: wallet.clone(),
+            chain_id_filler: Default::default(),
             nonce_manager: Default::default(),
-            provider: Default::default(),
-            use_fallback_gas_filler,
         }
     }
 
     /// Create the node directory and call `geth init` to configure the genesis.
     #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
-    fn init(&mut self, genesis: Genesis) -> anyhow::Result<&mut Self> {
+    fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
         let _ = clear_directory(&self.base_directory);
         let _ = clear_directory(&self.logs_directory);
 
@@ -133,7 +135,16 @@ impl GethNode {
         create_dir_all(&self.logs_directory)
             .context("Failed to create logs directory for geth node")?;
 
-        let genesis = Self::node_genesis(genesis, self.wallet.as_ref());
+        for signer_address in
+            <EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
+        {
+            // Note, the use of the entry API here means that we only modify the entries for any
+            // account that is not in the `alloc` field of the genesis state.
+            genesis
+                .alloc
+                .entry(signer_address)
+                .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
+        }
         let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE);
         serde_json::to_writer(
             File::create(&genesis_path).context("Failed to create geth genesis file")?,
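The `entry(..).or_insert(..)` pattern noted in the comment above is what makes the prefunding non-destructive: accounts already present in the genesis `alloc` keep their configured balance, and only absent signer addresses receive the default. A minimal standalone model of that behaviour (plain `HashMap`, illustrative names):

use std::collections::HashMap;

// Accounts already in `alloc` keep their balance; missing signers are seeded.
fn seed(alloc: &mut HashMap<String, u128>, signers: &[String], default_balance: u128) {
    for signer in signers {
        alloc.entry(signer.clone()).or_insert(default_balance);
    }
}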
@@ -224,7 +235,7 @@ impl GethNode {
         match process {
             Ok(process) => self.handle = Some(process),
             Err(err) => {
-                error!(?err, "Failed to start geth, shutting down gracefully");
+                tracing::error!(?err, "Failed to start geth, shutting down gracefully");
                 self.shutdown()
                     .context("Failed to gracefully shutdown after geth start error")?;
                 return Err(err);
@@ -234,40 +245,27 @@ impl GethNode {
         Ok(self)
     }
 
-    async fn provider(&self) -> anyhow::Result<ConcreteProvider<Ethereum, Arc<EthereumWallet>>> {
-        self.provider
-            .get_or_try_init(|| async move {
-                construct_concurrency_limited_provider::<Ethereum, _>(
-                    self.connection_string.as_str(),
-                    FallbackGasFiller::default()
-                        .with_fallback_mechanism(self.use_fallback_gas_filler),
-                    ChainIdFiller::new(Some(CHAIN_ID)),
-                    NonceFiller::new(self.nonce_manager.clone()),
-                    self.wallet.clone(),
-                )
-                .await
-                .context("Failed to construct the provider")
-            })
-            .await
-            .cloned()
-    }
-
-    pub fn node_genesis(mut genesis: Genesis, wallet: &EthereumWallet) -> Genesis {
-        for signer_address in NetworkWallet::<Ethereum>::signer_addresses(&wallet) {
-            genesis
-                .alloc
-                .entry(signer_address)
-                .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
-        }
-        genesis
+    async fn provider(
+        &self,
+    ) -> anyhow::Result<FillProvider<impl TxFiller<Ethereum>, impl Provider<Ethereum>, Ethereum>>
+    {
+        ProviderBuilder::new()
+            .disable_recommended_fillers()
+            .filler(FallbackGasFiller::new(
+                25_000_000,
+                1_000_000_000,
+                1_000_000_000,
+            ))
+            .filler(self.chain_id_filler.clone())
+            .filler(NonceFiller::new(self.nonce_manager.clone()))
+            .wallet(self.wallet.clone())
+            .connect(&self.connection_string)
+            .await
+            .map_err(Into::into)
     }
 }
 
 impl EthereumNode for GethNode {
-    fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>> {
-        Box::pin(async move { Ok(()) })
-    }
-
     fn id(&self) -> usize {
         self.id as _
     }
@@ -282,64 +280,69 @@ impl EthereumNode for GethNode {
         fields(geth_node_id = self.id, connection_string = self.connection_string),
         err,
     )]
-    fn submit_transaction(
+    fn execute_transaction(
         &self,
         transaction: TransactionRequest,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TxHash>> + '_>> {
+    ) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::rpc::types::TransactionReceipt>> + '_>>
+    {
         Box::pin(async move {
             let provider = self
                 .provider()
                 .await
-                .context("Failed to create the provider for transaction submission")?;
+                .context("Failed to create provider for transaction submission")?;
 
             let pending_transaction = provider
                 .send_transaction(transaction)
                 .await
-                .context("Failed to submit the transaction through the provider")?;
-            Ok(*pending_transaction.tx_hash())
-        })
-    }
-
-    #[instrument(
-        level = "info",
-        skip_all,
-        fields(geth_node_id = self.id, connection_string = self.connection_string),
-        err,
-    )]
-    fn get_receipt(
-        &self,
-        tx_hash: TxHash,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
-        Box::pin(async move {
-            self.provider()
-                .await
-                .context("Failed to create provider for getting the receipt")?
-                .get_transaction_receipt(tx_hash)
-                .await
-                .context("Failed to get the receipt of the transaction")?
-                .context("Failed to get the receipt of the transaction")
-        })
-    }
-
-    #[instrument(
-        level = "info",
-        skip_all,
-        fields(geth_node_id = self.id, connection_string = self.connection_string),
-        err,
-    )]
-    fn execute_transaction(
-        &self,
-        transaction: TransactionRequest,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
-        Box::pin(async move {
-            self.provider()
-                .await
-                .context("Failed to create provider for transaction submission")?
-                .send_transaction(transaction)
-                .await
-                .context("Encountered an error when submitting a transaction")?
-                .get_receipt()
-                .await
-                .context("Failed to get the receipt for the transaction")
+                .inspect_err(
+                    |err| tracing::error!(%err, "Encountered an error when submitting the transaction"),
+                )
+                .context("Failed to submit transaction to geth node")?;
+            let transaction_hash = *pending_transaction.tx_hash();
+
+            // The following is a fix for the "transaction indexing is in progress" error that we used
+            // to get. You can find more information on this in the following GH issue in geth
+            // https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
+            // before we can get the receipt of the transaction it needs to have been indexed by the
+            // node's indexer. Just because the transaction has been confirmed it doesn't mean that it
+            // has been indexed. When we call alloy's `get_receipt` it checks if the transaction was
+            // confirmed. If it has been, then it will call `eth_getTransactionReceipt` method which
+            // _might_ return the above error if the tx has not yet been indexed yet. So, we need to
+            // implement a retry mechanism for the receipt to keep retrying to get it until it
+            // eventually works, but we only do that if the error we get back is the "transaction
+            // indexing is in progress" error or if the receipt is None.
+            //
+            // Getting the transaction indexed and taking a receipt can take a long time especially when
+            // a lot of transactions are being submitted to the node. Thus, while initially we only
+            // allowed for 60 seconds of waiting with a 1 second delay in polling, we need to allow for
+            // a larger wait time. Therefore, in here we allow for 5 minutes of waiting with exponential
+            // backoff each time we attempt to get the receipt and find that it's not available.
+            let provider = Arc::new(provider);
+            poll(
+                Self::RECEIPT_POLLING_DURATION,
+                PollingWaitBehavior::Constant(Duration::from_millis(200)),
+                move || {
+                    let provider = provider.clone();
+                    async move {
+                        match provider.get_transaction_receipt(transaction_hash).await {
+                            Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
+                            Ok(None) => Ok(ControlFlow::Continue(())),
+                            Err(error) => {
+                                let error_string = error.to_string();
+                                match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) {
+                                    true => Ok(ControlFlow::Continue(())),
+                                    false => Err(error.into()),
+                                }
+                            }
+                        }
+                    }
+                },
+            )
+            .instrument(tracing::info_span!(
+                "Awaiting transaction receipt",
+                ?transaction_hash
+            ))
+            .await
         })
     }
 
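The `poll` helper used above comes from `revive_dt_common` and is not shown in this diff; the call sites treat it as a deadline-bounded retry loop driven by `ControlFlow`. A minimal sketch of the assumed contract (simplified to a constant wait, with `poll_sketch` as an invented name):

use std::future::Future;
use std::ops::ControlFlow;
use std::time::{Duration, Instant};

// Hedged approximation: retry `f` until it breaks with a value, errors,
// or the overall deadline elapses.
async fn poll_sketch<T, F, Fut>(deadline: Duration, wait: Duration, mut f: F) -> anyhow::Result<T>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = anyhow::Result<ControlFlow<T>>>,
{
    let started = Instant::now();
    loop {
        match f().await? {
            ControlFlow::Break(value) => return Ok(value),
            ControlFlow::Continue(()) if started.elapsed() < deadline => {
                tokio::time::sleep(wait).await; // constant wait between attempts
            }
            ControlFlow::Continue(()) => anyhow::bail!("timed out while polling"),
        }
    }
}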
@@ -348,14 +351,38 @@ impl EthereumNode for GethNode {
|
|||||||
&self,
|
&self,
|
||||||
tx_hash: TxHash,
|
tx_hash: TxHash,
|
||||||
trace_options: GethDebugTracingOptions,
|
trace_options: GethDebugTracingOptions,
|
||||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::rpc::types::trace::geth::GethTrace>> + '_>>
|
||||||
|
{
|
||||||
Box::pin(async move {
|
Box::pin(async move {
|
||||||
|
let provider = Arc::new(
|
||||||
self.provider()
|
self.provider()
|
||||||
.await
|
.await
|
||||||
.context("Failed to create provider for tracing")?
|
.context("Failed to create provider for tracing")?,
|
||||||
|
);
|
||||||
|
poll(
|
||||||
|
Self::TRACE_POLLING_DURATION,
|
||||||
|
PollingWaitBehavior::Constant(Duration::from_millis(200)),
|
||||||
|
move || {
|
||||||
|
let provider = provider.clone();
|
||||||
|
let trace_options = trace_options.clone();
|
||||||
|
async move {
|
||||||
|
match provider
|
||||||
.debug_trace_transaction(tx_hash, trace_options)
|
.debug_trace_transaction(tx_hash, trace_options)
|
||||||
.await
|
.await
|
||||||
.context("Failed to get the transaction trace")
|
{
|
||||||
|
Ok(trace) => Ok(ControlFlow::Break(trace)),
|
||||||
|
Err(error) => {
|
||||||
|
let error_string = error.to_string();
|
||||||
|
match error_string.contains(Self::TRANSACTION_TRACING_ERROR) {
|
||||||
|
true => Ok(ControlFlow::Continue(())),
|
||||||
|
false => Err(error.into()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -429,68 +456,14 @@ impl EthereumNode for GethNode {
|
|||||||
fn evm_version(&self) -> EVMVersion {
|
fn evm_version(&self) -> EVMVersion {
|
||||||
EVMVersion::Cancun
|
EVMVersion::Cancun
|
||||||
}
|
}
|
||||||
|
|
||||||
fn subscribe_to_full_blocks_information(
|
|
||||||
&self,
|
|
||||||
) -> Pin<
|
|
||||||
Box<
|
|
||||||
dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
|
|
||||||
+ '_,
|
|
||||||
>,
|
|
||||||
> {
|
|
||||||
Box::pin(async move {
|
|
||||||
let provider = self
|
|
||||||
.provider()
|
|
||||||
.await
|
|
||||||
.context("Failed to create the provider for block subscription")?;
|
|
||||||
let block_subscription = provider.subscribe_full_blocks();
|
|
||||||
let block_stream = block_subscription
|
|
||||||
.into_stream()
|
|
||||||
.await
|
|
||||||
.context("Failed to create the block stream")?;
|
|
||||||
|
|
||||||
let mined_block_information_stream = block_stream.filter_map(|block| async {
|
|
||||||
let block = block.ok()?;
|
|
||||||
Some(MinedBlockInformation {
|
|
||||||
ethereum_block_information: EthereumMinedBlockInformation {
|
|
||||||
block_number: block.number(),
|
|
||||||
block_timestamp: block.header.timestamp,
|
|
||||||
mined_gas: block.header.gas_used as _,
|
|
||||||
block_gas_limit: block.header.gas_limit as _,
|
|
||||||
transaction_hashes: block
|
|
||||||
.transactions
|
|
||||||
.into_hashes()
|
|
||||||
.as_hashes()
|
|
||||||
.expect("Must be hashes")
|
|
||||||
.to_vec(),
|
|
||||||
},
|
|
||||||
substrate_block_information: None,
|
|
||||||
tx_counts: Default::default(),
|
|
||||||
})
|
|
||||||
});
|
|
||||||
|
|
||||||
Ok(Box::pin(mined_block_information_stream)
|
|
||||||
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn provider(
|
|
||||||
&self,
|
|
||||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
|
|
||||||
{
|
|
||||||
Box::pin(
|
|
||||||
self.provider()
|
|
||||||
.map(|provider| provider.map(|provider| provider.erased())),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct GethNodeResolver {
|
pub struct GethNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereum>> {
|
||||||
id: u32,
|
id: u32,
|
||||||
provider: ConcreteProvider<Ethereum, Arc<EthereumWallet>>,
|
provider: FillProvider<F, P, Ethereum>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ResolverApi for GethNodeResolver {
|
impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi for GethNodeResolver<F, P> {
|
||||||
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
fn chain_id(
|
fn chain_id(
|
||||||
&self,
|
&self,
|
||||||
@@ -667,7 +640,7 @@ mod tests {
|
|||||||
|
|
||||||
fn new_node() -> (TestExecutionContext, GethNode) {
|
fn new_node() -> (TestExecutionContext, GethNode) {
|
||||||
let context = test_config();
|
let context = test_config();
|
||||||
let mut node = GethNode::new(&context, true);
|
let mut node = GethNode::new(&context);
|
||||||
node.init(context.genesis_configuration.genesis().unwrap().clone())
|
node.init(context.genesis_configuration.genesis().unwrap().clone())
|
||||||
.expect("Failed to initialize the node")
|
.expect("Failed to initialize the node")
|
||||||
.spawn_process()
|
.spawn_process()
|
||||||
@@ -675,38 +648,12 @@ mod tests {
|
|||||||
(context, node)
|
(context, node)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn shared_state() -> &'static (TestExecutionContext, GethNode) {
|
|
||||||
static STATE: LazyLock<(TestExecutionContext, GethNode)> = LazyLock::new(new_node);
|
|
||||||
&STATE
|
|
||||||
}
|
|
||||||
|
|
||||||
fn shared_node() -> &'static GethNode {
|
fn shared_node() -> &'static GethNode {
|
||||||
&shared_state().1
|
static NODE: LazyLock<(TestExecutionContext, GethNode)> = LazyLock::new(new_node);
|
||||||
}
|
&NODE.1
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
|
|
||||||
// Arrange
|
|
||||||
let (context, node) = shared_state();
|
|
||||||
|
|
||||||
let account_address = context
|
|
||||||
.wallet_configuration
|
|
||||||
.wallet()
|
|
||||||
.default_signer()
|
|
||||||
.address();
|
|
||||||
let transaction = TransactionRequest::default()
|
|
||||||
.to(account_address)
|
|
||||||
.value(U256::from(100_000_000_000_000u128));
|
|
||||||
|
|
||||||
// Act
|
|
||||||
let receipt = node.execute_transaction(transaction).await;
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let _ = receipt.expect("Failed to get the receipt for the transfer");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[ignore = "Ignored since they take a long time to run"]
|
|
||||||
fn version_works() {
|
fn version_works() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let node = shared_node();
|
let node = shared_node();
|
||||||
@@ -723,7 +670,6 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "Ignored since they take a long time to run"]
|
|
||||||
async fn can_get_chain_id_from_node() {
|
async fn can_get_chain_id_from_node() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let node = shared_node();
|
let node = shared_node();
|
||||||
@@ -737,7 +683,6 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "Ignored since they take a long time to run"]
|
|
||||||
async fn can_get_gas_limit_from_node() {
|
async fn can_get_gas_limit_from_node() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let node = shared_node();
|
let node = shared_node();
|
||||||
@@ -755,7 +700,6 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "Ignored since they take a long time to run"]
|
|
||||||
async fn can_get_coinbase_from_node() {
|
async fn can_get_coinbase_from_node() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let node = shared_node();
|
let node = shared_node();
|
||||||
@@ -773,7 +717,6 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "Ignored since they take a long time to run"]
|
|
||||||
async fn can_get_block_difficulty_from_node() {
|
async fn can_get_block_difficulty_from_node() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let node = shared_node();
|
let node = shared_node();
|
||||||
@@ -791,7 +734,6 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "Ignored since they take a long time to run"]
|
|
||||||
async fn can_get_block_hash_from_node() {
|
async fn can_get_block_hash_from_node() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let node = shared_node();
|
let node = shared_node();
|
||||||
@@ -809,7 +751,6 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "Ignored since they take a long time to run"]
|
|
||||||
async fn can_get_block_timestamp_from_node() {
|
async fn can_get_block_timestamp_from_node() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let node = shared_node();
|
let node = shared_node();
|
||||||
@@ -827,7 +768,6 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "Ignored since they take a long time to run"]
|
|
||||||
async fn can_get_block_number_from_node() {
|
async fn can_get_block_number_from_node() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let node = shared_node();
|
let node = shared_node();
|
||||||
@@ -1,3 +0,0 @@
|
|||||||
mod process;
|
|
||||||
|
|
||||||
pub use process::*;
|
|
||||||
@@ -3,10 +3,12 @@
|
|||||||
use alloy::genesis::Genesis;
|
use alloy::genesis::Genesis;
|
||||||
use revive_dt_node_interaction::EthereumNode;
|
use revive_dt_node_interaction::EthereumNode;
|
||||||
|
|
||||||
|
pub mod common;
|
||||||
pub mod constants;
|
pub mod constants;
|
||||||
pub mod helpers;
|
pub mod geth;
|
||||||
pub mod node_implementations;
|
pub mod lighthouse_geth;
|
||||||
pub mod provider_utils;
|
pub mod process;
|
||||||
|
pub mod substrate;
|
||||||
|
|
||||||
/// An abstract interface for testing nodes.
|
/// An abstract interface for testing nodes.
|
||||||
pub trait Node: EthereumNode {
|
pub trait Node: EthereumNode {
|
||||||
|
|||||||
+175
-370
@@ -9,9 +9,10 @@
|
|||||||
//! that the tool has.
|
//! that the tool has.
|
||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
collections::{BTreeMap, HashSet},
|
collections::BTreeMap,
|
||||||
fs::{File, create_dir_all},
|
fs::{File, create_dir_all},
|
||||||
io::Read,
|
io::Read,
|
||||||
|
ops::ControlFlow,
|
||||||
path::PathBuf,
|
path::PathBuf,
|
||||||
pin::Pin,
|
pin::Pin,
|
||||||
process::{Command, Stdio},
|
process::{Command, Stdio},
|
||||||
@@ -30,36 +31,34 @@ use alloy::{
|
|||||||
Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256, address,
|
Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256, address,
|
||||||
},
|
},
|
||||||
providers::{
|
providers::{
|
||||||
Provider,
|
Provider, ProviderBuilder,
|
||||||
ext::DebugApi,
|
ext::DebugApi,
|
||||||
fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller},
|
fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller},
|
||||||
},
|
},
|
||||||
rpc::types::{
|
rpc::types::{
|
||||||
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
|
EIP1186AccountProofResponse, TransactionRequest,
|
||||||
trace::geth::{
|
trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
|
||||||
DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
use anyhow::Context as _;
|
use anyhow::Context as _;
|
||||||
use futures::{FutureExt, Stream, StreamExt};
|
|
||||||
use revive_common::EVMVersion;
|
use revive_common::EVMVersion;
|
||||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||||
use serde_with::serde_as;
|
use serde_with::serde_as;
|
||||||
use tokio::sync::OnceCell;
|
use tracing::{Instrument, instrument};
|
||||||
use tracing::{info, instrument};
|
|
||||||
|
|
||||||
use revive_dt_common::fs::clear_directory;
|
use revive_dt_common::{
|
||||||
|
fs::clear_directory,
|
||||||
|
futures::{PollingWaitBehavior, poll},
|
||||||
|
};
|
||||||
use revive_dt_config::*;
|
use revive_dt_config::*;
|
||||||
use revive_dt_format::traits::ResolverApi;
|
use revive_dt_format::traits::ResolverApi;
|
||||||
use revive_dt_node_interaction::EthereumNode;
|
use revive_dt_node_interaction::EthereumNode;
|
||||||
use revive_dt_report::{EthereumMinedBlockInformation, MinedBlockInformation};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
Node,
|
Node,
|
||||||
constants::{CHAIN_ID, INITIAL_BALANCE},
|
common::FallbackGasFiller,
|
||||||
helpers::{Process, ProcessReadinessWaitBehavior},
|
constants::INITIAL_BALANCE,
|
||||||
provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
|
process::{Process, ProcessReadinessWaitBehavior},
|
||||||
};
|
};
|
||||||
|
|
||||||
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
|
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
|
||||||
@@ -76,8 +75,7 @@ static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
|
|||||||
pub struct LighthouseGethNode {
|
pub struct LighthouseGethNode {
|
||||||
/* Node Identifier */
|
/* Node Identifier */
|
||||||
id: u32,
|
id: u32,
|
||||||
ws_connection_string: String,
|
connection_string: String,
|
||||||
http_connection_string: String,
|
|
||||||
enclave_name: String,
|
enclave_name: String,
|
||||||
|
|
||||||
/* Directory Paths */
|
/* Directory Paths */
|
||||||
@@ -93,25 +91,25 @@ pub struct LighthouseGethNode {
|
|||||||
/* Spawned Processes */
|
/* Spawned Processes */
|
||||||
process: Option<Process>,
|
process: Option<Process>,
|
||||||
|
|
||||||
/* Prefunded Account Information */
|
|
||||||
prefunded_account_address: Address,
|
|
||||||
|
|
||||||
/* Provider Related Fields */
|
/* Provider Related Fields */
|
||||||
wallet: Arc<EthereumWallet>,
|
wallet: Arc<EthereumWallet>,
|
||||||
nonce_manager: CachedNonceManager,
|
nonce_manager: CachedNonceManager,
|
||||||
|
chain_id_filler: ChainIdFiller,
|
||||||
persistent_http_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
|
|
||||||
persistent_ws_provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
|
|
||||||
|
|
||||||
use_fallback_gas_filler: bool,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl LighthouseGethNode {
|
impl LighthouseGethNode {
|
||||||
const BASE_DIRECTORY: &str = "lighthouse";
|
const BASE_DIRECTORY: &str = "lighthouse";
|
||||||
const LOGS_DIRECTORY: &str = "logs";
|
const LOGS_DIRECTORY: &str = "logs";
|
||||||
|
|
||||||
|
const IPC_FILE_NAME: &str = "geth.ipc";
|
||||||
const CONFIG_FILE_NAME: &str = "config.yaml";
|
const CONFIG_FILE_NAME: &str = "config.yaml";
|
||||||
|
|
||||||
|
const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
|
||||||
|
const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";
|
||||||
|
|
||||||
|
const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60);
|
||||||
|
const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60);
|
||||||
|
|
||||||
const VALIDATOR_MNEMONIC: &str = "giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete";
|
const VALIDATOR_MNEMONIC: &str = "giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete";
|
||||||
|
|
||||||
pub fn new(
|
pub fn new(
|
||||||
@@ -119,7 +117,6 @@ impl LighthouseGethNode {
|
|||||||
+ AsRef<WalletConfiguration>
|
+ AsRef<WalletConfiguration>
|
||||||
+ AsRef<KurtosisConfiguration>
|
+ AsRef<KurtosisConfiguration>
|
||||||
+ Clone,
|
+ Clone,
|
||||||
use_fallback_gas_filler: bool,
|
|
||||||
) -> Self {
|
) -> Self {
|
||||||
let working_directory_configuration =
|
let working_directory_configuration =
|
||||||
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
|
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
|
||||||
@@ -137,8 +134,10 @@ impl LighthouseGethNode {
|
|||||||
Self {
|
Self {
|
||||||
/* Node Identifier */
|
/* Node Identifier */
|
||||||
id,
|
id,
|
||||||
ws_connection_string: String::default(),
|
connection_string: base_directory
|
||||||
http_connection_string: String::default(),
|
.join(Self::IPC_FILE_NAME)
|
||||||
|
.display()
|
||||||
|
.to_string(),
|
||||||
enclave_name: format!(
|
enclave_name: format!(
|
||||||
"enclave-{}-{}",
|
"enclave-{}-{}",
|
||||||
SystemTime::now()
|
SystemTime::now()
|
||||||
@@ -161,20 +160,15 @@ impl LighthouseGethNode {
|
|||||||
/* Spawned Processes */
|
/* Spawned Processes */
|
||||||
process: None,
|
process: None,
|
||||||
|
|
||||||
/* Prefunded Account Information */
|
|
||||||
prefunded_account_address: wallet.default_signer().address(),
|
|
||||||
|
|
||||||
/* Provider Related Fields */
|
/* Provider Related Fields */
|
||||||
wallet: wallet.clone(),
|
wallet: wallet.clone(),
|
||||||
nonce_manager: Default::default(),
|
nonce_manager: Default::default(),
|
||||||
persistent_http_provider: OnceCell::const_new(),
|
chain_id_filler: Default::default(),
|
||||||
persistent_ws_provider: OnceCell::const_new(),
|
|
||||||
use_fallback_gas_filler,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Create the node directory and call `geth init` to configure the genesis.
|
/// Create the node directory and call `geth init` to configure the genesis.
|
||||||
#[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> {
|
fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> {
|
||||||
self.init_directories()
|
self.init_directories()
|
||||||
.context("Failed to initialize the directories of the Lighthouse Geth node.")?;
|
.context("Failed to initialize the directories of the Lighthouse Geth node.")?;
|
||||||
@@ -204,12 +198,10 @@ impl LighthouseGethNode {
|
|||||||
execution_layer_extra_parameters: vec![
|
execution_layer_extra_parameters: vec![
|
||||||
"--nodiscover".to_string(),
|
"--nodiscover".to_string(),
|
||||||
"--cache=4096".to_string(),
|
"--cache=4096".to_string(),
|
||||||
"--txlookuplimit=0".to_string(),
|
"--txpool.globalslots=100000".to_string(),
|
||||||
"--gcmode=archive".to_string(),
|
"--txpool.globalqueue=100000".to_string(),
|
||||||
"--txpool.globalslots=500000".to_string(),
|
"--txpool.accountslots=128".to_string(),
|
||||||
"--txpool.globalqueue=500000".to_string(),
|
"--txpool.accountqueue=1024".to_string(),
|
||||||
"--txpool.accountslots=32768".to_string(),
|
|
||||||
"--txpool.accountqueue=32768".to_string(),
|
|
||||||
"--http.api=admin,engine,net,eth,web3,debug,txpool".to_string(),
|
"--http.api=admin,engine,net,eth,web3,debug,txpool".to_string(),
|
||||||
"--http.addr=0.0.0.0".to_string(),
|
"--http.addr=0.0.0.0".to_string(),
|
||||||
"--ws".to_string(),
|
"--ws".to_string(),
|
||||||
@@ -217,17 +209,15 @@ impl LighthouseGethNode {
|
|||||||
"--ws.port=8546".to_string(),
|
"--ws.port=8546".to_string(),
|
||||||
"--ws.api=eth,net,web3,txpool,engine".to_string(),
|
"--ws.api=eth,net,web3,txpool,engine".to_string(),
|
||||||
"--ws.origins=*".to_string(),
|
"--ws.origins=*".to_string(),
|
||||||
"--miner.gaslimit=30000000".to_string(),
|
|
||||||
],
|
],
|
||||||
consensus_layer_extra_parameters: vec![
|
consensus_layer_extra_parameters: vec![
|
||||||
"--disable-quic".to_string(),
|
|
||||||
"--disable-deposit-contract-sync".to_string(),
|
"--disable-deposit-contract-sync".to_string(),
|
||||||
],
|
],
|
||||||
}],
|
}],
|
||||||
network_parameters: NetworkParameters {
|
network_parameters: NetworkParameters {
|
||||||
preset: NetworkPreset::Mainnet,
|
preset: NetworkPreset::Mainnet,
|
||||||
seconds_per_slot: 12,
|
seconds_per_slot: 12,
|
||||||
network_id: CHAIN_ID,
|
network_id: 420420420,
|
||||||
deposit_contract_address: address!("0x00000000219ab540356cBB839Cbe05303d7705Fa"),
|
deposit_contract_address: address!("0x00000000219ab540356cBB839Cbe05303d7705Fa"),
|
||||||
altair_fork_epoch: 0,
|
altair_fork_epoch: 0,
|
||||||
bellatrix_fork_epoch: 0,
|
bellatrix_fork_epoch: 0,
|
||||||
@@ -238,13 +228,17 @@ impl LighthouseGethNode {
|
|||||||
num_validator_keys_per_node: 64,
|
num_validator_keys_per_node: 64,
|
||||||
genesis_delay: 10,
|
genesis_delay: 10,
|
||||||
prefunded_accounts: {
|
prefunded_accounts: {
|
||||||
let map = std::iter::once(self.prefunded_account_address)
|
let map = NetworkWallet::<Ethereum>::signer_addresses(&self.wallet)
|
||||||
.map(|address| (address, GenesisAccount::default().with_balance(U256::MAX)))
|
.map(|address| {
|
||||||
|
(
|
||||||
|
address,
|
||||||
|
GenesisAccount::default()
|
||||||
|
.with_balance(INITIAL_BALANCE.try_into().unwrap()),
|
||||||
|
)
|
||||||
|
})
|
||||||
.collect::<BTreeMap<_, _>>();
|
.collect::<BTreeMap<_, _>>();
|
||||||
serde_json::to_string(&map).unwrap()
|
serde_json::to_string(&map).unwrap()
|
||||||
},
|
},
|
||||||
gas_limit: 30_000_000,
|
|
||||||
genesis_gaslimit: 30_000_000,
|
|
||||||
},
|
},
|
||||||
wait_for_finalization: false,
|
wait_for_finalization: false,
|
||||||
port_publisher: Some(PortPublisherParameters {
|
port_publisher: Some(PortPublisherParameters {
|
||||||
@@ -254,12 +248,7 @@ impl LighthouseGethNode {
|
|||||||
public_port_start: Some(32000 + self.id as u16 * 1000),
|
public_port_start: Some(32000 + self.id as u16 * 1000),
|
||||||
},
|
},
|
||||||
),
|
),
|
||||||
consensus_layer_port_publisher_parameters: Some(
|
consensus_layer_port_publisher_parameters: Default::default(),
|
||||||
PortPublisherSingleItemParameters {
|
|
||||||
enabled: Some(true),
|
|
||||||
public_port_start: Some(59010 + self.id as u16 * 50),
|
|
||||||
},
|
|
||||||
),
|
|
||||||
}),
|
}),
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -272,7 +261,7 @@ impl LighthouseGethNode {
     }

     /// Spawn the go-ethereum node child process.
-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn spawn_process(&mut self) -> anyhow::Result<&mut Self> {
         let process = Process::new(
             None,
@@ -303,7 +292,6 @@ impl LighthouseGethNode {
             }),
         },
     )
-    .context("Failed to spawn the kurtosis enclave")
     .inspect_err(|err| {
         tracing::error!(?err, "Failed to spawn Kurtosis");
         self.shutdown().expect("Failed to shutdown kurtosis");
@@ -328,256 +316,164 @@ impl LighthouseGethNode {
             stdout
         };

-        self.http_connection_string = stdout
-            .split("el-1-geth-lighthouse")
-            .nth(1)
-            .and_then(|str| str.split(" rpc").nth(1))
-            .and_then(|str| str.split("->").nth(1))
-            .and_then(|str| str.split("\n").next())
-            .and_then(|str| str.trim().split(" ").next())
-            .map(|str| format!("http://{}", str.trim()))
-            .context("Failed to find the HTTP connection string of Kurtosis")?;
-        self.ws_connection_string = stdout
+        self.connection_string = stdout
             .split("el-1-geth-lighthouse")
             .nth(1)
             .and_then(|str| str.split("ws").nth(1))
             .and_then(|str| str.split("->").nth(1))
             .and_then(|str| str.split("\n").next())
-            .and_then(|str| str.trim().split(" ").next())
             .map(|str| format!("ws://{}", str.trim()))
             .context("Failed to find the WS connection string of Kurtosis")?;

-        info!(
-            http_connection_string = self.http_connection_string,
-            ws_connection_string = self.ws_connection_string,
-            "Discovered the connection strings for the node"
-        );
-
         Ok(self)
     }

-    #[instrument(
-        level = "info",
-        skip_all,
-        fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string),
-        err(Debug),
-    )]
-    #[allow(clippy::type_complexity)]
-    async fn ws_provider(&self) -> anyhow::Result<ConcreteProvider<Ethereum, Arc<EthereumWallet>>> {
-        self.persistent_ws_provider
-            .get_or_try_init(|| async move {
-                construct_concurrency_limited_provider::<Ethereum, _>(
-                    self.ws_connection_string.as_str(),
-                    FallbackGasFiller::default()
-                        .with_fallback_mechanism(self.use_fallback_gas_filler),
-                    ChainIdFiller::new(Some(CHAIN_ID)),
-                    NonceFiller::new(self.nonce_manager.clone()),
-                    self.wallet.clone(),
-                )
-                .await
-                .context("Failed to construct the provider")
-            })
-            .await
-            .cloned()
-    }
-
-    #[instrument(
-        level = "info",
-        skip_all,
-        fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string),
-        err(Debug),
-    )]
-    #[allow(clippy::type_complexity)]
-    async fn http_provider(
+    async fn provider(
         &self,
-    ) -> anyhow::Result<ConcreteProvider<Ethereum, Arc<EthereumWallet>>> {
-        self.persistent_http_provider
-            .get_or_try_init(|| async move {
-                construct_concurrency_limited_provider::<Ethereum, _>(
-                    self.http_connection_string.as_str(),
-                    FallbackGasFiller::default(),
-                    ChainIdFiller::new(Some(CHAIN_ID)),
-                    NonceFiller::new(self.nonce_manager.clone()),
-                    self.wallet.clone(),
-                )
+    ) -> anyhow::Result<FillProvider<impl TxFiller<Ethereum>, impl Provider<Ethereum>, Ethereum>>
+    {
+        ProviderBuilder::new()
+            .disable_recommended_fillers()
+            .filler(FallbackGasFiller::new(
+                25_000_000,
+                1_000_000_000,
+                1_000_000_000,
+            ))
+            .filler(self.chain_id_filler.clone())
+            .filler(NonceFiller::new(self.nonce_manager.clone()))
+            .wallet(self.wallet.clone())
+            .connect(&self.connection_string)
             .await
-            .context("Failed to construct the provider")
-            })
-            .await
-            .cloned()
-    }
+            .context("Failed to create the provider for Kurtosis")

-    /// Funds all of the accounts in the Ethereum wallet from the initially funded account.
-    #[instrument(
-        level = "info",
-        skip_all,
-        fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string),
-        err(Debug),
-    )]
-    async fn fund_all_accounts(&self) -> anyhow::Result<()> {
-        let mut full_block_subscriber = self
-            .ws_provider()
-            .await
-            .context("Failed to create the WS provider")?
-            .subscribe_full_blocks()
-            .into_stream()
-            .await
-            .context("Full block subscriber")?;
-
-        let mut tx_hashes = futures::future::try_join_all(
-            NetworkWallet::<Ethereum>::signer_addresses(self.wallet.as_ref())
-                .enumerate()
-                .map(|(nonce, address)| async move {
-                    let mut transaction = TransactionRequest::default()
-                        .from(self.prefunded_account_address)
-                        .to(address)
-                        .nonce(nonce as _)
-                        .value(INITIAL_BALANCE.try_into().unwrap());
-                    transaction.chain_id = Some(CHAIN_ID);
-                    self.submit_transaction(transaction).await
-                }),
-        )
-        .await
-        .context("Failed to submit all transactions")?
-        .into_iter()
-        .collect::<HashSet<_>>();
-
-        while let Some(block) = full_block_subscriber.next().await {
-            let Ok(block) = block else {
-                continue;
-            };
-
-            let block_number = block.number();
-            let block_timestamp = block.header.timestamp;
-            let block_transaction_count = block.transactions.len();
-
-            for hash in block.transactions.into_hashes().as_hashes().unwrap() {
-                tx_hashes.remove(hash);
-            }
-
-            info!(
-                block.number = block_number,
-                block.timestamp = block_timestamp,
-                block.transaction_count = block_transaction_count,
-                remaining_transactions = tx_hashes.len(),
-                "Discovered new block when funding accounts"
-            );
-
-            if tx_hashes.is_empty() {
-                break;
-            }
-        }
-
-        Ok(())
-    }
-
-    pub fn node_genesis(mut genesis: Genesis, wallet: &EthereumWallet) -> Genesis {
-        for signer_address in NetworkWallet::<Ethereum>::signer_addresses(&wallet) {
-            genesis
-                .alloc
-                .entry(signer_address)
-                .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
-        }
-        genesis
     }
 }

 impl EthereumNode for LighthouseGethNode {
-    fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>> {
-        Box::pin(async move { self.fund_all_accounts().await })
-    }
-
     fn id(&self) -> usize {
         self.id as _
     }

     fn connection_string(&self) -> &str {
-        &self.ws_connection_string
+        &self.connection_string
     }

     #[instrument(
         level = "info",
         skip_all,
-        fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string),
-        err,
-    )]
-    fn submit_transaction(
-        &self,
-        transaction: TransactionRequest,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TxHash>> + '_>> {
-        Box::pin(async move {
-            let provider = self
-                .http_provider()
-                .await
-                .context("Failed to create the provider for transaction submission")?;
-            let pending_transaction = provider
-                .send_transaction(transaction)
-                .await
-                .context("Failed to submit the transaction through the provider")?;
-            Ok(*pending_transaction.tx_hash())
-        })
-    }
-
-    #[instrument(
-        level = "info",
-        skip_all,
-        fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string),
-    )]
-    fn get_receipt(
-        &self,
-        tx_hash: TxHash,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
-        Box::pin(async move {
-            self.ws_provider()
-                .await
-                .context("Failed to create provider for getting the receipt")?
-                .get_transaction_receipt(tx_hash)
-                .await
-                .context("Failed to get the receipt of the transaction")?
-                .context("Failed to get the receipt of the transaction")
-        })
-    }
-
-    #[instrument(
-        level = "info",
-        skip_all,
-        fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string),
+        fields(geth_node_id = self.id, connection_string = self.connection_string),
         err,
     )]
     fn execute_transaction(
         &self,
         transaction: TransactionRequest,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
+    ) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::rpc::types::TransactionReceipt>> + '_>>
+    {
         Box::pin(async move {
-            self.provider()
+            let provider = self
+                .provider()
                 .await
-                .context("Failed to create provider for transaction submission")?
+                .context("Failed to create provider for transaction submission")?;
+
+            let pending_transaction = provider
                 .send_transaction(transaction)
                 .await
-                .context("Encountered an error when submitting a transaction")?
-                .get_receipt()
+                .inspect_err(|err| {
+                    tracing::error!(
+                        %err,
+                        "Encountered an error when submitting the transaction"
+                    )
+                })
+                .context("Failed to submit transaction to geth node")?;
+            let transaction_hash = *pending_transaction.tx_hash();
+
+            // The following is a fix for the "transaction indexing is in progress" error that we
+            // used to get. You can find more information on this in the following GH issue in geth
+            // https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
+            // before we can get the receipt of the transaction it needs to have been indexed by the
+            // node's indexer. Just because the transaction has been confirmed it doesn't mean that
+            // it has been indexed. When we call alloy's `get_receipt` it checks if the transaction
+            // was confirmed. If it has been, then it will call the `eth_getTransactionReceipt`
+            // method, which _might_ return the above error if the tx has not been indexed yet. So,
+            // we need to implement a retry mechanism for the receipt that keeps retrying to get it
+            // until it eventually works, but we only do that if the error we get back is the
+            // "transaction indexing is in progress" error or if the receipt is None.
+            //
+            // Getting the transaction indexed and obtaining a receipt can take a long time,
+            // especially when a lot of transactions are being submitted to the node. Thus, while
+            // initially we only allowed for 60 seconds of waiting with a 1 second delay in polling,
+            // we need to allow for a larger wait time. Therefore, in here we allow for 5 minutes of
+            // waiting with exponential backoff each time we attempt to get the receipt and find
+            // that it's not available.
+            let provider = Arc::new(provider);
+            poll(
+                Self::RECEIPT_POLLING_DURATION,
+                PollingWaitBehavior::Constant(Duration::from_millis(200)),
+                move || {
+                    let provider = provider.clone();
+                    async move {
+                        match provider.get_transaction_receipt(transaction_hash).await {
+                            Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
+                            Ok(None) => Ok(ControlFlow::Continue(())),
+                            Err(error) => {
+                                let error_string = error.to_string();
+                                match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) {
+                                    true => Ok(ControlFlow::Continue(())),
+                                    false => Err(error.into()),
+                                }
+                            }
+                        }
+                    }
+                },
+            )
+            .instrument(tracing::info_span!(
+                "Awaiting transaction receipt",
+                ?transaction_hash
+            ))
             .await
-            .context("Failed to get the receipt for the transaction")
         })
     }

-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn trace_transaction(
         &self,
         tx_hash: TxHash,
         trace_options: GethDebugTracingOptions,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
+    ) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::rpc::types::trace::geth::GethTrace>> + '_>>
+    {
         Box::pin(async move {
+            let provider = Arc::new(
                 self.provider()
                     .await
-                .context("Failed to create provider for tracing")?
+                    .context("Failed to create provider for tracing")?,
+            );
+            poll(
+                Self::TRACE_POLLING_DURATION,
+                PollingWaitBehavior::Constant(Duration::from_millis(200)),
+                move || {
+                    let provider = provider.clone();
+                    let trace_options = trace_options.clone();
+                    async move {
+                        match provider
                             .debug_trace_transaction(tx_hash, trace_options)
                             .await
-                .context("Failed to get the transaction trace")
+                        {
+                            Ok(trace) => Ok(ControlFlow::Break(trace)),
+                            Err(error) => {
+                                let error_string = error.to_string();
+                                match error_string.contains(Self::TRANSACTION_TRACING_ERROR) {
+                                    true => Ok(ControlFlow::Continue(())),
+                                    false => Err(error.into()),
+                                }
+                            }
+                        }
+                    }
+                },
+            )
+            .await
         })
     }

-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn state_diff(
         &self,
         tx_hash: TxHash,
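The poll-based receipt and trace fetches above retry until the node has indexed the transaction. A minimal sketch of the same retry loop, assuming the anyhow and tokio crates; poll_until and its parameters are illustrative stand-ins for the repo's own poll and PollingWaitBehavior helpers, which this diff only shows at their call sites:

    use std::ops::ControlFlow;
    use std::time::{Duration, Instant};

    /// Keep invoking `attempt` until it breaks with a value, returns an error,
    /// or the overall deadline elapses; wait `interval` between attempts.
    async fn poll_until<T, F, Fut>(
        deadline: Duration,
        interval: Duration,
        mut attempt: F,
    ) -> anyhow::Result<T>
    where
        F: FnMut() -> Fut,
        Fut: std::future::Future<Output = anyhow::Result<ControlFlow<T>>>,
    {
        let started = Instant::now();
        loop {
            match attempt().await? {
                ControlFlow::Break(value) => return Ok(value),
                ControlFlow::Continue(()) => {
                    if started.elapsed() >= deadline {
                        anyhow::bail!("polling timed out after {deadline:?}");
                    }
                    tokio::time::sleep(interval).await;
                }
            }
        }
    }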
@@ -601,13 +497,13 @@ impl EthereumNode for LighthouseGethNode {
         })
     }

-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn balance_of(
         &self,
         address: Address,
     ) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
         Box::pin(async move {
-            self.ws_provider()
+            self.provider()
                 .await
                 .context("Failed to get the Geth provider")?
                 .get_balance(address)
@@ -616,14 +512,14 @@ impl EthereumNode for LighthouseGethNode {
         })
     }

-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn latest_state_proof(
         &self,
         address: Address,
         keys: Vec<StorageKey>,
     ) -> Pin<Box<dyn Future<Output = anyhow::Result<EIP1186AccountProofResponse>> + '_>> {
         Box::pin(async move {
-            self.ws_provider()
+            self.provider()
                 .await
                 .context("Failed to get the Geth provider")?
                 .get_proof(address, keys)
@@ -633,13 +529,13 @@ impl EthereumNode for LighthouseGethNode {
         })
     }

-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    // #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn resolver(
         &self,
     ) -> Pin<Box<dyn Future<Output = anyhow::Result<Arc<dyn ResolverApi + '_>>> + '_>> {
         Box::pin(async move {
             let id = self.id;
-            let provider = self.ws_provider().await?;
+            let provider = self.provider().await?;
             Ok(Arc::new(LighthouseGethNodeResolver { id, provider }) as Arc<dyn ResolverApi>)
         })
     }
@@ -647,57 +543,6 @@ impl EthereumNode for LighthouseGethNode {
     fn evm_version(&self) -> EVMVersion {
         EVMVersion::Cancun
     }

-    fn subscribe_to_full_blocks_information(
-        &self,
-    ) -> Pin<
-        Box<
-            dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
-                + '_,
-        >,
-    > {
-        Box::pin(async move {
-            let provider = self.ws_provider().await?;
-            let block_subscription = provider.subscribe_full_blocks().channel_size(1024);
-            let block_stream = block_subscription
-                .into_stream()
-                .await
-                .context("Failed to create the block stream")?;
-
-            let mined_block_information_stream = block_stream.filter_map(|block| async {
-                let block = block.ok()?;
-                Some(MinedBlockInformation {
-                    ethereum_block_information: EthereumMinedBlockInformation {
-                        block_number: block.number(),
-                        block_timestamp: block.header.timestamp,
-                        mined_gas: block.header.gas_used as _,
-                        block_gas_limit: block.header.gas_limit as _,
-                        transaction_hashes: block
-                            .transactions
-                            .into_hashes()
-                            .as_hashes()
-                            .expect("Must be hashes")
-                            .to_vec(),
-                    },
-                    substrate_block_information: None,
-                    tx_counts: Default::default(),
-                })
-            });
-
-            Ok(Box::pin(mined_block_information_stream)
-                as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
-        })
-    }
-
-    fn provider(
-        &self,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
-    {
-        Box::pin(
-            self.http_provider()
-                .map(|provider| provider.map(|provider| provider.erased())),
-        )
-    }
 }

 pub struct LighthouseGethNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereum>> {
@@ -708,14 +553,14 @@ pub struct LighthouseGethNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereum>> {
 impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
     for LighthouseGethNodeResolver<F, P>
 {
-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn chain_id(
         &self,
     ) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::primitives::ChainId>> + '_>> {
         Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) })
     }

-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn transaction_gas_price(
         &self,
         tx_hash: TxHash,
@@ -729,7 +574,7 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
         })
     }

-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn block_gas_limit(
         &self,
         number: BlockNumberOrTag,
@@ -744,7 +589,7 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
         })
     }

-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn block_coinbase(
         &self,
         number: BlockNumberOrTag,
@@ -759,7 +604,7 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
         })
     }

-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn block_difficulty(
         &self,
         number: BlockNumberOrTag,
@@ -774,7 +619,7 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
         })
     }

-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn block_base_fee(
         &self,
         number: BlockNumberOrTag,
@@ -794,7 +639,7 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
         })
     }

-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn block_hash(
         &self,
         number: BlockNumberOrTag,
@@ -809,7 +654,7 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
         })
     }

-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn block_timestamp(
         &self,
         number: BlockNumberOrTag,
@@ -824,55 +669,29 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
         })
     }

-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn last_block_number(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockNumber>> + '_>> {
         Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) })
     }
 }

 impl Node for LighthouseGethNode {
-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn shutdown(&mut self) -> anyhow::Result<()> {
-        let mut child = Command::new(self.kurtosis_binary_path.as_path())
+        if !Command::new(self.kurtosis_binary_path.as_path())
             .arg("enclave")
             .arg("rm")
             .arg("-f")
             .arg(self.enclave_name.as_str())
-            .stdout(Stdio::piped())
-            .stderr(Stdio::piped())
+            .stdout(Stdio::null())
+            .stderr(Stdio::null())
             .spawn()
-            .expect("Failed to spawn the enclave kill command");
-
-        if !child
+            .expect("Failed to spawn the enclave kill command")
             .wait()
             .expect("Failed to wait for the enclave kill command")
             .success()
         {
-            let stdout = {
-                let mut stdout = String::default();
-                child
-                    .stdout
-                    .take()
-                    .expect("Should be piped")
-                    .read_to_string(&mut stdout)
-                    .context("Failed to read stdout of kurtosis inspect to string")?;
-                stdout
-            };
-            let stderr = {
-                let mut stderr = String::default();
-                child
-                    .stderr
-                    .take()
-                    .expect("Should be piped")
-                    .read_to_string(&mut stderr)
-                    .context("Failed to read stderr of kurtosis inspect to string")?;
-                stderr
-            };
-
-            panic!(
-                "Failed to shut down the enclave {} - stdout: {stdout}, stderr: {stderr}",
-                self.enclave_name
-            )
+            panic!("Failed to shut down the enclave {}", self.enclave_name)
         }

         drop(self.process.take());
@@ -880,13 +699,13 @@ impl Node for LighthouseGethNode {
         Ok(())
     }

-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
         self.init(genesis)?.spawn_process()?;
         Ok(())
     }

-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn version(&self) -> anyhow::Result<String> {
         let output = Command::new(&self.kurtosis_binary_path)
             .arg("version")
@@ -903,7 +722,7 @@ impl Node for LighthouseGethNode {
 }

 impl Drop for LighthouseGethNode {
-    #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))]
+    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
     fn drop(&mut self) {
         self.shutdown().expect("Failed to shutdown")
     }
@@ -972,8 +791,6 @@ struct NetworkParameters {
     pub num_validator_keys_per_node: u64,

     pub genesis_delay: u64,
-    pub genesis_gaslimit: u64,
-    pub gas_limit: u64,

     pub prefunded_accounts: String,
 }
@@ -1036,9 +853,7 @@ mod tests {
     use super::*;

     fn test_config() -> TestExecutionContext {
-        let mut config = TestExecutionContext::default();
-        config.wallet_configuration.additional_keys = 100;
-        config
+        TestExecutionContext::default()
     }

     fn new_node() -> (TestExecutionContext, LighthouseGethNode) {
@@ -1061,7 +876,7 @@ mod tests {
         let _guard = NODE_START_MUTEX.lock().unwrap();

         let context = test_config();
-        let mut node = LighthouseGethNode::new(&context, true);
+        let mut node = LighthouseGethNode::new(&context);
         node.init(context.genesis_configuration.genesis().unwrap().clone())
             .expect("Failed to initialize the node")
             .spawn_process()
@@ -1070,11 +885,9 @@ mod tests {
     }

     #[tokio::test]
-    #[ignore = "Ignored since they take a long time to run"]
     async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
         // Arrange
         let (context, node) = new_node();
-        node.fund_all_accounts().await.expect("Failed");

         let account_address = context
             .wallet_configuration
@@ -1093,7 +906,6 @@ mod tests {
     }

     #[test]
-    #[ignore = "Ignored since they take a long time to run"]
     fn version_works() {
         // Arrange
         let (_context, node) = new_node();
@@ -1110,7 +922,6 @@ mod tests {
     }

     #[tokio::test]
-    #[ignore = "Ignored since they take a long time to run"]
     async fn can_get_chain_id_from_node() {
         // Arrange
         let (_context, node) = new_node();
@@ -1124,7 +935,6 @@ mod tests {
     }

     #[tokio::test]
-    #[ignore = "Ignored since they take a long time to run"]
     async fn can_get_gas_limit_from_node() {
         // Arrange
         let (_context, node) = new_node();
@@ -1142,7 +952,6 @@ mod tests {
     }

     #[tokio::test]
-    #[ignore = "Ignored since they take a long time to run"]
     async fn can_get_coinbase_from_node() {
         // Arrange
         let (_context, node) = new_node();
@@ -1160,7 +969,6 @@ mod tests {
     }

     #[tokio::test]
-    #[ignore = "Ignored since they take a long time to run"]
     async fn can_get_block_difficulty_from_node() {
         // Arrange
         let (_context, node) = new_node();
@@ -1178,7 +986,6 @@ mod tests {
     }

     #[tokio::test]
-    #[ignore = "Ignored since they take a long time to run"]
     async fn can_get_block_hash_from_node() {
         // Arrange
         let (_context, node) = new_node();
@@ -1196,7 +1003,6 @@ mod tests {
     }

     #[tokio::test]
-    #[ignore = "Ignored since they take a long time to run"]
     async fn can_get_block_timestamp_from_node() {
         // Arrange
         let (_context, node) = new_node();
@@ -1214,7 +1020,6 @@ mod tests {
     }

     #[tokio::test]
-    #[ignore = "Ignored since they take a long time to run"]
     async fn can_get_block_number_from_node() {
         // Arrange
         let (_context, node) = new_node();
@@ -1,5 +0,0 @@
-pub mod geth;
-pub mod lighthouse_geth;
-pub mod polkadot_omni_node;
-pub mod substrate;
-pub mod zombienet;
@@ -1,791 +0,0 @@
-use std::{
-    fs::{File, create_dir_all, remove_dir_all},
-    path::{Path, PathBuf},
-    pin::Pin,
-    process::{Command, Stdio},
-    sync::{
-        Arc,
-        atomic::{AtomicU32, Ordering},
-    },
-    time::Duration,
-};
-
-use alloy::{
-    eips::BlockNumberOrTag,
-    genesis::Genesis,
-    network::{Ethereum, EthereumWallet, NetworkWallet},
-    primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
-    providers::{
-        Provider,
-        ext::DebugApi,
-        fillers::{CachedNonceManager, ChainIdFiller, NonceFiller},
-    },
-    rpc::types::{
-        EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
-        trace::geth::{
-            DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame,
-        },
-    },
-};
-use anyhow::Context as _;
-use futures::{FutureExt, Stream, StreamExt};
-use revive_common::EVMVersion;
-use revive_dt_common::fs::clear_directory;
-use revive_dt_format::traits::ResolverApi;
-use serde_json::json;
-use sp_core::crypto::Ss58Codec;
-use sp_runtime::AccountId32;
-
-use revive_dt_config::*;
-use revive_dt_node_interaction::EthereumNode;
-use revive_dt_report::{
-    EthereumMinedBlockInformation, MinedBlockInformation, SubstrateMinedBlockInformation,
-};
-use subxt::{OnlineClient, SubstrateConfig};
-use tokio::sync::OnceCell;
-use tracing::{instrument, trace};
-
-use crate::{
-    Node,
-    constants::INITIAL_BALANCE,
-    helpers::{Process, ProcessReadinessWaitBehavior},
-    provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
-};
-
-static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
-
-/// The number of blocks that should be cached by the polkadot-omni-node and the eth-rpc.
-const NUMBER_OF_CACHED_BLOCKS: u32 = 100_000;
-
-/// A node implementation for the polkadot-omni-node.
-#[derive(Debug)]
-pub struct PolkadotOmnichainNode {
-    /// The id of the node.
-    id: u32,
-
-    /// The path of the polkadot-omni-chain node binary.
-    polkadot_omnichain_node_binary_path: PathBuf,
-    /// The path of the eth-rpc binary.
-    eth_rpc_binary_path: PathBuf,
-    /// The path of the runtime's WASM that this node will be spawned with.
-    chain_spec_path: Option<PathBuf>,
-    /// The path of the base directory which contains all of the stored data for this node.
-    base_directory_path: PathBuf,
-    /// The path of the logs directory which contains all of the stored logs.
-    logs_directory_path: PathBuf,
-
-    /// Defines the amount of time to wait before considering that the node start has timed out.
-    node_start_timeout: Duration,
-
-    /// The id of the parachain that this node will be spawning.
-    parachain_id: Option<usize>,
-    /// The block time.
-    block_time: Duration,
-
-    /// The node's process.
-    polkadot_omnichain_node_process: Option<Process>,
-    /// The eth-rpc's process.
-    eth_rpc_process: Option<Process>,
-
-    /// The URL of the eth-rpc.
-    rpc_url: String,
-    /// The wallet object that's used to sign any transaction submitted through this node.
-    wallet: Arc<EthereumWallet>,
-    /// The nonce manager used to populate nonces for all transactions submitted through this node.
-    nonce_manager: CachedNonceManager,
-    /// The provider used for all RPC interactions with the RPC of this node.
-    provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
-
-    /// A boolean that controls if the fallback gas filler should be used or not.
-    use_fallback_gas_filler: bool,
-}
-
-impl PolkadotOmnichainNode {
-    const BASE_DIRECTORY: &str = "polkadot-omni-node";
-    const LOGS_DIRECTORY: &str = "logs";
-
-    const POLKADOT_OMNICHAIN_NODE_READY_MARKER: &str = "Running JSON-RPC server";
-    const ETH_RPC_READY_MARKER: &str = "Running JSON-RPC server";
-    const CHAIN_SPEC_JSON_FILE: &str = "template_chainspec.json";
-    const BASE_POLKADOT_OMNICHAIN_NODE_RPC_PORT: u16 = 9944;
-    const BASE_ETH_RPC_PORT: u16 = 8545;
-
-    const POLKADOT_OMNICHAIN_NODE_LOG_ENV: &str =
-        "error,evm=debug,sc_rpc_server=info,runtime::revive=debug";
-    const RPC_LOG_ENV: &str = "info,eth-rpc=debug";
-
-    pub fn new(
-        context: impl AsRef<WorkingDirectoryConfiguration>
-            + AsRef<EthRpcConfiguration>
-            + AsRef<WalletConfiguration>
-            + AsRef<PolkadotOmnichainNodeConfiguration>,
-        use_fallback_gas_filler: bool,
-    ) -> Self {
-        let polkadot_omnichain_node_configuration =
-            AsRef::<PolkadotOmnichainNodeConfiguration>::as_ref(&context);
-        let working_directory_path =
-            AsRef::<WorkingDirectoryConfiguration>::as_ref(&context).as_path();
-        let eth_rpc_path = AsRef::<EthRpcConfiguration>::as_ref(&context)
-            .path
-            .as_path();
-        let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
-
-        let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
-        let base_directory = working_directory_path
-            .join(Self::BASE_DIRECTORY)
-            .join(id.to_string());
-        let logs_directory = base_directory.join(Self::LOGS_DIRECTORY);
-
-        Self {
-            id,
-            polkadot_omnichain_node_binary_path: polkadot_omnichain_node_configuration
-                .path
-                .to_path_buf(),
-            eth_rpc_binary_path: eth_rpc_path.to_path_buf(),
-            chain_spec_path: polkadot_omnichain_node_configuration
-                .chain_spec_path
-                .clone(),
-            base_directory_path: base_directory,
-            logs_directory_path: logs_directory,
-            parachain_id: polkadot_omnichain_node_configuration.parachain_id,
-            block_time: polkadot_omnichain_node_configuration.block_time,
-            polkadot_omnichain_node_process: Default::default(),
-            eth_rpc_process: Default::default(),
-            rpc_url: Default::default(),
-            wallet,
-            nonce_manager: Default::default(),
-            provider: Default::default(),
-            use_fallback_gas_filler,
-            node_start_timeout: polkadot_omnichain_node_configuration.start_timeout_ms,
-        }
-    }
-
-    fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> {
-        trace!("Removing the various directories");
-        let _ = remove_dir_all(self.base_directory_path.as_path());
-        let _ = clear_directory(&self.base_directory_path);
-        let _ = clear_directory(&self.logs_directory_path);
-
-        trace!("Creating the various directories");
-        create_dir_all(&self.base_directory_path)
-            .context("Failed to create base directory for polkadot-omni-node node")?;
-        create_dir_all(&self.logs_directory_path)
-            .context("Failed to create logs directory for polkadot-omni-node node")?;
-
-        let template_chainspec_path = self.base_directory_path.join(Self::CHAIN_SPEC_JSON_FILE);
-
-        let chainspec_json = Self::node_genesis(
-            &self.wallet,
-            self.chain_spec_path
-                .as_ref()
-                .context("No runtime path provided")?,
-        )
-        .context("Failed to prepare the chainspec command")?;
-
-        serde_json::to_writer_pretty(
-            std::fs::File::create(&template_chainspec_path)
-                .context("Failed to create polkadot-omni-node template chainspec file")?,
-            &chainspec_json,
-        )
-        .context("Failed to write polkadot-omni-node template chainspec JSON")?;
-
-        Ok(self)
-    }
-
-    fn spawn_process(&mut self) -> anyhow::Result<()> {
-        // Error out if the runtime's path or the parachain id are not set which means that the
-        // arguments we require were not provided.
-        self.chain_spec_path
-            .as_ref()
-            .context("No WASM path provided for the runtime")?;
-        self.parachain_id
-            .as_ref()
-            .context("No argument provided for the parachain-id")?;
-
-        let polkadot_omnichain_node_rpc_port =
-            Self::BASE_POLKADOT_OMNICHAIN_NODE_RPC_PORT + self.id as u16;
-        let eth_rpc_port = Self::BASE_ETH_RPC_PORT + self.id as u16;
-
-        let chainspec_path = self.base_directory_path.join(Self::CHAIN_SPEC_JSON_FILE);
-
-        self.rpc_url = format!("http://127.0.0.1:{eth_rpc_port}");
-
-        let polkadot_omnichain_node_process = Process::new(
-            "node",
-            self.logs_directory_path.as_path(),
-            self.polkadot_omnichain_node_binary_path.as_path(),
-            |command, stdout_file, stderr_file| {
-                command
-                    .arg("--log")
-                    .arg(Self::POLKADOT_OMNICHAIN_NODE_LOG_ENV)
-                    .arg("--dev-block-time")
-                    .arg(self.block_time.as_millis().to_string())
-                    .arg("--rpc-port")
-                    .arg(polkadot_omnichain_node_rpc_port.to_string())
-                    .arg("--base-path")
-                    .arg(self.base_directory_path.as_path())
-                    .arg("--no-prometheus")
-                    .arg("--no-hardware-benchmarks")
-                    .arg("--authoring")
-                    .arg("slot-based")
-                    .arg("--chain")
-                    .arg(chainspec_path)
-                    .arg("--name")
-                    .arg(format!("polkadot-omni-node-{}", self.id))
-                    .arg("--rpc-methods")
-                    .arg("unsafe")
-                    .arg("--rpc-cors")
-                    .arg("all")
-                    .arg("--rpc-max-connections")
-                    .arg(u32::MAX.to_string())
-                    .arg("--pool-limit")
-                    .arg(u32::MAX.to_string())
-                    .arg("--pool-kbytes")
-                    .arg(u32::MAX.to_string())
-                    .arg("--state-pruning")
-                    .arg(NUMBER_OF_CACHED_BLOCKS.to_string())
-                    .env("RUST_LOG", Self::POLKADOT_OMNICHAIN_NODE_LOG_ENV)
-                    .stdout(stdout_file)
-                    .stderr(stderr_file);
-            },
-            ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
-                max_wait_duration: self.node_start_timeout,
-                check_function: Box::new(|_, stderr_line| match stderr_line {
-                    Some(line) => Ok(line.contains(Self::POLKADOT_OMNICHAIN_NODE_READY_MARKER)),
-                    None => Ok(false),
-                }),
-            },
-        );
-
-        match polkadot_omnichain_node_process {
-            Ok(process) => self.polkadot_omnichain_node_process = Some(process),
-            Err(err) => {
-                tracing::error!(
-                    ?err,
-                    "Failed to start polkadot-omni-node, shutting down gracefully"
-                );
-                self.shutdown().context(
-                    "Failed to gracefully shutdown after polkadot-omni-node start error",
-                )?;
-                return Err(err);
-            }
-        }
-
-        let eth_rpc_process = Process::new(
-            "eth-rpc",
-            self.logs_directory_path.as_path(),
-            self.eth_rpc_binary_path.as_path(),
-            |command, stdout_file, stderr_file| {
-                command
-                    .arg("--dev")
-                    .arg("--rpc-port")
-                    .arg(eth_rpc_port.to_string())
-                    .arg("--node-rpc-url")
-                    .arg(format!("ws://127.0.0.1:{polkadot_omnichain_node_rpc_port}"))
-                    .arg("--rpc-max-connections")
-                    .arg(u32::MAX.to_string())
-                    .arg("--index-last-n-blocks")
-                    .arg(NUMBER_OF_CACHED_BLOCKS.to_string())
-                    .arg("--cache-size")
-                    .arg(NUMBER_OF_CACHED_BLOCKS.to_string())
-                    .env("RUST_LOG", Self::RPC_LOG_ENV)
-                    .stdout(stdout_file)
-                    .stderr(stderr_file);
-            },
-            ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
-                max_wait_duration: Duration::from_secs(30),
-                check_function: Box::new(|_, stderr_line| match stderr_line {
-                    Some(line) => Ok(line.contains(Self::ETH_RPC_READY_MARKER)),
-                    None => Ok(false),
-                }),
-            },
-        );
-        match eth_rpc_process {
-            Ok(process) => self.eth_rpc_process = Some(process),
-            Err(err) => {
-                tracing::error!(?err, "Failed to start eth-rpc, shutting down gracefully");
-                self.shutdown()
-                    .context("Failed to gracefully shutdown after eth-rpc start error")?;
-                return Err(err);
-            }
-        }
-
-        Ok(())
-    }
-
-    fn eth_to_substrate_address(address: &Address) -> String {
-        let eth_bytes = address.0.0;
-
-        let mut padded = [0xEEu8; 32];
-        padded[..20].copy_from_slice(&eth_bytes);
-
-        let account_id = AccountId32::from(padded);
-        account_id.to_ss58check()
-    }
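eth_to_substrate_address above derives a Substrate account from an Ethereum address by padding the 20 address bytes with 0xEE up to 32 bytes and SS58-encoding the result. A standalone sketch of that mapping, assuming the sp-core and sp-runtime crates (the function name and the sample input are illustrative):

    use sp_core::crypto::Ss58Codec;
    use sp_runtime::AccountId32;

    /// Map a 20-byte Ethereum address to an SS58 string: the address occupies
    /// the first 20 bytes of the account id, the remaining 12 bytes are 0xEE.
    fn eth_to_ss58(eth: [u8; 20]) -> String {
        let mut padded = [0xEEu8; 32];
        padded[..20].copy_from_slice(&eth);
        AccountId32::from(padded).to_ss58check()
    }

    fn main() {
        // Illustrative input (all zeros); a real caller would pass a wallet address.
        println!("{}", eth_to_ss58([0u8; 20]));
    }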
-
-    pub fn eth_rpc_version(&self) -> anyhow::Result<String> {
-        let output = Command::new(&self.eth_rpc_binary_path)
-            .arg("--version")
-            .stdin(Stdio::null())
-            .stdout(Stdio::piped())
-            .stderr(Stdio::null())
-            .spawn()?
-            .wait_with_output()?
-            .stdout;
-        Ok(String::from_utf8_lossy(&output).trim().to_string())
-    }
-
-    async fn provider(&self) -> anyhow::Result<ConcreteProvider<Ethereum, Arc<EthereumWallet>>> {
-        self.provider
-            .get_or_try_init(|| async move {
-                construct_concurrency_limited_provider::<Ethereum, _>(
-                    self.rpc_url.as_str(),
-                    FallbackGasFiller::default()
-                        .with_fallback_mechanism(self.use_fallback_gas_filler),
-                    ChainIdFiller::default(),
-                    NonceFiller::new(self.nonce_manager.clone()),
-                    self.wallet.clone(),
-                )
-                .await
-                .context("Failed to construct the provider")
-            })
-            .await
-            .cloned()
-    }
-
-    pub fn node_genesis(
-        wallet: &EthereumWallet,
-        chain_spec_path: &Path,
-    ) -> anyhow::Result<serde_json::Value> {
-        let unmodified_chainspec_file =
-            File::open(chain_spec_path).context("Failed to open the unmodified chainspec file")?;
-        let mut chainspec_json =
-            serde_json::from_reader::<_, serde_json::Value>(&unmodified_chainspec_file)
-                .context("Failed to read the unmodified chainspec JSON")?;
-
-        let existing_chainspec_balances =
-            chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"]
-                .as_array_mut()
-                .expect("Can't fail");
-
-        for address in NetworkWallet::<Ethereum>::signer_addresses(wallet) {
-            let substrate_address = Self::eth_to_substrate_address(&address);
-            let balance = INITIAL_BALANCE;
-            existing_chainspec_balances.push(json!((substrate_address, balance)));
-        }
-
-        Ok(chainspec_json)
-    }
-}
-
-impl EthereumNode for PolkadotOmnichainNode {
-    fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>> {
-        Box::pin(async move { Ok(()) })
-    }
-
-    fn id(&self) -> usize {
-        self.id as _
-    }
-
-    fn connection_string(&self) -> &str {
-        &self.rpc_url
-    }
-
-    fn submit_transaction(
-        &self,
-        transaction: TransactionRequest,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TxHash>> + '_>> {
-        Box::pin(async move {
-            let provider = self
-                .provider()
-                .await
-                .context("Failed to create the provider for transaction submission")?;
-            let pending_transaction = provider
-                .send_transaction(transaction)
-                .await
-                .context("Failed to submit the transaction through the provider")?;
-            Ok(*pending_transaction.tx_hash())
-        })
-    }
-
-    fn get_receipt(
-        &self,
-        tx_hash: TxHash,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
-        Box::pin(async move {
-            self.provider()
-                .await
-                .context("Failed to create provider for getting the receipt")?
-                .get_transaction_receipt(tx_hash)
-                .await
-                .context("Failed to get the receipt of the transaction")?
-                .context("Failed to get the receipt of the transaction")
-        })
-    }
-
-    fn execute_transaction(
-        &self,
-        transaction: TransactionRequest,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
-        Box::pin(async move {
-            self.provider()
-                .await
-                .context("Failed to create provider for transaction submission")?
-                .send_transaction(transaction)
-                .await
-                .context("Encountered an error when submitting a transaction")?
-                .get_receipt()
-                .await
-                .context("Failed to get the receipt for the transaction")
-        })
-    }
-
-    fn trace_transaction(
-        &self,
-        tx_hash: TxHash,
-        trace_options: GethDebugTracingOptions,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
-        Box::pin(async move {
-            self.provider()
-                .await
-                .context("Failed to create provider for debug tracing")?
-                .debug_trace_transaction(tx_hash, trace_options)
-                .await
-                .context("Failed to obtain debug trace from eth-proxy")
-        })
-    }
-
-    fn state_diff(
-        &self,
-        tx_hash: TxHash,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<DiffMode>> + '_>> {
-        Box::pin(async move {
-            let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
-                diff_mode: Some(true),
-                disable_code: None,
-                disable_storage: None,
-            });
-            match self
-                .trace_transaction(tx_hash, trace_options)
-                .await?
-                .try_into_pre_state_frame()?
-            {
-                PreStateFrame::Diff(diff) => Ok(diff),
-                _ => anyhow::bail!("expected a diff mode trace"),
-            }
-        })
-    }
-
-    fn balance_of(
-        &self,
-        address: Address,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
-        Box::pin(async move {
-            self.provider()
-                .await
-                .context("Failed to get the eth-rpc provider")?
-                .get_balance(address)
-                .await
-                .map_err(Into::into)
-        })
-    }
-
-    fn latest_state_proof(
-        &self,
-        address: Address,
-        keys: Vec<StorageKey>,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<EIP1186AccountProofResponse>> + '_>> {
-        Box::pin(async move {
-            self.provider()
-                .await
-                .context("Failed to get the eth-rpc provider")?
-                .get_proof(address, keys)
-                .latest()
-                .await
-                .map_err(Into::into)
-        })
-    }
-
-    fn resolver(
-        &self,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Arc<dyn ResolverApi + '_>>> + '_>> {
-        Box::pin(async move {
-            let id = self.id;
-            let provider = self.provider().await?;
-            Ok(Arc::new(PolkadotOmnichainNodeResolver { id, provider }) as Arc<dyn ResolverApi>)
-        })
-    }
-
-    fn evm_version(&self) -> EVMVersion {
-        EVMVersion::Cancun
-    }
-
-    fn subscribe_to_full_blocks_information(
-        &self,
-    ) -> Pin<
-        Box<
-            dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
-                + '_,
-        >,
-    > {
-        #[subxt::subxt(runtime_metadata_path = "../../assets/revive_metadata.scale")]
-        pub mod revive {}
-
-        Box::pin(async move {
-            let polkadot_omnichain_node_rpc_port =
-                Self::BASE_POLKADOT_OMNICHAIN_NODE_RPC_PORT + self.id as u16;
-            let polkadot_omnichain_node_rpc_url =
-                format!("ws://127.0.0.1:{polkadot_omnichain_node_rpc_port}");
-            let api = OnlineClient::<SubstrateConfig>::from_url(polkadot_omnichain_node_rpc_url)
-                .await
-                .context("Failed to create subxt rpc client")?;
-            let provider = self.provider().await.context("Failed to create provider")?;
-
-            let block_stream = api
-                .blocks()
-                .subscribe_all()
-                .await
-                .context("Failed to subscribe to blocks")?;
-
-            let mined_block_information_stream = block_stream.filter_map(move |block| {
-                let api = api.clone();
-                let provider = provider.clone();
-
-                async move {
-                    let substrate_block = block.ok()?;
-                    let revive_block = provider
-                        .get_block_by_number(
-                            BlockNumberOrTag::Number(substrate_block.number() as _),
-                        )
-                        .await
-                        .expect("TODO: Remove")
-                        .expect("TODO: Remove");
-
-                    let used = api
-                        .storage()
-                        .at(substrate_block.reference())
-                        .fetch_or_default(&revive::storage().system().block_weight())
-                        .await
-                        .expect("TODO: Remove");
-
-                    let block_ref_time = (used.normal.ref_time as u128)
-                        + (used.operational.ref_time as u128)
-                        + (used.mandatory.ref_time as u128);
-                    let block_proof_size = (used.normal.proof_size as u128)
-                        + (used.operational.proof_size as u128)
-                        + (used.mandatory.proof_size as u128);
-
-                    let limits = api
-                        .constants()
-                        .at(&revive::constants().system().block_weights())
-                        .expect("TODO: Remove");
-
-                    let max_ref_time = limits.max_block.ref_time;
-                    let max_proof_size = limits.max_block.proof_size;
-
-                    Some(MinedBlockInformation {
-                        ethereum_block_information: EthereumMinedBlockInformation {
-                            block_number: revive_block.number(),
-                            block_timestamp: revive_block.header.timestamp,
-                            mined_gas: revive_block.header.gas_used as _,
-                            block_gas_limit: revive_block.header.gas_limit as _,
-                            transaction_hashes: revive_block
-                                .transactions
-                                .into_hashes()
-                                .as_hashes()
-                                .expect("Must be hashes")
-                                .to_vec(),
-                        },
-                        substrate_block_information: Some(SubstrateMinedBlockInformation {
-                            ref_time: block_ref_time,
-                            max_ref_time,
-                            proof_size: block_proof_size,
-                            max_proof_size,
-                        }),
-                        tx_counts: Default::default(),
-                    })
-                }
-            });
-
-            Ok(Box::pin(mined_block_information_stream)
-                as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
-        })
-    }
-
-    fn provider(
-        &self,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
-    {
-        Box::pin(
-            self.provider()
-                .map(|provider| provider.map(|provider| provider.erased())),
-        )
-    }
-}
-
-pub struct PolkadotOmnichainNodeResolver {
-    id: u32,
-    provider: ConcreteProvider<Ethereum, Arc<EthereumWallet>>,
-}
-
-impl ResolverApi for PolkadotOmnichainNodeResolver {
-    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
-    fn chain_id(
-        &self,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::primitives::ChainId>> + '_>> {
-        Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) })
-    }
-
-    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
-    fn transaction_gas_price(
-        &self,
-        tx_hash: TxHash,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
-        Box::pin(async move {
-            self.provider
-                .get_transaction_receipt(tx_hash)
-                .await?
-                .context("Failed to get the transaction receipt")
-                .map(|receipt| receipt.effective_gas_price)
-        })
-    }
-
-    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
-    fn block_gas_limit(
-        &self,
-        number: BlockNumberOrTag,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
-        Box::pin(async move {
-            self.provider
-                .get_block_by_number(number)
-                .await
-                .context("Failed to get the eth-rpc block")?
-                .context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
-                .map(|block| block.header.gas_limit as _)
-        })
-    }
-
-    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
-    fn block_coinbase(
-        &self,
-        number: BlockNumberOrTag,
-    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Address>> + '_>> {
||||||
Box::pin(async move {
|
|
||||||
self.provider
|
|
||||||
.get_block_by_number(number)
|
|
||||||
.await
|
|
||||||
.context("Failed to get the eth-rpc block")?
|
|
||||||
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
|
|
||||||
.map(|block| block.header.beneficiary)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
|
|
||||||
fn block_difficulty(
|
|
||||||
&self,
|
|
||||||
number: BlockNumberOrTag,
|
|
||||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
|
|
||||||
Box::pin(async move {
|
|
||||||
self.provider
|
|
||||||
.get_block_by_number(number)
|
|
||||||
.await
|
|
||||||
.context("Failed to get the eth-rpc block")?
|
|
||||||
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
|
|
||||||
.map(|block| U256::from_be_bytes(block.header.mix_hash.0))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
|
|
||||||
fn block_base_fee(
|
|
||||||
&self,
|
|
||||||
number: BlockNumberOrTag,
|
|
||||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<u64>> + '_>> {
|
|
||||||
Box::pin(async move {
|
|
||||||
self.provider
|
|
||||||
.get_block_by_number(number)
|
|
||||||
.await
|
|
||||||
.context("Failed to get the eth-rpc block")?
|
|
||||||
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
|
|
||||||
.and_then(|block| {
|
|
||||||
block
|
|
||||||
.header
|
|
||||||
.base_fee_per_gas
|
|
||||||
.context("Failed to get the base fee per gas")
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
|
|
||||||
fn block_hash(
|
|
||||||
&self,
|
|
||||||
number: BlockNumberOrTag,
|
|
||||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockHash>> + '_>> {
|
|
||||||
Box::pin(async move {
|
|
||||||
self.provider
|
|
||||||
.get_block_by_number(number)
|
|
||||||
.await
|
|
||||||
.context("Failed to get the eth-rpc block")?
|
|
||||||
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
|
|
||||||
.map(|block| block.header.hash)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
|
|
||||||
fn block_timestamp(
|
|
||||||
&self,
|
|
||||||
number: BlockNumberOrTag,
|
|
||||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockTimestamp>> + '_>> {
|
|
||||||
Box::pin(async move {
|
|
||||||
self.provider
|
|
||||||
.get_block_by_number(number)
|
|
||||||
.await
|
|
||||||
.context("Failed to get the eth-rpc block")?
|
|
||||||
.context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
|
|
||||||
.map(|block| block.header.timestamp)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
|
|
||||||
fn last_block_number(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockNumber>> + '_>> {
|
|
||||||
Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) })
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Node for PolkadotOmnichainNode {
|
|
||||||
fn shutdown(&mut self) -> anyhow::Result<()> {
|
|
||||||
drop(self.polkadot_omnichain_node_process.take());
|
|
||||||
drop(self.eth_rpc_process.take());
|
|
||||||
|
|
||||||
// Remove the node's database so that subsequent runs do not run on the same database. We
|
|
||||||
// ignore the error just in case the directory didn't exist in the first place and therefore
|
|
||||||
// there's nothing to be deleted.
|
|
||||||
let _ = remove_dir_all(self.base_directory_path.join("data"));
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
|
|
||||||
self.init(genesis)?.spawn_process()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn version(&self) -> anyhow::Result<String> {
|
|
||||||
let output = Command::new(&self.polkadot_omnichain_node_binary_path)
|
|
||||||
.arg("--version")
|
|
||||||
.stdin(Stdio::null())
|
|
||||||
.stdout(Stdio::piped())
|
|
||||||
.stderr(Stdio::null())
|
|
||||||
.spawn()
|
|
||||||
.context("Failed to spawn substrate --version")?
|
|
||||||
.wait_with_output()
|
|
||||||
.context("Failed to wait for substrate --version")?
|
|
||||||
.stdout;
|
|
||||||
Ok(String::from_utf8_lossy(&output).into())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for PolkadotOmnichainNode {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
self.shutdown().expect("Failed to shutdown")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
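// A minimal, self-contained sketch (ours, not part of the diff above) of how the
// per-dispatch-class weights gathered in `subscribe_to_full_blocks_information`
// combine into block-fullness percentages. `Weight` and `PerClass` are
// hypothetical stand-ins for the subxt-generated metadata types.
#[derive(Clone, Copy)]
struct Weight {
    ref_time: u64,
    proof_size: u64,
}

#[derive(Clone, Copy)]
struct PerClass {
    normal: Weight,
    operational: Weight,
    mandatory: Weight,
}

/// Returns `(ref_time_fullness_pct, proof_size_fullness_pct)` for one block.
fn block_fullness(used: PerClass, max_ref_time: u64, max_proof_size: u64) -> (u128, u128) {
    // Sum the consumed weight across all three dispatch classes, exactly as the
    // code above does before it builds `SubstrateMinedBlockInformation`.
    let ref_time = used.normal.ref_time as u128
        + used.operational.ref_time as u128
        + used.mandatory.ref_time as u128;
    let proof_size = used.normal.proof_size as u128
        + used.operational.proof_size as u128
        + used.mandatory.proof_size as u128;

    // Express each dimension as a percentage of the chain's block limits,
    // guarding against a zero limit.
    (
        ref_time * 100 / (max_ref_time.max(1) as u128),
        proof_size * 100 / (max_proof_size.max(1) as u128),
    )
}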
File diff suppressed because it is too large
@@ -110,11 +110,8 @@ impl Process {
             }

             let check_result =
-                check_function(stdout_line.as_deref(), stderr_line.as_deref()).context(
-                    format!(
-                        "Failed to wait for the process to be ready - {stdout} - {stderr}"
-                    ),
-                )?;
+                check_function(stdout_line.as_deref(), stderr_line.as_deref())
+                    .context("Failed to wait for the process to be ready")?;

             if check_result {
                 break;
@@ -130,10 +127,10 @@ impl Process {
             ProcessReadinessWaitBehavior::WaitForCommandToExit => {
                 if !child
                     .wait()
-                    .context("Failed waiting for process to finish")?
+                    .context("Failed waiting for kurtosis run process to finish")?
                     .success()
                 {
-                    anyhow::bail!("Failed to spawn command");
+                    anyhow::bail!("Failed to initialize kurtosis network",);
                 }
             }
         }
@@ -1,69 +0,0 @@
use std::sync::Arc;

use alloy::transports::BoxFuture;
use tokio::sync::Semaphore;
use tower::{Layer, Service};

#[derive(Clone, Debug)]
pub struct ConcurrencyLimiterLayer {
    semaphore: Arc<Semaphore>,
}

impl ConcurrencyLimiterLayer {
    pub fn new(permit_count: usize) -> Self {
        Self {
            semaphore: Arc::new(Semaphore::new(permit_count)),
        }
    }
}

impl<S> Layer<S> for ConcurrencyLimiterLayer {
    type Service = ConcurrencyLimiterService<S>;

    fn layer(&self, inner: S) -> Self::Service {
        ConcurrencyLimiterService {
            service: inner,
            semaphore: self.semaphore.clone(),
        }
    }
}

#[derive(Clone)]
pub struct ConcurrencyLimiterService<S> {
    service: S,
    semaphore: Arc<Semaphore>,
}

impl<S, Request> Service<Request> for ConcurrencyLimiterService<S>
where
    S: Service<Request> + Send,
    S::Future: Send + 'static,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

    fn poll_ready(
        &mut self,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), Self::Error>> {
        self.service.poll_ready(cx)
    }

    fn call(&mut self, req: Request) -> Self::Future {
        let semaphore = self.semaphore.clone();
        let future = self.service.call(req);

        Box::pin(async move {
            let _permit = semaphore
                .acquire()
                .await
                .expect("Semaphore has been closed");
            tracing::debug!(
                available_permits = semaphore.available_permits(),
                "Acquired Semaphore Permit"
            );
            future.await
        })
    }
}
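// A hedged usage sketch (ours): stacking `ConcurrencyLimiterLayer` onto a toy
// tower service so that at most two calls are in flight at once. It assumes
// tower's "util" feature for `service_fn`/`oneshot`; the inner service here is
// invented for illustration.
use std::time::Duration;

use tower::{Layer, ServiceExt, service_fn};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let layer = ConcurrencyLimiterLayer::new(2);

    // A toy inner service that sleeps briefly and echoes its input.
    let inner = service_fn(|x: u32| async move {
        tokio::time::sleep(Duration::from_millis(50)).await;
        Ok::<_, anyhow::Error>(x)
    });

    // A third concurrent call would wait until one of the two permits frees up.
    let svc = layer.layer(inner);
    let out = svc.oneshot(7).await?;
    assert_eq!(out, 7);
    Ok(())
}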
@@ -1,169 +0,0 @@
use alloy::{
    eips::BlockNumberOrTag,
    network::{Network, TransactionBuilder},
    providers::{
        Provider, SendableTx,
        ext::DebugApi,
        fillers::{GasFillable, GasFiller, TxFiller},
    },
    rpc::types::trace::geth::{
        GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingCallOptions,
        GethDebugTracingOptions,
    },
    transports::{RpcError, TransportResult},
};

/// An implementation of [`GasFiller`] with a fallback mechanism for reverting transactions.
///
/// This struct provides a fallback mechanism for alloy's [`GasFiller`] which kicks in when a
/// transaction's dry run fails due to a revert, allowing us to get gas estimates even for
/// failing transactions. In this codebase, this is very important since the MatterLabs tests
/// expect some transactions in the test suite to revert. Since we're expected to run a number of
/// assertions on these reverting transactions, we must commit them to the ledger.
///
/// Therefore, this struct does the following:
///
/// 1. It first attempts to estimate the gas through the mechanism implemented in the [`GasFiller`].
/// 2. If that fails, then we perform a debug trace of the transaction to find out how much gas the
///    transaction needs until it reverts.
/// 3. We fill in these values (either the success or failure case) into the transaction.
///
/// The fallback mechanism of this filler can be completely disabled if we don't want it to be used.
/// In that case, this gas filler will act in an identical way to alloy's [`GasFiller`].
///
/// The previous implementation of this fallback gas filler relied on default values for the gas
/// limit in order to be able to submit the reverting transactions to the network, but it
/// introduced a number of issues that we weren't anticipating at the time when it was built.
#[derive(Clone, Copy, Debug)]
pub struct FallbackGasFiller {
    /// The inner [`GasFiller`] which we pass all of the calls to in the happy path.
    inner: GasFiller,

    /// A [`bool`] that controls if the fallback mechanism is enabled or not.
    enable_fallback_mechanism: bool,
}

impl FallbackGasFiller {
    pub fn new() -> Self {
        Self {
            inner: Default::default(),
            enable_fallback_mechanism: true,
        }
    }

    pub fn with_fallback_mechanism(mut self, enable: bool) -> Self {
        self.enable_fallback_mechanism = enable;
        self
    }

    pub fn with_fallback_mechanism_enabled(self) -> Self {
        self.with_fallback_mechanism(true)
    }

    pub fn with_fallback_mechanism_disabled(self) -> Self {
        self.with_fallback_mechanism(false)
    }
}

impl<N> TxFiller<N> for FallbackGasFiller
where
    N: Network,
{
    type Fillable = <GasFiller as TxFiller<N>>::Fillable;

    fn status(
        &self,
        tx: &<N as Network>::TransactionRequest,
    ) -> alloy::providers::fillers::FillerControlFlow {
        TxFiller::<N>::status(&self.inner, tx)
    }

    fn fill_sync(&self, _: &mut SendableTx<N>) {}

    async fn prepare<P: Provider<N>>(
        &self,
        provider: &P,
        tx: &<N as Network>::TransactionRequest,
    ) -> TransportResult<Self::Fillable> {
        match (
            self.inner.prepare(provider, tx).await,
            self.enable_fallback_mechanism,
        ) {
            // Return the same thing if either this call succeeds, or if the call fails and the
            // fallback mechanism is disabled.
            (rtn @ Ok(..), ..) | (rtn @ Err(..), false) => rtn,
            (Err(..), true) => {
                // Perform a trace of the transaction.
                let trace = provider
                    .debug_trace_call(
                        tx.clone(),
                        BlockNumberOrTag::Latest.into(),
                        GethDebugTracingCallOptions {
                            tracing_options: GethDebugTracingOptions {
                                tracer: Some(GethDebugTracerType::BuiltInTracer(
                                    GethDebugBuiltInTracerType::CallTracer,
                                )),
                                ..Default::default()
                            },
                            state_overrides: Default::default(),
                            block_overrides: Default::default(),
                            tx_index: Default::default(),
                        },
                    )
                    .await?
                    .try_into_call_frame()
                    .map_err(|err| {
                        RpcError::local_usage_str(
                            format!("Expected a callframe trace, but got: {err:?}").as_str(),
                        )
                    })?;

                let gas_used = u64::try_from(trace.gas_used).map_err(|_| {
                    RpcError::local_usage_str(
                        "Transaction trace returned a value of gas used that exceeds u64",
                    )
                })?;
                let gas_limit = gas_used.saturating_mul(2);

                if let Some(gas_price) = tx.gas_price() {
                    return Ok(GasFillable::Legacy {
                        gas_limit,
                        gas_price,
                    });
                }

                let estimate = if let (Some(max_fee_per_gas), Some(max_priority_fee_per_gas)) =
                    (tx.max_fee_per_gas(), tx.max_priority_fee_per_gas())
                {
                    alloy::eips::eip1559::Eip1559Estimation {
                        max_fee_per_gas,
                        max_priority_fee_per_gas,
                    }
                } else {
                    provider.estimate_eip1559_fees().await?
                };

                Ok(GasFillable::Eip1559 {
                    gas_limit,
                    estimate,
                })
            }
        }
    }

    async fn fill(
        &self,
        fillable: Self::Fillable,
        tx: SendableTx<N>,
    ) -> TransportResult<SendableTx<N>> {
        self.inner.fill(fillable, tx).await
    }
}

impl Default for FallbackGasFiller {
    fn default() -> Self {
        Self::new()
    }
}
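// A tiny self-contained sketch (ours) of the numeric heuristic in the fallback
// path above: take the gas the call tracer saw consumed up to the revert and
// submit with double that as the gas limit, so the reverting transaction still
// fits in a block and gets committed. The function name is ours.
fn fallback_gas_limit(traced_gas_used: u128) -> anyhow::Result<u64> {
    let gas_used = u64::try_from(traced_gas_used)
        .map_err(|_| anyhow::anyhow!("traced gas used exceeds u64"))?;
    Ok(gas_used.saturating_mul(2))
}

fn main() -> anyhow::Result<()> {
    assert_eq!(fallback_gas_limit(21_000)?, 42_000);
    // Saturates instead of overflowing near u64::MAX.
    assert_eq!(fallback_gas_limit(u64::MAX as u128)?, u64::MAX);
    Ok(())
}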
@@ -1,9 +0,0 @@
mod concurrency_limiter;
mod fallback_gas_filler;
mod provider;
mod receipt_retry_layer;

pub use concurrency_limiter::*;
pub use fallback_gas_filler::*;
pub use provider::*;
pub use receipt_retry_layer::*;
@@ -1,64 +0,0 @@
use std::sync::LazyLock;

use alloy::{
    network::{Network, NetworkWallet, TransactionBuilder4844},
    providers::{
        Identity, ProviderBuilder, RootProvider,
        fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller},
    },
    rpc::client::ClientBuilder,
};
use anyhow::{Context, Result};

use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller, RetryLayer};

pub type ConcreteProvider<N, W> = FillProvider<
    JoinFill<
        JoinFill<JoinFill<JoinFill<Identity, FallbackGasFiller>, ChainIdFiller>, NonceFiller>,
        WalletFiller<W>,
    >,
    RootProvider<N>,
    N,
>;

pub async fn construct_concurrency_limited_provider<N, W>(
    rpc_url: &str,
    fallback_gas_filler: FallbackGasFiller,
    chain_id_filler: ChainIdFiller,
    nonce_filler: NonceFiller,
    wallet: W,
) -> Result<ConcreteProvider<N, W>>
where
    N: Network<TransactionRequest: TransactionBuilder4844>,
    W: NetworkWallet<N>,
    Identity: TxFiller<N>,
    FallbackGasFiller: TxFiller<N>,
    ChainIdFiller: TxFiller<N>,
    NonceFiller: TxFiller<N>,
    WalletFiller<W>: TxFiller<N>,
{
    // This is a global limit on the RPC concurrency that applies to all of the providers created
    // by the framework. With this limit in place, we can have a maximum of N concurrent requests
    // at any point in time and no more than that. This is done in an effort to stabilize the
    // framework against some of the intermittent issues that we've been seeing related to RPC
    // calls.
    static GLOBAL_CONCURRENCY_LIMITER_LAYER: LazyLock<ConcurrencyLimiterLayer> =
        LazyLock::new(|| ConcurrencyLimiterLayer::new(500));

    let client = ClientBuilder::default()
        .layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone())
        .layer(RetryLayer::default())
        .connect(rpc_url)
        .await
        .context("Failed to construct the RPC client")?;

    let provider = ProviderBuilder::new()
        .disable_recommended_fillers()
        .network::<N>()
        .filler(fallback_gas_filler)
        .filler(chain_id_filler)
        .filler(nonce_filler)
        .wallet(wallet)
        .connect_client(client);

    Ok(provider)
}
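// A hedged call-site sketch (ours) for `construct_concurrency_limited_provider`.
// The URL is a placeholder and the throwaway random signer is for illustration
// only; real callers wire in their own wallet and filler configuration.
use alloy::{
    network::{Ethereum, EthereumWallet},
    providers::{
        Provider,
        fillers::{ChainIdFiller, NonceFiller},
    },
    signers::local::PrivateKeySigner,
};

async fn example() -> anyhow::Result<()> {
    let wallet = EthereumWallet::from(PrivateKeySigner::random());
    let provider = construct_concurrency_limited_provider::<Ethereum, _>(
        "ws://127.0.0.1:8545",
        FallbackGasFiller::new(),
        ChainIdFiller::default(),
        NonceFiller::default(),
        wallet,
    )
    .await?;

    // All RPC calls made through this provider share the global 500-permit
    // concurrency limit and the receipt retry layer.
    let _chain_id = provider.get_chain_id().await?;
    Ok(())
}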
@@ -1,158 +0,0 @@
use std::time::Duration;

use alloy::{
    network::{AnyNetwork, Network},
    rpc::json_rpc::{RequestPacket, ResponsePacket},
    transports::{TransportError, TransportErrorKind, TransportFut},
};
use tokio::time::{interval, timeout};
use tower::{Layer, Service};

/// A layer that allows for automatic retries for getting the receipt.
///
/// There are certain cases where getting the receipt of a committed transaction might fail. In
/// Geth this can happen if the transaction has been committed to the ledger but has not been
/// indexed; in the Substrate and revive stack it can also happen for other reasons.
///
/// Therefore, just because the first attempt to get the receipt (after transaction confirmation)
/// has failed, it doesn't mean that it will continue to fail. This layer can be added to any alloy
/// provider to allow the provider to retry getting the receipt for some period of time before it
/// considers that a timeout. It attempts to poll for the receipt for the `polling_duration` with
/// an interval of `polling_interval` between each poll. If by the end of the `polling_duration` it
/// was not able to get the receipt successfully, then this is considered to be a timeout.
///
/// Additionally, this layer allows for retries for other RPC methods such as all tracing methods.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RetryLayer {
    /// The amount of time to keep polling for the receipt before considering it a timeout.
    polling_duration: Duration,

    /// The interval of time to wait between each poll for the receipt.
    polling_interval: Duration,
}

impl RetryLayer {
    pub fn new(polling_duration: Duration, polling_interval: Duration) -> Self {
        Self {
            polling_duration,
            polling_interval,
        }
    }

    pub fn with_polling_duration(mut self, polling_duration: Duration) -> Self {
        self.polling_duration = polling_duration;
        self
    }

    pub fn with_polling_interval(mut self, polling_interval: Duration) -> Self {
        self.polling_interval = polling_interval;
        self
    }
}

impl Default for RetryLayer {
    fn default() -> Self {
        Self {
            polling_duration: Duration::from_secs(90),
            polling_interval: Duration::from_millis(500),
        }
    }
}

impl<S> Layer<S> for RetryLayer {
    type Service = RetryService<S>;

    fn layer(&self, inner: S) -> Self::Service {
        RetryService {
            service: inner,
            polling_duration: self.polling_duration,
            polling_interval: self.polling_interval,
        }
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RetryService<S> {
    /// The internal service.
    service: S,

    /// The amount of time to keep polling for the receipt before considering it a timeout.
    polling_duration: Duration,

    /// The interval of time to wait between each poll for the receipt.
    polling_interval: Duration,
}

impl<S> Service<RequestPacket> for RetryService<S>
where
    S: Service<RequestPacket, Future = TransportFut<'static>, Error = TransportError>
        + Send
        + 'static
        + Clone,
{
    type Response = ResponsePacket;
    type Error = TransportError;
    type Future = TransportFut<'static>;

    fn poll_ready(
        &mut self,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), Self::Error>> {
        self.service.poll_ready(cx)
    }

    #[allow(clippy::nonminimal_bool)]
    fn call(&mut self, req: RequestPacket) -> Self::Future {
        type ReceiptOutput = <AnyNetwork as Network>::ReceiptResponse;

        let mut service = self.service.clone();
        let polling_interval = self.polling_interval;
        let polling_duration = self.polling_duration;

        Box::pin(async move {
            let request = req.as_single().ok_or_else(|| {
                TransportErrorKind::custom_str("Retry layer doesn't support batch requests")
            })?;
            let method = request.method();
            let requires_retries = method == "eth_getTransactionReceipt"
                || (method.contains("debug") && method.contains("trace"));

            if !requires_retries {
                return service.call(req).await;
            }

            timeout(polling_duration, async {
                let mut interval = interval(polling_interval);

                loop {
                    interval.tick().await;

                    let Ok(resp) = service.call(req.clone()).await else {
                        continue;
                    };
                    let response = resp.as_single().expect("Can't fail");
                    if response.is_error() {
                        continue;
                    }

                    if method == "eth_getTransactionReceipt"
                        && response
                            .payload()
                            .clone()
                            .deserialize_success::<ReceiptOutput>()
                            .ok()
                            .and_then(|resp| resp.try_into_success().ok())
                            .is_some()
                        || method != "eth_getTransactionReceipt"
                    {
                        return resp;
                    } else {
                        continue;
                    }
                }
            })
            .await
            .map_err(|_| TransportErrorKind::custom_str("Timeout when retrying request"))
        })
    }
}
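// A brief usage sketch (ours, illustrative values only): the builder methods
// above compose when the 90-second default polling window is too long, e.g.
// against a fast local node.
use std::time::Duration;

fn configured_retry_layer() -> RetryLayer {
    // Poll for up to 15 seconds, once every 250 ms, before the missing receipt
    // is treated as a timeout. The layer is then stacked onto the RPC client
    // exactly as `construct_concurrency_limited_provider` does above.
    RetryLayer::default()
        .with_polling_duration(Duration::from_secs(15))
        .with_polling_interval(Duration::from_millis(250))
}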
File diff suppressed because it is too large (+611 -322)
@@ -1,25 +0,0 @@
[package]
name = "revive-dt-report-processor"
description = "revive differential testing report processor utility"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true

[[bin]]
name = "report-processor"
path = "src/main.rs"

[dependencies]
revive-dt-report = { workspace = true }
revive-dt-common = { workspace = true }

anyhow = { workspace = true }
clap = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }

[lints]
workspace = true
@@ -1,329 +0,0 @@
use std::{
    borrow::Cow,
    collections::{BTreeMap, BTreeSet},
    fmt::Display,
    fs::{File, OpenOptions},
    ops::{Deref, DerefMut},
    path::{Path, PathBuf},
    str::FromStr,
};

use anyhow::{Context as _, Error, Result, bail};
use clap::Parser;
use serde::{Deserialize, Serialize, de::DeserializeOwned};

use revive_dt_common::types::{Mode, ParsedTestSpecifier};
use revive_dt_report::{Report, TestCaseStatus};

fn main() -> Result<()> {
    let cli = Cli::try_parse().context("Failed to parse the CLI arguments")?;

    match cli {
        Cli::GenerateExpectationsFile {
            report_path,
            output_path: output_file,
            remove_prefix,
        } => {
            let remove_prefix = remove_prefix
                .into_iter()
                .map(|path| path.canonicalize().context("Failed to canonicalize path"))
                .collect::<Result<Vec<_>>>()?;

            let expectations = report_path
                .execution_information
                .iter()
                .flat_map(|(metadata_file_path, metadata_file_report)| {
                    metadata_file_report
                        .case_reports
                        .iter()
                        .map(move |(case_idx, case_report)| {
                            (metadata_file_path, case_idx, case_report)
                        })
                })
                .flat_map(|(metadata_file_path, case_idx, case_report)| {
                    case_report.mode_execution_reports.iter().map(
                        move |(mode, execution_report)| {
                            (
                                metadata_file_path,
                                case_idx,
                                mode,
                                execution_report.status.as_ref(),
                            )
                        },
                    )
                })
                .filter_map(|(metadata_file_path, case_idx, mode, status)| {
                    status.map(|status| (metadata_file_path, case_idx, mode, status))
                })
                .map(|(metadata_file_path, case_idx, mode, status)| {
                    (
                        TestSpecifier {
                            metadata_file_path: Cow::Borrowed(
                                remove_prefix
                                    .iter()
                                    .filter_map(|prefix| {
                                        metadata_file_path.as_inner().strip_prefix(prefix).ok()
                                    })
                                    .next()
                                    .unwrap_or(metadata_file_path.as_inner()),
                            ),
                            case_idx: case_idx.into_inner(),
                            mode: Cow::Borrowed(mode),
                        },
                        Status::from(status),
                    )
                })
                .filter(|(_, status)| *status == Status::Failed)
                .collect::<Expectations>();

            let output_file = OpenOptions::new()
                .truncate(true)
                .create(true)
                .write(true)
                .open(output_file)
                .context("Failed to create the output file")?;
            serde_json::to_writer_pretty(output_file, &expectations)
                .context("Failed to write the expectations to file")?;
        }
        Cli::CompareExpectationFiles {
            base_expectation_path,
            other_expectation_path,
        } => {
            let keys = base_expectation_path
                .keys()
                .chain(other_expectation_path.keys())
                .collect::<BTreeSet<_>>();

            for key in keys {
                let base_status = base_expectation_path.get(key).context(format!(
                    "Entry not found in the base expectations: \"{}\"",
                    key
                ))?;
                let other_status = other_expectation_path.get(key).context(format!(
                    "Entry not found in the other expectations: \"{}\"",
                    key
                ))?;

                if base_status != other_status {
                    bail!(
                        "Expectations for entry \"{}\" have changed. They were {:?} and now they are {:?}",
                        key,
                        base_status,
                        other_status
                    )
                }
            }
        }
    };

    Ok(())
}

type Expectations<'a> = BTreeMap<TestSpecifier<'a>, Status>;

/// A tool that's used to process the reports generated by the retester binary in various ways.
#[derive(Clone, Debug, Parser)]
#[command(name = "retester", term_width = 100)]
pub enum Cli {
    /// Generates an expectations file out of a given report.
    GenerateExpectationsFile {
        /// The path of the report's JSON file to generate the expectations file for.
        #[clap(long)]
        report_path: JsonFile<Report>,

        /// The path of the output file to generate.
        ///
        /// Note that we expect that:
        /// 1. The provided path points to a JSON file.
        /// 1. The ancestors of the provided path already exist such that no directory creations
        ///    are required.
        #[clap(long)]
        output_path: PathBuf,

        /// Prefix paths to remove from the paths in the final expectations file.
        #[clap(long)]
        remove_prefix: Vec<PathBuf>,
    },

    /// Compares two expectation files to ensure that they match each other.
    CompareExpectationFiles {
        /// The path of the base expectation file.
        #[clap(long)]
        base_expectation_path: JsonFile<Expectations<'static>>,

        /// The path of the other expectation file.
        #[clap(long)]
        other_expectation_path: JsonFile<Expectations<'static>>,
    },
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub enum Status {
    Succeeded,
    Failed,
    Ignored,
}

impl From<TestCaseStatus> for Status {
    fn from(value: TestCaseStatus) -> Self {
        match value {
            TestCaseStatus::Succeeded { .. } => Self::Succeeded,
            TestCaseStatus::Failed { .. } => Self::Failed,
            TestCaseStatus::Ignored { .. } => Self::Ignored,
        }
    }
}

impl<'a> From<&'a TestCaseStatus> for Status {
    fn from(value: &'a TestCaseStatus) -> Self {
        match value {
            TestCaseStatus::Succeeded { .. } => Self::Succeeded,
            TestCaseStatus::Failed { .. } => Self::Failed,
            TestCaseStatus::Ignored { .. } => Self::Ignored,
        }
    }
}

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct JsonFile<T> {
    path: PathBuf,
    content: Box<T>,
}

impl<T> Deref for JsonFile<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.content
    }
}

impl<T> DerefMut for JsonFile<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.content
    }
}

impl<T> FromStr for JsonFile<T>
where
    T: DeserializeOwned,
{
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let path = PathBuf::from(s);
        let file = File::open(&path).context("Failed to open the file")?;
        serde_json::from_reader(&file)
            .map(|content| Self { path, content })
            .context(format!(
                "Failed to deserialize file's content as {}",
                std::any::type_name::<T>()
            ))
    }
}

impl<T> Display for JsonFile<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Display::fmt(&self.path.display(), f)
    }
}

impl<T> From<JsonFile<T>> for String {
    fn from(value: JsonFile<T>) -> Self {
        value.to_string()
    }
}

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct TestSpecifier<'a> {
    pub metadata_file_path: Cow<'a, Path>,
    pub case_idx: usize,
    pub mode: Cow<'a, Mode>,
}

impl<'a> Display for TestSpecifier<'a> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}::{}::{}",
            self.metadata_file_path.display(),
            self.case_idx,
            self.mode
        )
    }
}

impl<'a> From<TestSpecifier<'a>> for ParsedTestSpecifier {
    fn from(
        TestSpecifier {
            metadata_file_path,
            case_idx,
            mode,
        }: TestSpecifier,
    ) -> Self {
        Self::CaseWithMode {
            metadata_file_path: metadata_file_path.to_path_buf(),
            case_idx,
            mode: mode.into_owned(),
        }
    }
}

impl TryFrom<ParsedTestSpecifier> for TestSpecifier<'static> {
    type Error = Error;

    fn try_from(value: ParsedTestSpecifier) -> Result<Self> {
        let ParsedTestSpecifier::CaseWithMode {
            metadata_file_path,
            case_idx,
            mode,
        } = value
        else {
            bail!("Expected a full test case specifier")
        };
        Ok(Self {
            metadata_file_path: Cow::Owned(metadata_file_path),
            case_idx,
            mode: Cow::Owned(mode),
        })
    }
}

impl<'a> Serialize for TestSpecifier<'a> {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.to_string().serialize(serializer)
    }
}

impl<'d, 'a> Deserialize<'d> for TestSpecifier<'a> {
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: serde::Deserializer<'d>,
    {
        let string = String::deserialize(deserializer)?;
        let mut splitted = string.split("::");
        let (Some(metadata_file_path), Some(case_idx), Some(mode), None) = (
            splitted.next(),
            splitted.next(),
            splitted.next(),
            splitted.next(),
        ) else {
            return Err(serde::de::Error::custom(
                "Test specifier doesn't contain the components required",
            ));
        };
        let metadata_file_path = PathBuf::from(metadata_file_path);
        let case_idx = usize::from_str(case_idx)
            .map_err(|_| serde::de::Error::custom("Case idx is not a usize"))?;
        let mode = Mode::from_str(mode).map_err(|_| serde::de::Error::custom("Invalid mode"))?;

        Ok(Self {
            metadata_file_path: Cow::Owned(metadata_file_path),
            case_idx,
            mode: Cow::Owned(mode),
        })
    }
}
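// A self-contained sketch (ours) of the `<path>::<case_idx>::<mode>` key format
// that `TestSpecifier`'s Serialize/Deserialize impls above round-trip through.
// The mode is kept as a raw string here (the real `Mode` lives in
// revive_dt_common), and the example path and mode token are invented.
fn parse_specifier(s: &str) -> Option<(&str, usize, &str)> {
    let mut parts = s.split("::");
    let (Some(path), Some(case_idx), Some(mode), None) =
        (parts.next(), parts.next(), parts.next(), parts.next())
    else {
        return None;
    };
    Some((path, case_idx.parse().ok()?, mode))
}

fn main() {
    let parsed = parse_specifier("tests/some_test.json::3::M3").unwrap();
    assert_eq!(parsed, ("tests/some_test.json", 3, "M3"));
}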
@@ -13,11 +13,10 @@ revive-dt-config = { workspace = true }
 revive-dt-format = { workspace = true }
 revive-dt-compiler = { workspace = true }

-alloy = { workspace = true }
+alloy-primitives = { workspace = true }
 anyhow = { workspace = true }
 paste = { workspace = true }
 indexmap = { workspace = true, features = ["serde"] }
-itertools = { workspace = true }
 semver = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
@@ -4,21 +4,19 @@
 use std::{
     collections::{BTreeMap, BTreeSet, HashMap, HashSet},
     fs::OpenOptions,
-    ops::{Add, Div},
     path::PathBuf,
     time::{SystemTime, UNIX_EPOCH},
 };

-use alloy::primitives::{Address, BlockNumber, BlockTimestamp, TxHash};
+use alloy_primitives::Address;
 use anyhow::{Context as _, Result};
 use indexmap::IndexMap;
-use itertools::Itertools;
 use revive_dt_common::types::PlatformIdentifier;
 use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
 use revive_dt_config::Context;
-use revive_dt_format::{case::CaseIdx, metadata::ContractInstance, steps::StepPath};
+use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance};
 use semver::Version;
-use serde::{Deserialize, Serialize};
+use serde::Serialize;
 use serde_with::{DisplayFromStr, serde_as};
 use tokio::sync::{
     broadcast::{Sender, channel},
@@ -36,20 +34,13 @@ pub struct ReportAggregator {
     runner_tx: Option<UnboundedSender<RunnerEvent>>,
     runner_rx: UnboundedReceiver<RunnerEvent>,
     listener_tx: Sender<ReporterEvent>,
-    /* Context */
-    file_name: Option<String>,
 }

 impl ReportAggregator {
     pub fn new(context: Context) -> Self {
         let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
-        let (listener_tx, _) = channel::<ReporterEvent>(0xFFFF);
+        let (listener_tx, _) = channel::<ReporterEvent>(1024);
         Self {
-            file_name: match context {
-                Context::Test(ref context) => context.report_configuration.file_name.clone(),
-                Context::Benchmark(ref context) => context.report_configuration.file_name.clone(),
-                Context::ExportJsonSchema | Context::ExportGenesis(..) => None,
-            },
             report: Report::new(context),
             remaining_cases: Default::default(),
             runner_tx: Some(runner_tx),
@@ -58,7 +49,7 @@ impl ReportAggregator {
         }
     }

-    pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<Report>>) {
+    pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<()>>) {
         let reporter = self
             .runner_tx
             .take()
@@ -67,15 +58,18 @@ impl ReportAggregator {
         (reporter, async move { self.aggregate().await })
     }

-    async fn aggregate(mut self) -> Result<Report> {
+    async fn aggregate(mut self) -> Result<()> {
         debug!("Starting to aggregate report");

         while let Some(event) = self.runner_rx.recv().await {
-            debug!(event = event.variant_name(), "Received Event");
+            debug!(?event, "Received Event");
             match event {
                 RunnerEvent::SubscribeToEvents(event) => {
                     self.handle_subscribe_to_events_event(*event);
                 }
+                RunnerEvent::CorpusFileDiscovery(event) => {
+                    self.handle_corpus_file_discovered_event(*event)
+                }
                 RunnerEvent::MetadataFileDiscovery(event) => {
                     self.handle_metadata_file_discovery_event(*event);
                 }
@@ -112,23 +106,11 @@ impl ReportAggregator {
                 RunnerEvent::ContractDeployed(event) => {
                     self.handle_contract_deployed_event(*event);
                 }
-                RunnerEvent::Completion(_) => {
-                    break;
-                }
-                /* Benchmarks Events */
-                RunnerEvent::StepTransactionInformation(event) => {
-                    self.handle_step_transaction_information(*event)
-                }
-                RunnerEvent::ContractInformation(event) => {
-                    self.handle_contract_information(*event);
-                }
-                RunnerEvent::BlockMined(event) => self.handle_block_mined(*event),
             }
         }
-        self.handle_completion(CompletionEvent {});
         debug!("Report aggregation completed");

-        let default_file_name = {
+        let file_name = {
             let current_timestamp = SystemTime::now()
                 .duration_since(UNIX_EPOCH)
                 .context("System clock is before UNIX_EPOCH; cannot compute report timestamp")?
@@ -137,7 +119,6 @@ impl ReportAggregator {
             file_name.push_str(".json");
             file_name
         };
-        let file_name = self.file_name.unwrap_or(default_file_name);
         let file_path = self
             .report
             .context
@@ -160,13 +141,17 @@ impl ReportAggregator {
             format!("Failed to serialize report JSON to {}", file_path.display())
         })?;

-        Ok(self.report)
+        Ok(())
     }

     fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) {
         let _ = event.tx.send(self.listener_tx.subscribe());
     }

+    fn handle_corpus_file_discovered_event(&mut self, event: CorpusFileDiscoveryEvent) {
+        self.report.corpora.push(event.corpus);
+    }
+
     fn handle_metadata_file_discovery_event(&mut self, event: MetadataFileDiscoveryEvent) {
         self.report.metadata_files.insert(event.path.clone());
     }
@@ -245,19 +230,17 @@ impl ReportAggregator {

         let case_status = self
             .report
-            .execution_information
+            .test_case_information
             .entry(specifier.metadata_file_path.clone().into())
             .or_default()
-            .case_reports
+            .entry(specifier.solc_mode.clone())
+            .or_default()
             .iter()
-            .flat_map(|(case_idx, mode_to_execution_map)| {
-                let case_status = mode_to_execution_map
-                    .mode_execution_reports
-                    .get(&specifier.solc_mode)?
-                    .status
-                    .clone()
-                    .expect("Can't be uninitialized");
-                Some((*case_idx, case_status))
+            .map(|(case_idx, case_report)| {
+                (
+                    *case_idx,
+                    case_report.status.clone().expect("Can't be uninitialized"),
+                )
             })
             .collect::<BTreeMap<_, _>>();
         let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
@@ -396,157 +379,18 @@ impl ReportAggregator {
         self.execution_information(&event.execution_specifier)
             .deployed_contracts
             .get_or_insert_default()
-            .insert(event.contract_instance.clone(), event.address);
-        self.test_case_report(&event.execution_specifier.test_specifier)
-            .contract_addresses
-            .entry(event.contract_instance)
-            .or_default()
-            .entry(event.execution_specifier.platform_identifier)
-            .or_default()
-            .push(event.address);
+            .insert(event.contract_instance, event.address);
     }

-    fn handle_completion(&mut self, _: CompletionEvent) {
-        self.runner_rx.close();
-        self.handle_metrics_computation();
-    }
-
-    fn handle_metrics_computation(&mut self) {
-        for report in self.report.execution_information.values_mut() {
-            for report in report.case_reports.values_mut() {
-                for report in report.mode_execution_reports.values_mut() {
-                    for (platform_identifier, block_information) in
-                        report.mined_block_information.iter_mut()
-                    {
-                        block_information.sort_by(|a, b| {
-                            a.ethereum_block_information
-                                .block_number
-                                .cmp(&b.ethereum_block_information.block_number)
-                        });
-
-                        // Computing the TPS.
-                        let tps = block_information
-                            .iter()
-                            .tuple_windows::<(_, _)>()
-                            .map(|(block1, block2)| {
-                                block2.ethereum_block_information.transaction_hashes.len() as u64
-                                    / (block2.ethereum_block_information.block_timestamp
-                                        - block1.ethereum_block_information.block_timestamp)
-                            })
-                            .collect::<Vec<_>>();
-                        report
-                            .metrics
-                            .get_or_insert_default()
-                            .transaction_per_second
-                            .with_list(*platform_identifier, tps);
-
-                        // Computing the GPS.
-                        let gps = block_information
-                            .iter()
-                            .tuple_windows::<(_, _)>()
-                            .map(|(block1, block2)| {
-                                block2.ethereum_block_information.mined_gas as u64
-                                    / (block2.ethereum_block_information.block_timestamp
-                                        - block1.ethereum_block_information.block_timestamp)
-                            })
-                            .collect::<Vec<_>>();
-                        report
-                            .metrics
-                            .get_or_insert_default()
-                            .gas_per_second
-                            .with_list(*platform_identifier, gps);
-
-                        // Computing the gas block fullness
-                        let gas_block_fullness = block_information
-                            .iter()
-                            .map(|block| block.gas_block_fullness_percentage())
-                            .map(|v| v as u64)
-                            .collect::<Vec<_>>();
-                        report
-                            .metrics
-                            .get_or_insert_default()
-                            .gas_block_fullness
-                            .with_list(*platform_identifier, gas_block_fullness);
-
-                        // Computing the ref-time block fullness
-                        let reftime_block_fullness = block_information
-                            .iter()
-                            .filter_map(|block| block.ref_time_block_fullness_percentage())
-                            .map(|v| v as u64)
-                            .collect::<Vec<_>>();
-                        if !reftime_block_fullness.is_empty() {
-                            report
-                                .metrics
-                                .get_or_insert_default()
-                                .ref_time_block_fullness
-                                .get_or_insert_default()
-                                .with_list(*platform_identifier, reftime_block_fullness);
-                        }
-
-                        // Computing the proof size block fullness
-                        let proof_size_block_fullness = block_information
-                            .iter()
-                            .filter_map(|block| block.proof_size_block_fullness_percentage())
-                            .map(|v| v as u64)
-                            .collect::<Vec<_>>();
-                        if !proof_size_block_fullness.is_empty() {
-                            report
-                                .metrics
-                                .get_or_insert_default()
-                                .proof_size_block_fullness
-                                .get_or_insert_default()
-                                .with_list(*platform_identifier, proof_size_block_fullness);
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    fn handle_step_transaction_information(&mut self, event: StepTransactionInformationEvent) {
-        self.test_case_report(&event.execution_specifier.test_specifier)
-            .steps
-            .entry(event.step_path)
-            .or_default()
-            .transactions
-            .entry(event.execution_specifier.platform_identifier)
-            .or_default()
-            .push(event.transaction_information);
-    }
-
-    fn handle_contract_information(&mut self, event: ContractInformationEvent) {
-        self.test_case_report(&event.execution_specifier.test_specifier)
-            .compiled_contracts
-            .entry(event.source_code_path)
-            .or_default()
-            .entry(event.contract_name)
-            .or_default()
-            .contract_size
-            .insert(
-                event.execution_specifier.platform_identifier,
-                event.contract_size,
-            );
-    }
-
-    fn handle_block_mined(&mut self, event: BlockMinedEvent) {
-        self.test_case_report(&event.execution_specifier.test_specifier)
-            .mined_block_information
-            .entry(event.execution_specifier.platform_identifier)
-            .or_default()
-            .push(event.mined_block_information);
-    }
-
-    fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut ExecutionReport {
+    fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut TestCaseReport {
         self.report
-            .execution_information
+            .test_case_information
             .entry(specifier.metadata_file_path.clone().into())
             .or_default()
-            .case_reports
-            .entry(specifier.case_idx)
-            .or_default()
-            .mode_execution_reports
             .entry(specifier.solc_mode.clone())
             .or_default()
+            .entry(specifier.case_idx)
+            .or_default()
     }

     fn execution_information(
@@ -563,78 +407,43 @@ impl ReportAggregator
}

#[serde_as]
-#[derive(Clone, Debug, Serialize, Deserialize)]
+#[derive(Clone, Debug, Serialize)]
pub struct Report {
    /// The context that the tool was started up with.
    pub context: Context,
+    /// The list of corpus files that the tool found.
+    pub corpora: Vec<Corpus>,
    /// The list of metadata files that were found by the tool.
    pub metadata_files: BTreeSet<MetadataFilePath>,
-    /// Metrics from the execution.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub metrics: Option<Metrics>,
    /// Information relating to each test case.
-    pub execution_information: BTreeMap<MetadataFilePath, MetadataFileReport>,
+    #[serde_as(as = "BTreeMap<_, HashMap<DisplayFromStr, BTreeMap<DisplayFromStr, _>>>")]
+    pub test_case_information:
+        BTreeMap<MetadataFilePath, HashMap<Mode, BTreeMap<CaseIdx, TestCaseReport>>>,
}

impl Report {
    pub fn new(context: Context) -> Self {
        Self {
            context,
-            metrics: Default::default(),
+            corpora: Default::default(),
            metadata_files: Default::default(),
-            execution_information: Default::default(),
+            test_case_information: Default::default(),
        }
    }
}
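The `serde_as` attribute on `test_case_information` is needed because JSON object keys must be strings: `DisplayFromStr` from the `serde_with` crate routes the `Mode` and `CaseIdx` map keys through their `Display` (and, on the way back, `FromStr`) implementations. A hedged sketch of the mechanism with a toy key type, assuming the `serde`, `serde_json`, and `serde_with` crates; the real `Mode` and `CaseIdx` types live elsewhere in the codebase:

```rust
use std::{collections::BTreeMap, fmt, str::FromStr};

use serde::Serialize;
use serde_with::{serde_as, DisplayFromStr};

// A toy stand-in for the real `CaseIdx` key type.
#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct CaseIdx(usize);

impl fmt::Display for CaseIdx {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl FromStr for CaseIdx {
    type Err = std::num::ParseIntError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        s.parse().map(CaseIdx)
    }
}

#[serde_as]
#[derive(Serialize)]
struct Report {
    // Keys go through Display, so `CaseIdx(0)` becomes the JSON key "0".
    #[serde_as(as = "BTreeMap<DisplayFromStr, _>")]
    case_reports: BTreeMap<CaseIdx, String>,
}

fn main() {
    let mut case_reports = BTreeMap::new();
    case_reports.insert(CaseIdx(0), "Succeeded".to_owned());
    let json = serde_json::to_string(&Report { case_reports }).unwrap();
    assert_eq!(json, r#"{"case_reports":{"0":"Succeeded"}}"#);
}
```

This is also why the consumer scripts later in this diff model case indices as strings ("0") rather than integers.
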

-#[derive(Clone, Debug, Serialize, Deserialize, Default)]
+#[derive(Clone, Debug, Serialize, Default)]
-pub struct MetadataFileReport {
+pub struct TestCaseReport {
-    /// Metrics from the execution.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub metrics: Option<Metrics>,
-    /// The report of each case keyed by the case idx.
-    pub case_reports: BTreeMap<CaseIdx, CaseReport>,
-}
-
-#[serde_as]
-#[derive(Clone, Debug, Serialize, Deserialize, Default)]
-pub struct CaseReport {
-    /// Metrics from the execution.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub metrics: Option<Metrics>,
-    /// The [`ExecutionReport`] for each one of the [`Mode`]s.
-    #[serde_as(as = "HashMap<DisplayFromStr, _>")]
-    pub mode_execution_reports: HashMap<Mode, ExecutionReport>,
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize, Default)]
-pub struct ExecutionReport {
    /// Information on the status of the test case and whether it succeeded, failed, or was ignored.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<TestCaseStatus>,
-    /// Metrics from the execution.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub metrics: Option<Metrics>,
    /// Information related to the execution on one of the platforms.
-    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
-    pub platform_execution: PlatformKeyedInformation<Option<ExecutionInformation>>,
+    pub platform_execution: BTreeMap<PlatformIdentifier, Option<ExecutionInformation>>,
-    /// Information on the compiled contracts.
-    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
-    pub compiled_contracts: BTreeMap<PathBuf, BTreeMap<String, ContractInformation>>,
-    /// The addresses of the deployed contracts.
-    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
-    pub contract_addresses: BTreeMap<ContractInstance, PlatformKeyedInformation<Vec<Address>>>,
-    /// Information on the mined blocks as part of this execution.
-    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
-    pub mined_block_information: PlatformKeyedInformation<Vec<MinedBlockInformation>>,
-    /// Information tracked for each step that was executed.
-    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
-    pub steps: BTreeMap<StepPath, StepReport>,
}

/// Information related to the status of the test. Could be that the test succeeded, failed, or that
/// it was ignored.
-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Clone, Debug, Serialize)]
#[serde(tag = "status")]
pub enum TestCaseStatus {
    /// The test case succeeded.
@@ -658,7 +467,7 @@ pub enum TestCaseStatus {
}

/// Information related to the platform node that's being used to execute the step.
-#[derive(Clone, Debug, Serialize, Deserialize)]
+#[derive(Clone, Debug, Serialize)]
pub struct TestCaseNodeInformation {
    /// The ID of the node that this case is being executed on.
    pub id: usize,
@@ -669,27 +478,27 @@ pub struct TestCaseNodeInformation {
}

/// Execution information tied to the platform.
-#[derive(Clone, Debug, Default, Serialize, Deserialize)]
+#[derive(Clone, Debug, Default, Serialize)]
pub struct ExecutionInformation {
    /// Information related to the node assigned to this test case.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
    pub node: Option<TestCaseNodeInformation>,
    /// Information on the pre-link compiled contracts.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
    pub pre_link_compilation_status: Option<CompilationStatus>,
    /// Information on the post-link compiled contracts.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
    pub post_link_compilation_status: Option<CompilationStatus>,
    /// Information on the deployed libraries.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
    pub deployed_libraries: Option<BTreeMap<ContractInstance, Address>>,
    /// Information on the deployed contracts.
-    #[serde(default, skip_serializing_if = "Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
    pub deployed_contracts: Option<BTreeMap<ContractInstance, Address>>,
}

/// Information related to compilation.
-#[derive(Clone, Debug, Serialize, Deserialize)]
+#[derive(Clone, Debug, Serialize)]
#[serde(tag = "status")]
pub enum CompilationStatus {
    /// The compilation was successful.
@@ -703,11 +512,11 @@ pub enum CompilationStatus {
        /// The input provided to the compiler to compile the contracts. This is only included if
        /// the appropriate flag is set in the CLI context and if the contracts were not cached and
        /// the compiler was invoked.
-        #[serde(default, skip_serializing_if = "Option::is_none")]
+        #[serde(skip_serializing_if = "Option::is_none")]
        compiler_input: Option<CompilerInput>,
        /// The output of the compiler. This is only included if the appropriate flag is set in the
        /// CLI context.
-        #[serde(default, skip_serializing_if = "Option::is_none")]
+        #[serde(skip_serializing_if = "Option::is_none")]
        compiler_output: Option<CompilerOutput>,
    },
    /// The compilation failed.
@@ -715,251 +524,15 @@ pub enum CompilationStatus {
        /// The failure reason.
        reason: String,
        /// The version of the compiler used to compile the contracts.
-        #[serde(default, skip_serializing_if = "Option::is_none")]
+        #[serde(skip_serializing_if = "Option::is_none")]
        compiler_version: Option<Version>,
        /// The path of the compiler used to compile the contracts.
-        #[serde(default, skip_serializing_if = "Option::is_none")]
+        #[serde(skip_serializing_if = "Option::is_none")]
        compiler_path: Option<PathBuf>,
        /// The input provided to the compiler to compile the contracts. This is only included if
        /// the appropriate flag is set in the CLI context and if the contracts were not cached and
        /// the compiler was invoked.
-        #[serde(default, skip_serializing_if = "Option::is_none")]
+        #[serde(skip_serializing_if = "Option::is_none")]
        compiler_input: Option<CompilerInput>,
    },
}

-/// Information on each step in the execution.
-#[derive(Clone, Debug, Default, Serialize, Deserialize)]
-pub struct StepReport {
-    /// Information on the transactions submitted as part of this step.
-    transactions: PlatformKeyedInformation<Vec<TransactionInformation>>,
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct TransactionInformation {
-    /// The hash of the transaction.
-    pub transaction_hash: TxHash,
-    pub submission_timestamp: u64,
-    pub block_timestamp: u64,
-    pub block_number: BlockNumber,
-}
-
-/// The metrics we collect for our benchmarks.
-#[derive(Clone, Debug, Default, Serialize, Deserialize)]
-pub struct Metrics {
-    pub transaction_per_second: Metric<u64>,
-    pub gas_per_second: Metric<u64>,
-    /* Block Fullness */
-    pub gas_block_fullness: Metric<u64>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub ref_time_block_fullness: Option<Metric<u64>>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub proof_size_block_fullness: Option<Metric<u64>>,
-}
-
-/// The data that we store for a given metric (e.g., TPS).
-#[derive(Clone, Debug, Default, Serialize, Deserialize)]
-pub struct Metric<T> {
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub minimum: Option<PlatformKeyedInformation<T>>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub maximum: Option<PlatformKeyedInformation<T>>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub mean: Option<PlatformKeyedInformation<T>>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub median: Option<PlatformKeyedInformation<T>>,
-    #[serde(default, skip_serializing_if = "Option::is_none")]
-    pub raw: Option<PlatformKeyedInformation<Vec<T>>>,
-}
-
-impl<T> Metric<T>
-where
-    T: Default
-        + Copy
-        + Ord
-        + PartialOrd
-        + Add<Output = T>
-        + Div<Output = T>
-        + TryFrom<usize, Error: std::fmt::Debug>,
-{
-    pub fn new() -> Self {
-        Default::default()
-    }
-
-    pub fn platform_identifiers(&self) -> BTreeSet<PlatformIdentifier> {
-        self.minimum
-            .as_ref()
-            .map(|m| m.keys())
-            .into_iter()
-            .flatten()
-            .chain(
-                self.maximum
-                    .as_ref()
-                    .map(|m| m.keys())
-                    .into_iter()
-                    .flatten(),
-            )
-            .chain(self.mean.as_ref().map(|m| m.keys()).into_iter().flatten())
-            .chain(self.median.as_ref().map(|m| m.keys()).into_iter().flatten())
-            .chain(self.raw.as_ref().map(|m| m.keys()).into_iter().flatten())
-            .copied()
-            .collect()
-    }
-
-    pub fn with_list(
-        &mut self,
-        platform_identifier: PlatformIdentifier,
-        original_list: Vec<T>,
-    ) -> &mut Self {
-        let mut list = original_list.clone();
-        list.sort();
-        let Some(min) = list.first().copied() else {
-            return self;
-        };
-        let Some(max) = list.last().copied() else {
-            return self;
-        };
-        let sum = list.iter().fold(T::default(), |acc, num| acc + *num);
-        let mean = sum / TryInto::<T>::try_into(list.len()).unwrap();
-
-        let median = match list.len().is_multiple_of(2) {
-            true => {
-                let idx = list.len() / 2;
-                let val1 = *list.get(idx - 1).unwrap();
-                let val2 = *list.get(idx).unwrap();
-                (val1 + val2) / TryInto::<T>::try_into(2usize).unwrap()
-            }
-            false => {
-                let idx = list.len() / 2;
-                *list.get(idx).unwrap()
-            }
-        };
-
-        self.minimum
-            .get_or_insert_default()
-            .insert(platform_identifier, min);
-        self.maximum
-            .get_or_insert_default()
-            .insert(platform_identifier, max);
-        self.mean
-            .get_or_insert_default()
-            .insert(platform_identifier, mean);
-        self.median
-            .get_or_insert_default()
-            .insert(platform_identifier, median);
-        self.raw
-            .get_or_insert_default()
-            .insert(platform_identifier, original_list);
-
-        self
-    }
-
-    pub fn combine(&self, other: &Self) -> Self {
-        let mut platform_identifiers = self.platform_identifiers();
-        platform_identifiers.extend(other.platform_identifiers());
-
-        let mut this = Self::new();
-        for platform_identifier in platform_identifiers {
-            let mut l1 = self
-                .raw
-                .as_ref()
-                .and_then(|m| m.get(&platform_identifier))
-                .cloned()
-                .unwrap_or_default();
-            let l2 = other
-                .raw
-                .as_ref()
-                .and_then(|m| m.get(&platform_identifier))
-                .cloned()
-                .unwrap_or_default();
-            l1.extend(l2);
-            this.with_list(platform_identifier, l1);
-        }
-
-        this
-    }
-}
-
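The deleted `with_list` above computes its summary statistics by sorting a copy of the samples: min and max from the ends, an integer mean, and the usual even/odd median rule. A standalone sketch of that logic over `u64` samples (the names here are illustrative, not the crate's API):

```rust
// Summarizes a list of samples the way `Metric::with_list` did: sort a copy,
// then take min/max from the ends, an integer mean, and the even/odd median.
fn summarize(samples: &[u64]) -> Option<(u64, u64, u64, u64)> {
    let mut sorted = samples.to_vec();
    sorted.sort();
    let min = *sorted.first()?;
    let max = *sorted.last()?;
    let mean = sorted.iter().sum::<u64>() / sorted.len() as u64;
    let mid = sorted.len() / 2;
    let median = if sorted.len() % 2 == 0 {
        // Even count: average the two middle samples.
        (sorted[mid - 1] + sorted[mid]) / 2
    } else {
        sorted[mid]
    };
    Some((min, max, mean, median))
}

fn main() {
    // Four samples: the median is the integer mean of the two middle values.
    assert_eq!(summarize(&[4, 1, 3, 2]), Some((1, 4, 2, 2)));
    assert_eq!(summarize(&[]), None);
}
```

Note that `combine` reuses this path: it concatenates the two raw sample lists per platform and recomputes every statistic from scratch rather than merging the precomputed values.
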
-#[derive(Clone, Debug, Serialize, Deserialize, Default)]
-pub struct ContractInformation {
-    /// The size of the contract on the various platforms.
-    pub contract_size: PlatformKeyedInformation<usize>,
-}
-
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
-pub struct MinedBlockInformation {
-    pub ethereum_block_information: EthereumMinedBlockInformation,
-    pub substrate_block_information: Option<SubstrateMinedBlockInformation>,
-    pub tx_counts: BTreeMap<StepPath, usize>,
-}
-
-impl MinedBlockInformation {
-    pub fn gas_block_fullness_percentage(&self) -> u8 {
-        self.ethereum_block_information
-            .gas_block_fullness_percentage()
-    }
-
-    pub fn ref_time_block_fullness_percentage(&self) -> Option<u8> {
-        self.substrate_block_information
-            .as_ref()
-            .map(|block| block.ref_time_block_fullness_percentage())
-    }
-
-    pub fn proof_size_block_fullness_percentage(&self) -> Option<u8> {
-        self.substrate_block_information
-            .as_ref()
-            .map(|block| block.proof_size_block_fullness_percentage())
-    }
-}
-
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
-pub struct EthereumMinedBlockInformation {
-    /// The block number.
-    pub block_number: BlockNumber,
-
-    /// The block timestamp.
-    pub block_timestamp: BlockTimestamp,
-
-    /// The amount of gas mined in the block.
-    pub mined_gas: u128,
-
-    /// The gas limit of the block.
-    pub block_gas_limit: u128,
-
-    /// The hashes of the transactions that were mined as part of the block.
-    pub transaction_hashes: Vec<TxHash>,
-}
-
-impl EthereumMinedBlockInformation {
-    pub fn gas_block_fullness_percentage(&self) -> u8 {
-        (self.mined_gas * 100 / self.block_gas_limit) as u8
-    }
-}
-
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
-pub struct SubstrateMinedBlockInformation {
-    /// The ref time for substrate based chains.
-    pub ref_time: u128,
-
-    /// The max ref time for substrate based chains.
-    pub max_ref_time: u64,
-
-    /// The proof size for substrate based chains.
-    pub proof_size: u128,
-
-    /// The max proof size for substrate based chains.
-    pub max_proof_size: u64,
-}
-
-impl SubstrateMinedBlockInformation {
-    pub fn ref_time_block_fullness_percentage(&self) -> u8 {
-        (self.ref_time * 100 / self.max_ref_time as u128) as u8
-    }
-
-    pub fn proof_size_block_fullness_percentage(&self) -> u8 {
-        (self.proof_size * 100 / self.max_proof_size as u128) as u8
-    }
-}
-
-/// Information keyed by the platform identifier.
-pub type PlatformKeyedInformation<T> = BTreeMap<PlatformIdentifier, T>;
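The deleted fullness helpers above all reduce to the same integer percentage, `used * 100 / limit`, computed in `u128` before narrowing to `u8`. A quick worked example (the numbers are made up for illustration):

```rust
// Mirrors `EthereumMinedBlockInformation::gas_block_fullness_percentage`:
// widen to u128 for the multiply, then narrow the 0..=100 result to u8.
fn fullness_percentage(used: u128, limit: u128) -> u8 {
    (used * 100 / limit) as u8
}

fn main() {
    // e.g. 12_000_000 gas mined in a 30_000_000 gas block => 40% full.
    assert_eq!(fullness_percentage(12_000_000, 30_000_000), 40);
}
```
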
@@ -3,19 +3,16 @@
use std::{collections::BTreeMap, path::PathBuf, sync::Arc};

-use alloy::primitives::Address;
+use alloy_primitives::Address;
use anyhow::Context as _;
use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_compiler::{CompilerInput, CompilerOutput};
-use revive_dt_format::metadata::ContractInstance;
use revive_dt_format::metadata::Metadata;
-use revive_dt_format::steps::StepPath;
+use revive_dt_format::{corpus::Corpus, metadata::ContractInstance};
use semver::Version;
use tokio::sync::{broadcast, oneshot};

-use crate::MinedBlockInformation;
-use crate::TransactionInformation;
use crate::{ExecutionSpecifier, ReporterEvent, TestSpecifier, common::MetadataFilePath};

macro_rules! __report_gen_emit_test_specific {
@@ -347,16 +344,6 @@ macro_rules! define_event {
            ),*
        }

-        impl $ident {
-            pub fn variant_name(&self) -> &'static str {
-                match self {
-                    $(
-                        Self::$variant_ident { .. } => stringify!($variant_ident)
-                    ),*
-                }
-            }
-        }
-
        $(
            #[derive(Debug)]
            $(#[$variant_meta])*
@@ -493,6 +480,11 @@ define_event! {
            /// The channel that the aggregator is to send the receive side of the channel on.
            tx: oneshot::Sender<broadcast::Receiver<ReporterEvent>>
        },
+        /// An event emitted by runners when they've discovered a corpus file.
+        CorpusFileDiscovery {
+            /// The contents of the corpus file.
+            corpus: Corpus
+        },
        /// An event emitted by runners when they've discovered a metadata file.
        MetadataFileDiscovery {
            /// The path of the metadata file discovered.
@@ -621,36 +613,6 @@ define_event! {
            /// The address of the contract.
            address: Address
        },
-        /// Reports the completion of the run.
-        Completion {},
-
-        /* Benchmarks Events */
-        /// An event emitted with information on a transaction that was submitted for a certain step
-        /// of the execution.
-        StepTransactionInformation {
-            /// A specifier for the execution that's taking place.
-            execution_specifier: Arc<ExecutionSpecifier>,
-            /// The path of the step that this transaction belongs to.
-            step_path: StepPath,
-            /// Information about the transaction.
-            transaction_information: TransactionInformation
-        },
-        ContractInformation {
-            /// A specifier for the execution that's taking place.
-            execution_specifier: Arc<ExecutionSpecifier>,
-            /// The path of the solidity source code that contains the contract.
-            source_code_path: PathBuf,
-            /// The name of the contract.
-            contract_name: String,
-            /// The size of the contract.
-            contract_size: usize
-        },
-        BlockMined {
-            /// A specifier for the execution that's taking place.
-            execution_specifier: Arc<ExecutionSpecifier>,
-            /// Information on the mined block.
-            mined_block_information: MinedBlockInformation
-        }
    }
}

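The `tx: oneshot::Sender<broadcast::Receiver<ReporterEvent>>` field above encodes a small handshake: a subscriber asks the aggregator for an event stream, and the aggregator answers by pushing a fresh `broadcast::Receiver` back through the oneshot. A minimal tokio sketch of that pattern, assuming the `tokio` crate with its `sync`, `rt`, and `macros` features, and a stand-in event type:

```rust
use tokio::sync::{broadcast, oneshot};

// Stand-in for the real `ReporterEvent`.
#[derive(Clone, Debug)]
struct Event(&'static str);

#[tokio::main]
async fn main() {
    let (event_tx, _initial_rx) = broadcast::channel::<Event>(16);

    // Subscriber side: create a oneshot through which it wants the
    // broadcast receiver handed back.
    let (reply_tx, reply_rx) = oneshot::channel::<broadcast::Receiver<Event>>();

    // Aggregator side: answer the request with a fresh subscription.
    reply_tx.send(event_tx.subscribe()).ok();

    // Subscriber side: await the receiver, then consume events from it.
    let mut events = reply_rx.await.expect("aggregator dropped the request");
    event_tx.send(Event("BlockMined")).unwrap();
    println!("{:?}", events.recv().await.unwrap());
}
```

Handing out receivers this way lets the aggregator stay the single owner of the broadcast sender while any number of listeners join on demand.
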
@@ -2,13 +2,12 @@
use std::{
    collections::HashMap,
-    str::FromStr,
    sync::{LazyLock, Mutex},
};

use revive_dt_common::types::VersionOrRequirement;

-use semver::{Version, VersionReq};
+use semver::Version;
use sha2::{Digest, Sha256};

use crate::list::List;
@@ -66,9 +65,6 @@ impl SolcDownloader {
        target: &'static str,
        list: &'static str,
    ) -> anyhow::Result<Self> {
-        static MAXIMUM_COMPILER_VERSION_REQUIREMENT: LazyLock<VersionReq> =
-            LazyLock::new(|| VersionReq::from_str("<=0.8.30").unwrap());
-
        let version_or_requirement = version.into();
        match version_or_requirement {
            VersionOrRequirement::Version(version) => Ok(Self {
@@ -83,10 +79,7 @@ impl SolcDownloader {
                .builds
                .into_iter()
                .map(|build| build.version)
-                .filter(|version| {
-                    MAXIMUM_COMPILER_VERSION_REQUIREMENT.matches(version)
-                        && requirement.matches(version)
-                })
+                .filter(|version| requirement.matches(version))
                .max()
            else {
                anyhow::bail!("Failed to find a version that satisfies {requirement:?}");
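The change above drops the hard `<=0.8.30` ceiling and keeps only the caller's requirement when picking a solc build; the selection itself is just `VersionReq::matches` plus `max()` over the advertised versions. A small sketch with the `semver` crate (the version list here is illustrative):

```rust
use semver::{Version, VersionReq};

// Picks the highest available version that satisfies the requirement, the
// same filter-then-max shape used by the downloader.
fn pick(available: &[Version], requirement: &VersionReq) -> Option<Version> {
    available
        .iter()
        .filter(|version| requirement.matches(version))
        .max()
        .cloned()
}

fn main() {
    let available: Vec<Version> = ["0.8.29", "0.8.30", "0.8.31"]
        .iter()
        .map(|v| Version::parse(v).unwrap())
        .collect();
    let requirement = VersionReq::parse(">=0.8.13, <0.8.31").unwrap();
    assert_eq!(
        pick(&available, &requirement),
        Some(Version::parse("0.8.30").unwrap())
    );
}
```
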
Submodule polkadot-sdk added at dc3d0e5ab7
Submodule resolc-compiler-tests deleted from 55da34c4f6
@@ -1,7 +1,7 @@
#!/bin/bash

# Revive Differential Tests - Quick Start Script
-# This script clones the test repository, and runs the tool
+# This script clones the test repository, sets up the corpus file, and runs the tool

set -e  # Exit on any error

@@ -14,6 +14,7 @@ NC='\033[0m' # No Color
# Configuration
TEST_REPO_URL="https://github.com/paritytech/resolc-compiler-tests"
TEST_REPO_DIR="resolc-compiler-tests"
+CORPUS_FILE="./corpus.json"
WORKDIR="workdir"

# Optional positional argument: path to polkadot-sdk directory
@@ -22,6 +23,7 @@ POLKADOT_SDK_DIR="${1:-}"
# Binary paths (default to names in $PATH)
REVIVE_DEV_NODE_BIN="revive-dev-node"
ETH_RPC_BIN="eth-rpc"
+SUBSTRATE_NODE_BIN="substrate-node"

echo -e "${GREEN}=== Revive Differential Tests Quick Start ===${NC}"
echo ""
@@ -49,13 +51,14 @@ if [ -n "$POLKADOT_SDK_DIR" ]; then

    REVIVE_DEV_NODE_BIN="$POLKADOT_SDK_DIR/target/release/revive-dev-node"
    ETH_RPC_BIN="$POLKADOT_SDK_DIR/target/release/eth-rpc"
+    SUBSTRATE_NODE_BIN="$POLKADOT_SDK_DIR/target/release/substrate-node"

-    if [ ! -x "$REVIVE_DEV_NODE_BIN" ] || [ ! -x "$ETH_RPC_BIN" ]; then
+    if [ ! -x "$REVIVE_DEV_NODE_BIN" ] || [ ! -x "$ETH_RPC_BIN" ] || [ ! -x "$SUBSTRATE_NODE_BIN" ]; then
        echo -e "${YELLOW}Required binaries not found in release target. Building...${NC}"
        (cd "$POLKADOT_SDK_DIR" && cargo build --release --package staging-node-cli --package pallet-revive-eth-rpc --package revive-dev-node)
    fi

-    for bin in "$REVIVE_DEV_NODE_BIN" "$ETH_RPC_BIN"; do
+    for bin in "$REVIVE_DEV_NODE_BIN" "$ETH_RPC_BIN" "$SUBSTRATE_NODE_BIN"; do
        if [ ! -x "$bin" ]; then
            echo -e "${RED}Expected binary not found after build: $bin${NC}"
            exit 1
@@ -65,6 +68,19 @@ else
    echo -e "${YELLOW}No polkadot-sdk path provided. Using binaries from $PATH.${NC}"
fi

+# Create corpus file with absolute path resolved at runtime
+echo -e "${GREEN}Creating corpus file...${NC}"
+ABSOLUTE_PATH=$(realpath "$TEST_REPO_DIR/fixtures/solidity/")
+
+cat > "$CORPUS_FILE" << EOF
+{
+    "name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
+    "path": "$ABSOLUTE_PATH"
+}
+EOF
+
+echo -e "${GREEN}Corpus file created: $CORPUS_FILE${NC}"
+
# Create workdir if it doesn't exist
mkdir -p "$WORKDIR"

@@ -73,15 +89,13 @@ echo "This may take a while..."
echo ""

# Run the tool
-cargo build --release;
-RUST_LOG="info,alloy_pubsub::service=error" ./target/release/retester test \
+RUST_LOG="info" cargo run --release -- execute-tests \
+    --platform geth-evm-solc \
    --platform revive-dev-node-polkavm-resolc \
-    --test $(realpath "$TEST_REPO_DIR/fixtures/solidity") \
+    --corpus "$CORPUS_FILE" \
    --working-directory "$WORKDIR" \
-    --concurrency.number-of-nodes 10 \
-    --concurrency.number-of-threads 5 \
-    --concurrency.number-of-concurrent-tasks 500 \
-    --wallet.additional-keys 100000 \
+    --concurrency.number-of-nodes 5 \
+    --kitchensink.path "$SUBSTRATE_NODE_BIN" \
    --revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
    --eth-rpc.path "$ETH_RPC_BIN" \
    > logs.log \
@@ -1,315 +0,0 @@
"""
Utilities to print benchmark metrics from a report JSON into CSV.

Usage:
    python scripts/print_benchmark_metrics_csv.py /absolute/path/to/report.json

The script prints, for each metadata path, case index, and mode combination,
CSV rows aligned to mined blocks with the following columns:
- block_number
- number_of_txs
- tps (transaction_per_second)
- gps (gas_per_second)
- gas_block_fullness
- ref_time (if available)
- max_ref_time (if available)
- proof_size (if available)
- max_proof_size (if available)
- ref_time_block_fullness (if available)
- proof_size_block_fullness (if available)

Important nuance: TPS and GPS arrays have (number_of_blocks - 1) items. The
first block row has no TPS/GPS; the CSV leaves those cells empty for the first
row and aligns subsequent values to their corresponding next block.
"""

from __future__ import annotations

import csv
import json
import sys
from typing import List, Mapping, TypedDict, no_type_check


class EthereumMinedBlockInformation(TypedDict):
    """EVM block information extracted from the report.

    Attributes:
        block_number: The block height.
        block_timestamp: The UNIX timestamp of the block.
        mined_gas: Total gas used (mined) in the block.
        block_gas_limit: The gas limit of the block.
        transaction_hashes: List of transaction hashes included in the block.
    """

    block_number: int
    block_timestamp: int
    mined_gas: int
    block_gas_limit: int
    transaction_hashes: List[str]


class SubstrateMinedBlockInformation(TypedDict):
    """Substrate-specific block resource usage fields.

    Attributes:
        ref_time: The consumed ref time in the block.
        max_ref_time: The maximum ref time allowed for the block.
        proof_size: The consumed proof size in the block.
        max_proof_size: The maximum proof size allowed for the block.
    """

    ref_time: int
    max_ref_time: int
    proof_size: int
    max_proof_size: int


class MinedBlockInformation(TypedDict):
    """Block-level information for a mined block with both EVM and optional Substrate fields."""

    ethereum_block_information: EthereumMinedBlockInformation
    substrate_block_information: SubstrateMinedBlockInformation | None


def substrate_block_information_ref_time(
    block: SubstrateMinedBlockInformation | None,
) -> int | None:
    if block is None:
        return None
    return block["ref_time"]


def substrate_block_information_max_ref_time(
    block: SubstrateMinedBlockInformation | None,
) -> int | None:
    if block is None:
        return None
    return block["max_ref_time"]


def substrate_block_information_proof_size(
    block: SubstrateMinedBlockInformation | None,
) -> int | None:
    if block is None:
        return None
    return block["proof_size"]


def substrate_block_information_max_proof_size(
    block: SubstrateMinedBlockInformation | None,
) -> int | None:
    if block is None:
        return None
    return block["max_proof_size"]


class Metric(TypedDict):
    """Metric data of integer values keyed by platform identifier.

    Attributes:
        minimum: Single scalar minimum per platform.
        maximum: Single scalar maximum per platform.
        mean: Single scalar mean per platform.
        median: Single scalar median per platform.
        raw: Time-series (or list) of values per platform.
    """

    minimum: Mapping[str, int]
    maximum: Mapping[str, int]
    mean: Mapping[str, int]
    median: Mapping[str, int]
    raw: Mapping[str, List[int]]


class Metrics(TypedDict):
    """All metrics that may be present for a given execution report.

    Note that some metrics are optional and present only for specific platforms
    or execution modes.
    """

    transaction_per_second: Metric
    gas_per_second: Metric
    gas_block_fullness: Metric
    ref_time_block_fullness: Metric | None
    proof_size_block_fullness: Metric | None


@no_type_check
def metrics_raw_item(
    metrics: Metrics, name: str, target: str, index: int
) -> int | None:
    # Walk metrics[name]["raw"][target] defensively; a missing level or an
    # out-of-range index yields None instead of raising.
    l: list[int] = metrics.get(name, {}).get("raw", {}).get(target, [])
    try:
        return l[index]
    except (IndexError, TypeError):
        return None


class ExecutionReport(TypedDict):
    """Execution report for a mode containing mined blocks and metrics.

    Attributes:
        mined_block_information: Mapping from platform identifier to the list of
            mined blocks observed for that platform.
        metrics: The computed metrics for the execution.
    """

    mined_block_information: Mapping[str, List[MinedBlockInformation]]
    metrics: Metrics


class CaseReport(TypedDict):
    """Report for a single case, keyed by mode string."""

    mode_execution_reports: Mapping[str, ExecutionReport]


class MetadataFileReport(TypedDict):
    """Report subtree keyed by case indices for a metadata file path."""

    case_reports: Mapping[str, CaseReport]


class ReportRoot(TypedDict):
    """Top-level report schema with execution information keyed by metadata path."""

    execution_information: Mapping[str, MetadataFileReport]


BlockInformation = TypedDict(
    "BlockInformation",
    {
        "Block Number": int,
        "Timestamp": int,
        "Datetime": None,
        "Transaction Count": int,
        "TPS": int | None,
        "GPS": int | None,
        "Gas Mined": int,
        "Block Gas Limit": int,
        "Block Fullness Gas": float,
        "Ref Time": int | None,
        "Max Ref Time": int | None,
        "Block Fullness Ref Time": int | None,
        "Proof Size": int | None,
        "Max Proof Size": int | None,
        "Block Fullness Proof Size": int | None,
    },
)
"""A typed dictionary used to hold all of the block information"""


def load_report(path: str) -> ReportRoot:
    """Load the report JSON from disk.

    Args:
        path: Absolute or relative filesystem path to the JSON report file.

    Returns:
        The parsed report as a typed dictionary structure.
    """

    with open(path, "r", encoding="utf-8") as f:
        data: ReportRoot = json.load(f)
    return data


def main() -> None:
    report_path: str = sys.argv[1]
    report: ReportRoot = load_report(report_path)

    # TODO: Remove this in the future, but for now, the target is fixed.
    target: str = sys.argv[2]

    for _, metadata_file_report in report["execution_information"].items():
        for _, case_report in metadata_file_report["case_reports"].items():
            for _, execution_report in case_report["mode_execution_reports"].items():
                blocks_information: list[MinedBlockInformation] = execution_report[
                    "mined_block_information"
                ][target]

                resolved_blocks: list[BlockInformation] = []
                for i, block_information in enumerate(blocks_information):
                    ethereum_information = block_information[
                        "ethereum_block_information"
                    ]
                    substrate_information = block_information[
                        "substrate_block_information"
                    ]
                    mined_gas: int = ethereum_information["mined_gas"]
                    block_gas_limit: int = ethereum_information["block_gas_limit"]
                    resolved_blocks.append(
                        {
                            "Block Number": ethereum_information["block_number"],
                            "Timestamp": ethereum_information["block_timestamp"],
                            "Datetime": None,
                            "Transaction Count": len(
                                ethereum_information["transaction_hashes"]
                            ),
                            # TPS/GPS raw arrays have one entry fewer than the
                            # block list, so the first block gets no value.
                            "TPS": (
                                None
                                if i == 0
                                else execution_report["metrics"][
                                    "transaction_per_second"
                                ]["raw"][target][i - 1]
                            ),
                            "GPS": (
                                None
                                if i == 0
                                else execution_report["metrics"]["gas_per_second"][
                                    "raw"
                                ][target][i - 1]
                            ),
                            "Gas Mined": mined_gas,
                            "Block Gas Limit": block_gas_limit,
                            "Block Fullness Gas": mined_gas / block_gas_limit,
                            "Ref Time": substrate_block_information_ref_time(
                                substrate_information
                            ),
                            "Max Ref Time": substrate_block_information_max_ref_time(
                                substrate_information
                            ),
                            "Block Fullness Ref Time": metrics_raw_item(
                                execution_report["metrics"],
                                "ref_time_block_fullness",
                                target,
                                i,
                            ),
                            "Proof Size": substrate_block_information_proof_size(
                                substrate_information
                            ),
                            "Max Proof Size": substrate_block_information_max_proof_size(
                                substrate_information
                            ),
                            "Block Fullness Proof Size": metrics_raw_item(
                                execution_report["metrics"],
                                "proof_size_block_fullness",
                                target,
                                i,
                            ),
                        }
                    )

                # Skip executions that mined no blocks; there is nothing to print.
                if not resolved_blocks:
                    continue

                csv_writer = csv.DictWriter(sys.stdout, resolved_blocks[0].keys())
                csv_writer.writeheader()
                csv_writer.writerows(resolved_blocks)


if __name__ == "__main__":
    main()
@@ -1,259 +0,0 @@
"""
This script turns the JSON report produced by the revive differential tests tool into an
easy-to-consume markdown document for the purpose of reporting this information in the Polkadot SDK
CI. The full models used in the JSON report can be found in the revive differential tests repo;
the models used in this script are just a partial reproduction of the full report models.
"""

import io
import json
import sys
import typing


class Report(typing.TypedDict):
    context: "Context"
    execution_information: dict["MetadataFilePathString", "MetadataFileReport"]


class MetadataFileReport(typing.TypedDict):
    case_reports: dict["CaseIdxString", "CaseReport"]


class CaseReport(typing.TypedDict):
    mode_execution_reports: dict["ModeString", "ExecutionReport"]


class ExecutionReport(typing.TypedDict):
    status: "TestCaseStatus"


class Context(typing.TypedDict):
    Test: "TestContext"


class TestContext(typing.TypedDict):
    corpus_configuration: "CorpusConfiguration"


class CorpusConfiguration(typing.TypedDict):
    test_specifiers: list["TestSpecifier"]


class CaseStatusSuccess(typing.TypedDict):
    status: typing.Literal["Succeeded"]
    steps_executed: int


class CaseStatusFailure(typing.TypedDict):
    status: typing.Literal["Failed"]
    reason: str


class CaseStatusIgnored(typing.TypedDict):
    status: typing.Literal["Ignored"]
    reason: str


TestCaseStatus = typing.Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored]
"""A union type of all of the possible statuses that could be reported for a case."""

TestSpecifier = str
"""A test specifier string. For example resolc-compiler-tests/fixtures/solidity/test.json::0::Y+"""

ModeString = str
"""The mode string. For example Y+ >=0.8.13"""

MetadataFilePathString = str
"""The path to a metadata file. For example resolc-compiler-tests/fixtures/solidity/test.json"""

CaseIdxString = str
"""The index of a case as a string. For example '0'"""

PlatformString = typing.Union[
    typing.Literal["revive-dev-node-revm-solc"],
    typing.Literal["revive-dev-node-polkavm-resolc"],
]
"""A string of the platform on which the test was run"""


def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
    """
    Given a path, this function returns the path relative to the resolc-compiler-tests directory.
    The following is an example of an input and an output:

    Input: ~/polkadot-sdk/revive-differential-tests/resolc-compiler-tests/fixtures/solidity/test.json
    Output: test.json
    """

    return f"{path.split('resolc-compiler-tests/fixtures/solidity')[-1].strip('/')}"


def main() -> None:
    with open(sys.argv[1], "r") as file:
        report: Report = json.load(file)

    # Getting the platform string and resolving it into a simpler version of
    # itself.
    platform_identifier: PlatformString = typing.cast(PlatformString, sys.argv[2])
    if platform_identifier == "revive-dev-node-polkavm-resolc":
        platform: str = "PolkaVM"
    elif platform_identifier == "revive-dev-node-revm-solc":
        platform: str = "REVM"
    else:
        platform: str = platform_identifier

    # Starting the markdown document and adding information to it as we go.
    markdown_document: io.TextIOWrapper = open("report.md", "w")
    print(f"# Differential Tests Results ({platform})", file=markdown_document)

    # Getting all of the test specifiers from the report and making them relative to the tests dir.
    test_specifiers: list[str] = list(
        map(
            path_relative_to_resolc_compiler_test_directory,
            report["context"]["Test"]["corpus_configuration"]["test_specifiers"],
        )
    )
    print("## Specified Tests", file=markdown_document)
    for test_specifier in test_specifiers:
        print(f"* ``{test_specifier}``", file=markdown_document)

    # Counting the total number of test cases, successes, failures, and ignored tests.
    total_number_of_cases: int = 0
    total_number_of_successes: int = 0
    total_number_of_failures: int = 0
    total_number_of_ignores: int = 0
    for _, metadata_file_report in report["execution_information"].items():
        for _, case_report in metadata_file_report["case_reports"].items():
            for _, execution_report in case_report["mode_execution_reports"].items():
                status: TestCaseStatus = execution_report["status"]

                total_number_of_cases += 1
                if status["status"] == "Succeeded":
                    total_number_of_successes += 1
                elif status["status"] == "Failed":
                    total_number_of_failures += 1
                elif status["status"] == "Ignored":
                    total_number_of_ignores += 1
                else:
                    raise Exception(
                        f"Encountered a status that's unknown to the script: {status}"
                    )

    print("## Counts", file=markdown_document)
    print(
        f"* **Total Number of Test Cases:** {total_number_of_cases}",
        file=markdown_document,
    )
    print(
        f"* **Total Number of Successes:** {total_number_of_successes}",
        file=markdown_document,
    )
    print(
        f"* **Total Number of Failures:** {total_number_of_failures}",
        file=markdown_document,
    )
    print(
        f"* **Total Number of Ignores:** {total_number_of_ignores}",
        file=markdown_document,
    )

    # Grouping the various test cases into dictionaries and groups depending on their status to
    # make them easier to include in the markdown document later on.
    successful_cases: dict[
        MetadataFilePathString, dict[CaseIdxString, set[ModeString]]
    ] = {}
    for metadata_file_path, metadata_file_report in report[
        "execution_information"
    ].items():
        for case_idx_string, case_report in metadata_file_report[
            "case_reports"
        ].items():
            for mode_string, execution_report in case_report[
                "mode_execution_reports"
            ].items():
                status: TestCaseStatus = execution_report["status"]
                metadata_file_path: str = (
                    path_relative_to_resolc_compiler_test_directory(metadata_file_path)
                )
                mode_string: str = mode_string.replace(" M3", "+").replace(" M0", "-")

                if status["status"] == "Succeeded":
                    successful_cases.setdefault(
                        metadata_file_path,
                        {},
                    ).setdefault(
                        case_idx_string, set()
                    ).add(mode_string)

    print("## Failures", file=markdown_document)
    print(
        "The test specifiers seen in this section have the format 'path::case_idx::compilation_mode'\
 and they're compatible with the revive differential tests framework and can be specified\
 to it directly in the same way that they're provided through the `--test` argument of the\
 framework.\n",
        file=markdown_document,
    )
    print(
        "The failures are provided in an expandable section to ensure that the PR does not get \
polluted with information. Please click on the section below for more information",
        file=markdown_document,
    )
    print(
        "<details><summary>Detailed Differential Tests Failure Information</summary>\n\n",
        file=markdown_document,
    )
    print("| Test Specifier | Failure Reason | Note |", file=markdown_document)
    print("| -- | -- | -- |", file=markdown_document)

    for metadata_file_path, metadata_file_report in report[
        "execution_information"
    ].items():
        for case_idx_string, case_report in metadata_file_report[
            "case_reports"
        ].items():
            for mode_string, execution_report in case_report[
                "mode_execution_reports"
            ].items():
                status: TestCaseStatus = execution_report["status"]
                metadata_file_path: str = (
                    path_relative_to_resolc_compiler_test_directory(metadata_file_path)
                )
                mode_string: str = mode_string.replace(" M3", "+").replace(" M0", "-")

                if status["status"] != "Failed":
                    continue

                failure_reason: str = (
                    status["reason"].replace("\n", " ").replace("|", " ")
                )

                note: str = ""
                modes_where_this_case_succeeded: set[ModeString] = (
                    successful_cases.setdefault(
                        metadata_file_path,
                        {},
                    ).setdefault(case_idx_string, set())
                )
                if len(modes_where_this_case_succeeded) != 0:
                    note = (
                        f"This test case succeeded with other compilation modes: {modes_where_this_case_succeeded}"
                    )

                test_specifier: str = (
                    f"{metadata_file_path}::{case_idx_string}::{mode_string}"
                )
                print(
                    f"| ``{test_specifier}`` | ``{failure_reason}`` | {note} |",
                    file=markdown_document,
                )
    print("\n\n</details>", file=markdown_document)

    # Closing the file manually; a `with` block here would force an extra level of
    # indentation on all of the code above.
    markdown_document.close()


if __name__ == "__main__":
    main()