mirror of
https://github.com/pezkuwichain/revive-differential-tests.git
synced 2026-04-22 20:47:58 +00:00
Compare commits
106 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| cec992f80a | |||
| b53550e43e | |||
| 9491263857 | |||
| b41c0e61c0 | |||
| 9d1c71756f | |||
| 8b0a0c3518 | |||
| 94b04c0189 | |||
| 2d3602aaed | |||
| d38e6d419d | |||
| 62478ee2f9 | |||
| dda369c8b5 | |||
| 08c1572870 | |||
| cd6b7969ac | |||
| 78ac7ee381 | |||
| 3edaebdcae | |||
| 66feb36b4e | |||
| cc753a1a2c | |||
| 31dfd67569 | |||
| a6e4932a08 | |||
| 06c2e023a9 | |||
| 347dcb4488 | |||
| f9a63a5641 | |||
| fb009f65c1 | |||
| dff4c25e24 | |||
| e433d93cbf | |||
| 408754e8fb | |||
| 59bfffe5fe | |||
| 380ea693be | |||
| d02152b565 | |||
| 75159229df | |||
| 2af1a62319 | |||
| e09be4f3fa | |||
| 33b5faca45 | |||
| 172fb4700f | |||
| fefea17c8e | |||
| b71445b632 | |||
| f1a911545e | |||
| 48e7d69158 | |||
| 260ac5d98e | |||
| 94f116f843 | |||
| 0d7a87a728 | |||
| 29bf5304ec | |||
| 491c23efb3 | |||
| 3c86cbb7ef | |||
| fde07b7c0d | |||
| ebc24a588b | |||
| 21e25f09e6 | |||
| 8c412dc924 | |||
| 6da3172581 | |||
| c6eb04b04e | |||
| e5114d31dc | |||
| 74fdeb4a2e | |||
| f9dc362c03 | |||
| c2ba2cfed6 | |||
| 3dda739cef | |||
| 97e3f8bbff | |||
| 7189361a58 | |||
| 9b700bfec2 | |||
| 98b62d705f | |||
| 1a894f791a | |||
| c2526e48e7 | |||
| 7878f68c26 | |||
| 8b1afc36a3 | |||
| 60328cd493 | |||
| eb264fcc7b | |||
| 84b139d3b4 | |||
| d93824d973 | |||
| bec5a7e390 | |||
| 85033cfead | |||
| 76d6a154c1 | |||
| c58551803d | |||
| 185edcfad9 | |||
| 09d56f5177 | |||
| a59e287fa1 | |||
| f2045db0e9 | |||
| 5a11f44673 | |||
| 46aea0890d | |||
| 9b40c9b9e3 | |||
| f67a9bf643 | |||
| 67d767ffde | |||
| f7fbe094ec | |||
| 90b2dd4cfe | |||
| 64d63ef999 | |||
| 757bfbe116 | |||
| 8619e7feb0 | |||
| edba49b301 | |||
| 9980926d40 | |||
| ff993d44a5 | |||
| 8cbb1a9f77 | |||
| 56c2fe8c0c | |||
| 330a773a1c | |||
| f51693cb9f | |||
| 4db7009640 | |||
| 5a36e242ec | |||
| 33329632b5 | |||
| 429f2e92a2 | |||
| 65f41f2038 | |||
| 3ed8a1ca1c | |||
| 2923d675cd | |||
| 8f5bcf08ad | |||
| 90fb89adc0 | |||
| b03ad3027e | |||
| 972f3b6d5b | |||
| 6f4aa731ab | |||
| 589a5dc988 | |||
| c6d55515be |
@@ -0,0 +1,141 @@
|
|||||||
|
name: "Run Revive Differential Tests"
|
||||||
|
description: "Builds and runs revive-differential-tests (retester) from this repo against the caller's Polkadot SDK."
|
||||||
|
|
||||||
|
inputs:
|
||||||
|
# Setup arguments & environment
|
||||||
|
polkadot-sdk-path:
|
||||||
|
description: "The path of the polkadot-sdk that should be compiled for the tests to run against."
|
||||||
|
required: false
|
||||||
|
default: "."
|
||||||
|
type: string
|
||||||
|
cargo-command:
|
||||||
|
description: "The cargo command to use in compilations and running of tests (e.g., forklift cargo)."
|
||||||
|
required: false
|
||||||
|
default: "cargo"
|
||||||
|
type: string
|
||||||
|
revive-differential-tests-ref:
|
||||||
|
description: "The branch, tag or SHA to checkout for the revive-differential-tests."
|
||||||
|
required: false
|
||||||
|
default: "main"
|
||||||
|
type: string
|
||||||
|
resolc-version:
|
||||||
|
description: "The version of resolc to install and use in tests."
|
||||||
|
required: false
|
||||||
|
default: "0.5.0"
|
||||||
|
type: string
|
||||||
|
use-compilation-caches:
|
||||||
|
description: "Controls if the compilation caches will be used for the test run or not."
|
||||||
|
required: false
|
||||||
|
default: true
|
||||||
|
type: boolean
|
||||||
|
# Test Execution Arguments
|
||||||
|
platform:
|
||||||
|
description: "The identifier of the platform to run the tests on (e.g., geth-evm-solc, revive-dev-node-revm-solc)"
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
polkadot-omnichain-node-chain-spec-path:
|
||||||
|
description: "The path of the chain-spec of the chain we're spawning'. This is only required if the polkadot-omni-node is one of the selected platforms."
|
||||||
|
required: false
|
||||||
|
type: string
|
||||||
|
polkadot-omnichain-node-parachain-id:
|
||||||
|
description: "The id of the parachain to spawn with the polkadot-omni-node. This is only required if the polkadot-omni-node is one of the selected platforms."
|
||||||
|
type: number
|
||||||
|
required: false
|
||||||
|
expectations-file-path:
|
||||||
|
description: "Path to the expectations file to use to compare against."
|
||||||
|
type: string
|
||||||
|
required: false
|
||||||
|
|
||||||
|
runs:
|
||||||
|
using: "composite"
|
||||||
|
steps:
|
||||||
|
- name: Checkout the Differential Tests Repository
|
||||||
|
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||||
|
with:
|
||||||
|
repository: paritytech/revive-differential-tests
|
||||||
|
ref: ${{ inputs['revive-differential-tests-ref'] }}
|
||||||
|
path: revive-differential-tests
|
||||||
|
submodules: recursive
|
||||||
|
- name: Installing the Latest Resolc
|
||||||
|
shell: bash
|
||||||
|
if: ${{ runner.os == 'Linux' && runner.arch == 'X64' }}
|
||||||
|
run: |
|
||||||
|
VERSION="${{ inputs['resolc-version'] }}"
|
||||||
|
ASSET_URL="https://github.com/paritytech/revive/releases/download/v$VERSION/resolc-x86_64-unknown-linux-musl"
|
||||||
|
echo "Downloading resolc v$VERSION from $ASSET_URL"
|
||||||
|
curl -Lsf --show-error -o resolc "$ASSET_URL"
|
||||||
|
chmod +x resolc
|
||||||
|
./resolc --version
|
||||||
|
- name: Installing Retester
|
||||||
|
shell: bash
|
||||||
|
run: ${{ inputs['cargo-command'] }} install --locked --path revive-differential-tests/crates/core
|
||||||
|
- name: Creating a workdir for retester
|
||||||
|
shell: bash
|
||||||
|
run: mkdir workdir
|
||||||
|
- name: Downloading & Initializing the compilation caches
|
||||||
|
shell: bash
|
||||||
|
if: ${{ inputs['use-compilation-caches'] == true }}
|
||||||
|
run: |
|
||||||
|
curl -fL --retry 3 --retry-all-errors --connect-timeout 10 -o cache.tar.gz "https://github.com/paritytech/revive-differential-tests/releases/download/compilation-caches-v1.1/cache.tar.gz"
|
||||||
|
tar -zxf cache.tar.gz -C ./workdir > /dev/null 2>&1
|
||||||
|
- name: Building the dependencies from the Polkadot SDK
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
${{ inputs['cargo-command'] }} build --locked --profile release -p pallet-revive-eth-rpc -p revive-dev-node --manifest-path ${{ inputs['polkadot-sdk-path'] }}/Cargo.toml
|
||||||
|
${{ inputs['cargo-command'] }} build --locked --profile release --bin polkadot-omni-node --manifest-path ${{ inputs['polkadot-sdk-path'] }}/Cargo.toml
|
||||||
|
- name: Installing retester
|
||||||
|
shell: bash
|
||||||
|
run: ${{ inputs['cargo-command'] }} install --path ./revive-differential-tests/crates/core
|
||||||
|
- name: Installing report-processor
|
||||||
|
shell: bash
|
||||||
|
run: ${{ inputs['cargo-command'] }} install --path ./revive-differential-tests/crates/report-processor
|
||||||
|
- name: Running the Differential Tests
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
OMNI_ARGS=()
|
||||||
|
if [[ -n "${{ inputs['polkadot-omnichain-node-parachain-id'] }}" ]]; then
|
||||||
|
OMNI_ARGS+=(
|
||||||
|
--polkadot-omni-node.parachain-id
|
||||||
|
"${{ inputs['polkadot-omnichain-node-parachain-id'] }}"
|
||||||
|
)
|
||||||
|
fi
|
||||||
|
if [[ -n "${{ inputs['polkadot-omnichain-node-chain-spec-path'] }}" ]]; then
|
||||||
|
OMNI_ARGS+=(
|
||||||
|
--polkadot-omni-node.chain-spec-path
|
||||||
|
"${{ inputs['polkadot-omnichain-node-chain-spec-path'] }}"
|
||||||
|
)
|
||||||
|
fi
|
||||||
|
|
||||||
|
retester test \
|
||||||
|
--test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/simple \
|
||||||
|
--test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/complex \
|
||||||
|
--test ./revive-differential-tests/resolc-compiler-tests/fixtures/solidity/translated_semantic_tests \
|
||||||
|
--platform ${{ inputs['platform'] }} \
|
||||||
|
--report.file-name report.json \
|
||||||
|
--concurrency.number-of-nodes 10 \
|
||||||
|
--concurrency.number-of-threads 10 \
|
||||||
|
--concurrency.number-of-concurrent-tasks 100 \
|
||||||
|
--working-directory ./workdir \
|
||||||
|
--revive-dev-node.consensus manual-seal-200 \
|
||||||
|
--revive-dev-node.path ${{ inputs['polkadot-sdk-path'] }}/target/release/revive-dev-node \
|
||||||
|
--eth-rpc.path ${{ inputs['polkadot-sdk-path'] }}/target/release/eth-rpc \
|
||||||
|
--polkadot-omni-node.path ${{ inputs['polkadot-sdk-path'] }}/target/release/polkadot-omni-node \
|
||||||
|
--resolc.path ./resolc \
|
||||||
|
"${OMNI_ARGS[@]}" || true
|
||||||
|
- name: Generate the expectation file
|
||||||
|
shell: bash
|
||||||
|
run: report-processor generate-expectations-file --report-path ./workdir/report.json --output-path ./workdir/expectations.json --remove-prefix ./revive-differential-tests/resolc-compiler-tests
|
||||||
|
- name: Upload the Report to the CI
|
||||||
|
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
|
||||||
|
with:
|
||||||
|
name: ${{ inputs['platform'] }}-report.json
|
||||||
|
path: ./workdir/report.json
|
||||||
|
- name: Upload the Report to the CI
|
||||||
|
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
|
||||||
|
with:
|
||||||
|
name: ${{ inputs['platform'] }}.json
|
||||||
|
path: ./workdir/expectations.json
|
||||||
|
- name: Check Expectations
|
||||||
|
shell: bash
|
||||||
|
if: ${{ inputs['expectations-file-path'] != '' }}
|
||||||
|
run: report-processor compare-expectation-files --base-expectation-path ${{ inputs['expectations-file-path'] }} --other-expectation-path ./workdir/expectations.json
|
||||||
+161
-80
@@ -15,93 +15,107 @@ concurrency:
|
|||||||
|
|
||||||
env:
|
env:
|
||||||
CARGO_TERM_COLOR: always
|
CARGO_TERM_COLOR: always
|
||||||
|
POLKADOT_VERSION: polkadot-stable2506-2
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
cache-polkadot:
|
machete:
|
||||||
name: Build and cache Polkadot binaries on ${{ matrix.os }}
|
name: Check for Unneeded Dependencies
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ubuntu-24.04
|
||||||
strategy:
|
env:
|
||||||
matrix:
|
SCCACHE_GHA_ENABLED: "true"
|
||||||
os: [ubuntu-24.04, macos-14]
|
RUSTC_WRAPPER: "sccache"
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repo and submodules
|
- name: Checkout This Repository
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
|
- name: Run Sccache
|
||||||
- name: Install dependencies (Linux)
|
uses: mozilla-actions/sccache-action@v0.0.9
|
||||||
if: matrix.os == 'ubuntu-24.04'
|
- name: Install the Rust Toolchain
|
||||||
run: |
|
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
sudo apt-get update
|
- name: Install the Cargo Make Binary
|
||||||
sudo apt-get install -y protobuf-compiler clang libclang-dev
|
uses: davidB/rust-cargo-make@v1
|
||||||
rustup target add wasm32-unknown-unknown
|
- name: Run Cargo Machete
|
||||||
rustup component add rust-src
|
run: cargo make machete
|
||||||
|
check-fmt:
|
||||||
- name: Install dependencies (macOS)
|
name: Check Formatting
|
||||||
if: matrix.os == 'macos-14'
|
runs-on: ubuntu-24.04
|
||||||
run: |
|
env:
|
||||||
brew install protobuf
|
SCCACHE_GHA_ENABLED: "true"
|
||||||
rustup target add wasm32-unknown-unknown
|
RUSTC_WRAPPER: "sccache"
|
||||||
rustup component add rust-src
|
steps:
|
||||||
|
- name: Checkout This Repository
|
||||||
- name: Cache binaries
|
uses: actions/checkout@v4
|
||||||
id: cache
|
|
||||||
uses: actions/cache@v3
|
|
||||||
with:
|
with:
|
||||||
path: |
|
submodules: recursive
|
||||||
~/.cargo/bin/substrate-node
|
- name: Run Sccache
|
||||||
~/.cargo/bin/eth-rpc
|
uses: mozilla-actions/sccache-action@v0.0.9
|
||||||
key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}
|
- name: Install the Rust Toolchain
|
||||||
|
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
- name: Build substrate-node
|
- name: Install the Cargo Make Binary
|
||||||
if: steps.cache.outputs.cache-hit != 'true'
|
uses: davidB/rust-cargo-make@v1
|
||||||
run: |
|
- name: Run Cargo Formatter
|
||||||
cd polkadot-sdk
|
run: cargo make fmt-check
|
||||||
cargo install --locked --force --profile=production --path substrate/bin/node/cli --bin substrate-node --features cli
|
check-clippy:
|
||||||
|
name: Check Clippy Lints
|
||||||
- name: Build eth-rpc
|
runs-on: ubuntu-24.04
|
||||||
if: steps.cache.outputs.cache-hit != 'true'
|
env:
|
||||||
run: |
|
SCCACHE_GHA_ENABLED: "true"
|
||||||
cd polkadot-sdk
|
RUSTC_WRAPPER: "sccache"
|
||||||
cargo install --path substrate/frame/revive/rpc --bin eth-rpc
|
steps:
|
||||||
|
- name: Checkout This Repository
|
||||||
ci:
|
uses: actions/checkout@v4
|
||||||
name: CI on ${{ matrix.os }}
|
with:
|
||||||
needs: cache-polkadot
|
submodules: recursive
|
||||||
|
- name: Run Sccache
|
||||||
|
uses: mozilla-actions/sccache-action@v0.0.9
|
||||||
|
- name: Install the Rust Toolchain
|
||||||
|
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
|
- name: Install the Cargo Make Binary
|
||||||
|
uses: davidB/rust-cargo-make@v1
|
||||||
|
- name: Run Cargo Clippy
|
||||||
|
run: cargo make clippy
|
||||||
|
test:
|
||||||
|
name: Unit Tests
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
|
needs: cache-polkadot
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
os: [ubuntu-24.04, macos-14]
|
os: [ubuntu-24.04, macos-14]
|
||||||
|
env:
|
||||||
|
SCCACHE_GHA_ENABLED: "true"
|
||||||
|
RUSTC_WRAPPER: "sccache"
|
||||||
|
POLKADOT_SDK_COMMIT_HASH: "30cda2aad8612a10ff729d494acd9d5353294d63"
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repo
|
- name: Checkout This Repository
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Restore binaries from cache
|
|
||||||
uses: actions/cache@v3
|
|
||||||
with:
|
with:
|
||||||
path: |
|
submodules: recursive
|
||||||
~/.cargo/bin/substrate-node
|
- name: Run Sccache
|
||||||
~/.cargo/bin/eth-rpc
|
uses: mozilla-actions/sccache-action@v0.0.9
|
||||||
key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}
|
- name: Install the Rust Toolchain
|
||||||
|
|
||||||
- name: Setup Rust toolchain
|
|
||||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
with:
|
with:
|
||||||
rustflags: ""
|
target: "wasm32-unknown-unknown"
|
||||||
|
components: "rust-src,rust-std"
|
||||||
- name: Add wasm32 target
|
- name: Install the Cargo Make Binary
|
||||||
run: |
|
uses: davidB/rust-cargo-make@v1
|
||||||
rustup target add wasm32-unknown-unknown
|
- name: Caching Step
|
||||||
rustup component add rust-src
|
uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
~/.cargo/bin/eth-rpc
|
||||||
|
~/.cargo/bin/revive-dev-node
|
||||||
|
key: polkadot-binaries-${{ env.POLKADOT_SDK_COMMIT_HASH }}-${{ matrix.os }}
|
||||||
- name: Install Geth on Ubuntu
|
- name: Install Geth on Ubuntu
|
||||||
if: matrix.os == 'ubuntu-24.04'
|
if: matrix.os == 'ubuntu-24.04'
|
||||||
run: |
|
run: |
|
||||||
|
sudo add-apt-repository -y ppa:ethereum/ethereum
|
||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
sudo apt-get install -y protobuf-compiler
|
sudo apt-get install -y protobuf-compiler
|
||||||
|
|
||||||
|
sudo apt-get install -y solc
|
||||||
|
|
||||||
# We were facing some issues in CI with the 1.16.* versions of geth, and specifically on
|
# We were facing some issues in CI with the 1.16.* versions of geth, and specifically on
|
||||||
# Ubuntu. Eventually, we found out that the last version of geth that worked in our CI was
|
# Ubuntu. Eventually, we found out that the last version of geth that worked in our CI was
|
||||||
# version 1.15.11. Thus, this is the version that we want to use in CI. The PPA sadly does
|
# version 1.15.11. Thus, this is the version that we want to use in CI. The PPA sadly does
|
||||||
@@ -122,26 +136,93 @@ jobs:
|
|||||||
wget -qO- "$URL" | sudo tar xz -C /usr/local/bin --strip-components=1
|
wget -qO- "$URL" | sudo tar xz -C /usr/local/bin --strip-components=1
|
||||||
geth --version
|
geth --version
|
||||||
|
|
||||||
|
curl -sL https://github.com/paritytech/revive/releases/download/v0.3.0/resolc-x86_64-unknown-linux-musl -o resolc
|
||||||
|
chmod +x resolc
|
||||||
|
sudo mv resolc /usr/local/bin
|
||||||
- name: Install Geth on macOS
|
- name: Install Geth on macOS
|
||||||
if: matrix.os == 'macos-14'
|
if: matrix.os == 'macos-14'
|
||||||
run: |
|
run: |
|
||||||
brew tap ethereum/ethereum
|
brew tap ethereum/ethereum
|
||||||
brew install ethereum protobuf
|
brew install ethereum protobuf
|
||||||
|
|
||||||
- name: Machete
|
brew install solidity
|
||||||
uses: bnjbvr/cargo-machete@v0.7.1
|
|
||||||
|
|
||||||
- name: Format
|
curl -sL https://github.com/paritytech/revive/releases/download/v0.3.0/resolc-universal-apple-darwin -o resolc
|
||||||
run: make format
|
chmod +x resolc
|
||||||
|
sudo mv resolc /usr/local/bin
|
||||||
|
- name: Install Kurtosis on macOS
|
||||||
|
if: matrix.os == 'macos-14'
|
||||||
|
run: brew install kurtosis-tech/tap/kurtosis-cli
|
||||||
|
- name: Install Kurtosis on Ubuntu
|
||||||
|
if: matrix.os == 'ubuntu-24.04'
|
||||||
|
run: |
|
||||||
|
echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install kurtosis-cli
|
||||||
|
- name: Run Tests
|
||||||
|
run: cargo make test
|
||||||
|
cache-polkadot:
|
||||||
|
name: Build and Cache Polkadot Binaries on ${{ matrix.os }}
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
os: [ubuntu-24.04, macos-14]
|
||||||
|
env:
|
||||||
|
SCCACHE_GHA_ENABLED: "true"
|
||||||
|
RUSTC_WRAPPER: "sccache"
|
||||||
|
RUSTFLAGS: "-Awarnings"
|
||||||
|
POLKADOT_SDK_COMMIT_HASH: "30cda2aad8612a10ff729d494acd9d5353294d63"
|
||||||
|
steps:
|
||||||
|
- name: Caching Step
|
||||||
|
id: cache-step
|
||||||
|
uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
~/.cargo/bin/eth-rpc
|
||||||
|
~/.cargo/bin/revive-dev-node
|
||||||
|
key: polkadot-binaries-${{ env.POLKADOT_SDK_COMMIT_HASH }}-${{ matrix.os }}
|
||||||
|
- name: Checkout the Polkadot SDK Repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
if: steps.cache-step.outputs.cache-hit != 'true'
|
||||||
|
with:
|
||||||
|
repository: paritytech/polkadot-sdk
|
||||||
|
ref: ${{ env.POLKADOT_SDK_COMMIT_HASH }}
|
||||||
|
submodules: recursive
|
||||||
|
- name: Run Sccache
|
||||||
|
uses: mozilla-actions/sccache-action@v0.0.9
|
||||||
|
if: steps.cache-step.outputs.cache-hit != 'true'
|
||||||
|
- name: Install the Rust Toolchain
|
||||||
|
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
|
if: steps.cache-step.outputs.cache-hit != 'true'
|
||||||
|
with:
|
||||||
|
target: "wasm32-unknown-unknown"
|
||||||
|
components: "rust-src"
|
||||||
|
toolchain: "1.90.0"
|
||||||
|
|
||||||
- name: Clippy
|
- name: Install dependencies (Linux)
|
||||||
run: make clippy
|
if: matrix.os == 'ubuntu-24.04' && steps.cache-step.outputs.cache-hit != 'true'
|
||||||
|
run: |
|
||||||
- name: Check substrate-node version
|
sudo apt-get update
|
||||||
run: substrate-node --version
|
sudo apt-get install -y protobuf-compiler clang libclang-dev
|
||||||
|
- name: Install dependencies (macOS)
|
||||||
- name: Check eth-rpc version
|
if: matrix.os == 'macos-14' && steps.cache-step.outputs.cache-hit != 'true'
|
||||||
run: eth-rpc --version
|
run: |
|
||||||
|
brew install protobuf llvm
|
||||||
- name: Test cargo workspace
|
LLVM_PREFIX="$(brew --prefix llvm)"
|
||||||
run: make test
|
echo "LDFLAGS=-L${LLVM_PREFIX}/lib" >> "$GITHUB_ENV"
|
||||||
|
echo "CPPFLAGS=-I${LLVM_PREFIX}/include" >> "$GITHUB_ENV"
|
||||||
|
echo "CMAKE_PREFIX_PATH=${LLVM_PREFIX}" >> "$GITHUB_ENV"
|
||||||
|
echo "LIBCLANG_PATH=${LLVM_PREFIX}/lib" >> "$GITHUB_ENV"
|
||||||
|
echo "DYLD_FALLBACK_LIBRARY_PATH=${LLVM_PREFIX}/lib" >> "$GITHUB_ENV"
|
||||||
|
echo "${LLVM_PREFIX}/bin" >> "$GITHUB_PATH"
|
||||||
|
- name: Build Polkadot Dependencies
|
||||||
|
if: steps.cache-step.outputs.cache-hit != 'true'
|
||||||
|
run: |
|
||||||
|
cargo build \
|
||||||
|
--locked \
|
||||||
|
--profile production \
|
||||||
|
--package revive-dev-node \
|
||||||
|
--package pallet-revive-eth-rpc;
|
||||||
|
mv ./target/production/revive-dev-node ~/.cargo/bin
|
||||||
|
mv ./target/production/eth-rpc ~/.cargo/bin
|
||||||
|
chmod +x ~/.cargo/bin/*
|
||||||
|
|||||||
@@ -3,7 +3,15 @@
|
|||||||
.DS_Store
|
.DS_Store
|
||||||
node_modules
|
node_modules
|
||||||
/*.json
|
/*.json
|
||||||
|
*.sh
|
||||||
|
|
||||||
# We do not want to commit any log files that we produce from running the code locally so this is
|
# We do not want to commit any log files that we produce from running the code locally so this is
|
||||||
# added to the .gitignore file.
|
# added to the .gitignore file.
|
||||||
*.log
|
*.log
|
||||||
|
|
||||||
|
profile.json.gz
|
||||||
|
workdir*
|
||||||
|
|
||||||
|
!/schema.json
|
||||||
|
!/dev-genesis.json
|
||||||
|
!/scripts/*
|
||||||
|
|||||||
+3
-3
@@ -1,3 +1,3 @@
|
|||||||
[submodule "polkadot-sdk"]
|
[submodule "resolc-compiler-tests"]
|
||||||
path = polkadot-sdk
|
path = resolc-compiler-tests
|
||||||
url = https://github.com/paritytech/polkadot-sdk.git
|
url = https://github.com/paritytech/resolc-compiler-tests
|
||||||
|
|||||||
Generated
+4314
-856
File diff suppressed because it is too large
Load Diff
+40
-28
@@ -8,9 +8,10 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
|||||||
license = "MIT/Apache-2.0"
|
license = "MIT/Apache-2.0"
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
repository = "https://github.com/paritytech/revive-differential-testing.git"
|
repository = "https://github.com/paritytech/revive-differential-testing.git"
|
||||||
rust-version = "1.85.0"
|
rust-version = "1.87.0"
|
||||||
|
|
||||||
[workspace.dependencies]
|
[workspace.dependencies]
|
||||||
|
revive-dt-common = { version = "0.1.0", path = "crates/common" }
|
||||||
revive-dt-compiler = { version = "0.1.0", path = "crates/compiler" }
|
revive-dt-compiler = { version = "0.1.0", path = "crates/compiler" }
|
||||||
revive-dt-config = { version = "0.1.0", path = "crates/config" }
|
revive-dt-config = { version = "0.1.0", path = "crates/config" }
|
||||||
revive-dt-core = { version = "0.1.0", path = "crates/core" }
|
revive-dt-core = { version = "0.1.0", path = "crates/core" }
|
||||||
@@ -20,62 +21,73 @@ revive-dt-node-interaction = { version = "0.1.0", path = "crates/node-interactio
|
|||||||
revive-dt-node-pool = { version = "0.1.0", path = "crates/node-pool" }
|
revive-dt-node-pool = { version = "0.1.0", path = "crates/node-pool" }
|
||||||
revive-dt-report = { version = "0.1.0", path = "crates/report" }
|
revive-dt-report = { version = "0.1.0", path = "crates/report" }
|
||||||
revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }
|
revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }
|
||||||
|
revive-dt-report-processor = { version = "0.1.0", path = "crates/report-processor" }
|
||||||
|
|
||||||
alloy-primitives = "1.2.1"
|
alloy = { version = "1.4.1", features = ["full", "genesis", "json-rpc"] }
|
||||||
alloy-sol-types = "1.2.1"
|
ansi_term = "0.12.1"
|
||||||
anyhow = "1.0"
|
anyhow = "1.0"
|
||||||
clap = { version = "4", features = ["derive"] }
|
bson = { version = "2.15.0" }
|
||||||
|
cacache = { version = "13.1.0" }
|
||||||
|
clap = { version = "4", features = ["derive", "wrap_help"] }
|
||||||
|
dashmap = { version = "6.1.0" }
|
||||||
|
foundry-compilers-artifacts = { version = "0.18.0" }
|
||||||
futures = { version = "0.3.31" }
|
futures = { version = "0.3.31" }
|
||||||
hex = "0.4.3"
|
hex = "0.4.3"
|
||||||
reqwest = { version = "0.12.15", features = ["blocking", "json"] }
|
regex = "1"
|
||||||
|
moka = "0.12.10"
|
||||||
|
paste = "1.0.15"
|
||||||
|
reqwest = { version = "0.12.15", features = ["json"] }
|
||||||
once_cell = "1.21"
|
once_cell = "1.21"
|
||||||
rayon = { version = "1.10" }
|
schemars = { version = "1.0.4", features = ["semver1"] }
|
||||||
semver = { version = "1.0", features = ["serde"] }
|
semver = { version = "1.0", features = ["serde"] }
|
||||||
serde = { version = "1.0", default-features = false, features = ["derive"] }
|
serde = { version = "1.0", default-features = false, features = ["derive"] }
|
||||||
serde_json = { version = "1.0", default-features = false, features = [
|
serde_json = { version = "1.0", default-features = false, features = [
|
||||||
"arbitrary_precision",
|
"arbitrary_precision",
|
||||||
"std",
|
"std",
|
||||||
|
"unbounded_depth",
|
||||||
] }
|
] }
|
||||||
|
serde_with = { version = "3.14.0", features = ["hex"] }
|
||||||
|
serde_yaml_ng = { version = "0.10.0" }
|
||||||
sha2 = { version = "0.10.9" }
|
sha2 = { version = "0.10.9" }
|
||||||
sp-core = "36.1.0"
|
sp-core = "36.1.0"
|
||||||
sp-runtime = "41.1.0"
|
sp-runtime = "41.1.0"
|
||||||
|
strum = { version = "0.27.2", features = ["derive"] }
|
||||||
|
subxt = { version = "0.44.0" }
|
||||||
temp-dir = { version = "0.1.16" }
|
temp-dir = { version = "0.1.16" }
|
||||||
tempfile = "3.3"
|
tempfile = "3.3"
|
||||||
tokio = { version = "1", default-features = false, features = [
|
thiserror = "2"
|
||||||
|
tokio = { version = "1.47.0", default-features = false, features = [
|
||||||
"rt-multi-thread",
|
"rt-multi-thread",
|
||||||
|
"process",
|
||||||
|
"rt",
|
||||||
] }
|
] }
|
||||||
|
tower = { version = "0.5.2", features = ["limit"] }
|
||||||
uuid = { version = "1.8", features = ["v4"] }
|
uuid = { version = "1.8", features = ["v4"] }
|
||||||
tracing = "0.1.41"
|
tracing = { version = "0.1.41" }
|
||||||
|
tracing-appender = { version = "0.2.3" }
|
||||||
tracing-subscriber = { version = "0.3.19", default-features = false, features = [
|
tracing-subscriber = { version = "0.3.19", default-features = false, features = [
|
||||||
"fmt",
|
"fmt",
|
||||||
"json",
|
"json",
|
||||||
"env-filter",
|
"env-filter",
|
||||||
] }
|
] }
|
||||||
indexmap = { version = "2.10.0", default-features = false }
|
indexmap = { version = "2.10.0", default-features = false }
|
||||||
|
itertools = { version = "0.14.0" }
|
||||||
|
|
||||||
# revive compiler
|
# revive compiler
|
||||||
revive-solc-json-interface = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
|
revive-solc-json-interface = { version = "0.5.0" }
|
||||||
revive-common = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
|
revive-common = { version = "0.3.0" }
|
||||||
revive-differential = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
|
revive-differential = { version = "0.3.0" }
|
||||||
|
|
||||||
[workspace.dependencies.alloy]
|
zombienet-sdk = { git = "https://github.com/paritytech/zombienet-sdk.git", rev = "891f6554354ce466abd496366dbf8b4f82141241" }
|
||||||
version = "1.0"
|
|
||||||
default-features = false
|
|
||||||
features = [
|
|
||||||
"json-abi",
|
|
||||||
"providers",
|
|
||||||
"provider-ipc",
|
|
||||||
"provider-debug-api",
|
|
||||||
"reqwest",
|
|
||||||
"rpc-types",
|
|
||||||
"signer-local",
|
|
||||||
"std",
|
|
||||||
"network",
|
|
||||||
"serde",
|
|
||||||
"rpc-types-eth",
|
|
||||||
]
|
|
||||||
|
|
||||||
[profile.bench]
|
[profile.bench]
|
||||||
inherits = "release"
|
inherits = "release"
|
||||||
lto = true
|
|
||||||
codegen-units = 1
|
codegen-units = 1
|
||||||
|
lto = true
|
||||||
|
|
||||||
|
[profile.production]
|
||||||
|
inherits = "release"
|
||||||
|
codegen-units = 1
|
||||||
|
lto = true
|
||||||
|
|
||||||
|
[workspace.lints.clippy]
|
||||||
|
|||||||
@@ -1,15 +0,0 @@
|
|||||||
.PHONY: format clippy test machete
|
|
||||||
|
|
||||||
format:
|
|
||||||
cargo fmt --all -- --check
|
|
||||||
|
|
||||||
clippy:
|
|
||||||
cargo clippy --all-features --workspace -- --deny warnings
|
|
||||||
|
|
||||||
machete:
|
|
||||||
cargo install cargo-machete
|
|
||||||
cargo machete crates
|
|
||||||
|
|
||||||
test: format clippy machete
|
|
||||||
cargo test --workspace -- --nocapture
|
|
||||||
|
|
||||||
@@ -0,0 +1,21 @@
|
|||||||
|
[config]
|
||||||
|
default_to_workspace = false
|
||||||
|
|
||||||
|
[tasks.machete]
|
||||||
|
command = "cargo"
|
||||||
|
args = ["machete", "crates"]
|
||||||
|
install_crate = "cargo-machete"
|
||||||
|
|
||||||
|
[tasks.fmt-check]
|
||||||
|
command = "cargo"
|
||||||
|
args = ["fmt", "--all", "--", "--check"]
|
||||||
|
install_crate = "rustfmt"
|
||||||
|
|
||||||
|
[tasks.clippy]
|
||||||
|
command = "cargo"
|
||||||
|
args = ["clippy", "--all-features", "--workspace", "--", "--deny", "warnings"]
|
||||||
|
install_crate = "clippy"
|
||||||
|
|
||||||
|
[tasks.test]
|
||||||
|
command = "cargo"
|
||||||
|
args = ["test", "--workspace", "--", "--nocapture"]
|
||||||
@@ -1,34 +1,120 @@
|
|||||||
# revive-differential-tests
|
<div align="center">
|
||||||
|
<h1><code>Revive Differential Tests</code></h1>
|
||||||
|
|
||||||
The revive differential testing framework allows to define smart contract tests in a declarative manner in order to compile and execute them against different Ethereum-compatible blockchain implmentations. This is useful to:
|
<p>
|
||||||
- Analyze observable differences in contract compilation and execution across different blockchain implementations, including contract storage, account balances, transaction output and emitted events on a per-transaction base.
|
<strong>Differential testing for Ethereum-compatible smart contract stacks</strong>
|
||||||
- Collect and compare benchmark metrics such as code size, gas usage or transaction throughput per seconds (TPS) of different blockchain implementations.
|
</p>
|
||||||
- Ensure reproducible contract builds across multiple compiler implementations or multiple host platforms.
|
</div>
|
||||||
- Implement end-to-end regression tests for Ethereum-compatible smart contract stacks.
|
|
||||||
|
|
||||||
# Declarative test format
|
This project compiles and executes declarative smart-contract tests against multiple platforms, then compares behavior (status, return data, events, and state diffs). Today it supports:
|
||||||
|
|
||||||
For now, the format used to write tests is the [matter-labs era compiler format](https://github.com/matter-labs/era-compiler-tests?tab=readme-ov-file#matter-labs-simplecomplex-format). This allows us to re-use many tests from their corpora.
|
- Geth (EVM reference implementation)
|
||||||
|
- Revive Dev Node (Substrate-based PolkaVM + `eth-rpc` proxy)
|
||||||
|
|
||||||
# The `retester` utility
|
Use it to:
|
||||||
|
|
||||||
The `retester` helper utilty is used to run the tests. To get an idea of what `retester` can do, please consults its command line help:
|
- Detect observable differences between platforms (execution success, logs, state changes)
|
||||||
|
- Ensure reproducible builds across compilers/hosts
|
||||||
|
- Run end-to-end regression suites
|
||||||
|
|
||||||
```
|
This framework uses the [MatterLabs tests format](https://github.com/matter-labs/era-compiler-tests/tree/main/solidity) for declarative tests which is composed of the following:
|
||||||
cargo run -p revive-dt-core -- --help
|
|
||||||
```
|
|
||||||
|
|
||||||
For example, to run the [complex Solidity tests](https://github.com/matter-labs/era-compiler-tests/tree/main/solidity/complex), define a corpus structure as follows:
|
- Metadata files, this is akin to a module of tests in Rust.
|
||||||
|
- Each metadata file contains multiple cases, a case is akin to a Rust test where a module can contain multiple tests.
|
||||||
|
- Each case contains multiple steps and assertions, this is akin to any Rust test that contains multiple statements.
|
||||||
|
|
||||||
```json
|
Metadata files are JSON files, but Solidity files can also be metadata files if they include inline metadata provided as a comment at the top of the contract.
|
||||||
{
|
|
||||||
"name": "ML Solidity Complex",
|
|
||||||
"path": "/path/to/era-compiler-tests/solidity/complex"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Assuming this to be saved in a `ml-solidity-complex.json` file, the following command will try to compile and execute the tests found inside the corpus:
|
All of the steps contained within each test case are either:
|
||||||
|
|
||||||
|
- Transactions that need to be submitted and assertions to run on the submitted transactions.
|
||||||
|
- Assertions on the state of the chain (e.g., account balances, storage, etc...)
|
||||||
|
|
||||||
|
All of the transactions submitted by the this tool to the test nodes follow a similar logic to what wallets do. We first use alloy to estimate the transaction fees, then we attach that to the transaction and submit it to the node and then await the transaction receipt.
|
||||||
|
|
||||||
|
This repository contains none of the tests and only contains the testing framework or the test runner. The tests can be found in the [`resolc-compiler-tests`](https://github.com/paritytech/resolc-compiler-tests) repository which is a clone of [MatterLab's test suite](https://github.com/matter-labs/era-compiler-tests) with some modifications and adjustments made to suit our use case.
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
This section describes the required dependencies that this framework requires to run. Compiling this framework is pretty straightforward and no additional dependencies beyond what's specified in the `Cargo.toml` file should be required.
|
||||||
|
|
||||||
|
- Stable Rust
|
||||||
|
- Geth - When doing differential testing against the PVM we submit transactions to a Geth node and to Revive Dev Node to compare them.
|
||||||
|
- Revive Dev Node - When doing differential testing against the PVM we submit transactions to a Geth node and to Revive Dev Node to compare them.
|
||||||
|
- ETH-RPC - All communication with Revive Dev Node is done through the ETH RPC.
|
||||||
|
- Solc - This is actually a transitive dependency, while this tool doesn't require solc as it downloads the versions that it requires, resolc requires that Solc is installed and available in the path.
|
||||||
|
- Resolc - This is required to compile the contracts to PolkaVM bytecode.
|
||||||
|
- Kurtosis - The Kurtosis CLI tool is required for the production Ethereum mainnet-like node configuration with Geth as the execution layer and lighthouse as the consensus layer. Kurtosis also requires docker to be installed since it runs everything inside of docker containers.
|
||||||
|
|
||||||
|
All of the above need to be installed and available in the path in order for the tool to work.
|
||||||
|
|
||||||
|
## Running The Tool
|
||||||
|
|
||||||
|
This tool is being updated quite frequently. Therefore, it's recommended that you don't install the tool and then run it, but rather that you run it from the root of the directory using `cargo run --release`. The help command of the tool gives you all of the information you need to know about each of the options and flags that the tool offers.
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> Note that the tests can be found in the [`resolc-compiler-tests`](https://github.com/paritytech/resolc-compiler-tests) repository.
|
||||||
|
|
||||||
|
The simplest command to run this tool is the following:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
RUST_LOG=debug cargo r --release -p revive-dt-core -- --corpus ml-solidity-complex.json
|
RUST_LOG="info" cargo run --release -- test \
|
||||||
|
--test ./resolc-compiler-tests/fixtures/solidity \
|
||||||
|
--platform geth-evm-solc \
|
||||||
|
--working-directory workdir \
|
||||||
|
> logs.log \
|
||||||
|
2> output.log
|
||||||
```
|
```
|
||||||
|
|
||||||
|
The above command will run the tool executing every one of the tests discovered in the path provided to the tool. All of the logs from the execution will be persisted in the `logs.log` file and all of the output of the tool will be persisted to the `output.log` file. If all that you're looking for is to run the tool and check which tests succeeded and failed, then the `output.log` file is what you need to be looking at. However, if you're contributing the to the tool then the `logs.log` file will be very valuable.
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>User Managed Nodes</summary>
|
||||||
|
|
||||||
|
This section describes how the user can make use of nodes that they manage rather than allowing the tool to spawn and manage the nodes on the user's behalf.
|
||||||
|
|
||||||
|
> ⚠️ This is an advanced feature of the tool and could lead test successes or failures to not be reproducible. Please use this feature with caution and only if you understand the implications of running your own node instead of having the framework manage your nodes. ⚠️
|
||||||
|
|
||||||
|
If you're an advanced user and you'd like to manage your own nodes instead of having the tool initialize, spawn, and manage them, then you can choose to run your own nodes and then provide them to the tool to make use of just like the following:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
PLATFORM="revive-dev-node-revm-solc"
|
||||||
|
|
||||||
|
retester export-genesis "$PLATFORM" > chainspec.json
|
||||||
|
|
||||||
|
# Start revive-dev-node in a detached tmux session
|
||||||
|
tmux new-session -d -s revive-dev-node \
|
||||||
|
'RUST_LOG="error,evm=debug,sc_rpc_server=info,runtime::revive=debug" revive-dev-node \
|
||||||
|
--dev \
|
||||||
|
--chain chainspec.json \
|
||||||
|
--force-authoring \
|
||||||
|
--rpc-methods Unsafe \
|
||||||
|
--rpc-cors all \
|
||||||
|
--rpc-max-connections 4294967295 \
|
||||||
|
--pool-limit 4294967295 \
|
||||||
|
--pool-kbytes 4294967295'
|
||||||
|
sleep 5
|
||||||
|
|
||||||
|
# Start eth-rpc in a detached tmux session
|
||||||
|
tmux new-session -d -s eth-rpc \
|
||||||
|
'RUST_LOG="info,eth-rpc=debug" eth-rpc \
|
||||||
|
--dev \
|
||||||
|
--node-rpc-url ws://127.0.0.1:9944 \
|
||||||
|
--rpc-max-connections 4294967295'
|
||||||
|
sleep 5
|
||||||
|
|
||||||
|
# Run the tests (logs to files as before)
|
||||||
|
RUST_LOG="info" retester test \
|
||||||
|
--platform "$PLATFORM" \
|
||||||
|
--corpus ./revive-differential-tests/fixtures/solidity \
|
||||||
|
--working-directory ./workdir \
|
||||||
|
--concurrency.number-of-nodes 1 \
|
||||||
|
--concurrency.number-of-concurrent-tasks 5 \
|
||||||
|
--revive-dev-node.existing-rpc-url "http://localhost:8545" \
|
||||||
|
> logs.log
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|||||||
@@ -33,9 +33,5 @@
|
|||||||
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"timestamp": "0x00",
|
"timestamp": "0x00",
|
||||||
"alloc": {
|
"alloc": {}
|
||||||
"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1": {
|
|
||||||
"balance": "1000000000000000000"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
Binary file not shown.
@@ -1,13 +1,24 @@
|
|||||||
{
|
{
|
||||||
"modes": [
|
"modes": [
|
||||||
"Y >=0.8.9",
|
"Y >=0.8.9",
|
||||||
"E",
|
"E"
|
||||||
"I"
|
|
||||||
],
|
],
|
||||||
"cases": [
|
"cases": [
|
||||||
{
|
{
|
||||||
"name": "first",
|
"name": "first",
|
||||||
"inputs": [
|
"inputs": [
|
||||||
|
{
|
||||||
|
"address": "0xdeadbeef00000000000000000000000000000042",
|
||||||
|
"expected_balance": "1233"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"address": "0xdeadbeef00000000000000000000000000000042",
|
||||||
|
"is_storage_empty": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"address": "0xdeadbeef00000000000000000000000000000042",
|
||||||
|
"is_storage_empty": false
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"instance": "WBTC_1",
|
"instance": "WBTC_1",
|
||||||
"method": "#deployer",
|
"method": "#deployer",
|
||||||
|
|||||||
@@ -0,0 +1 @@
|
|||||||
|
|
||||||
@@ -0,0 +1,24 @@
|
|||||||
|
[package]
|
||||||
|
name = "revive-dt-common"
|
||||||
|
description = "A library containing common concepts that other crates in the workspace can rely on"
|
||||||
|
version.workspace = true
|
||||||
|
authors.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
edition.workspace = true
|
||||||
|
repository.workspace = true
|
||||||
|
rust-version.workspace = true
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
alloy = { workspace = true }
|
||||||
|
anyhow = { workspace = true }
|
||||||
|
clap = { workspace = true }
|
||||||
|
moka = { workspace = true, features = ["sync"] }
|
||||||
|
once_cell = { workspace = true }
|
||||||
|
regex = { workspace = true }
|
||||||
|
semver = { workspace = true }
|
||||||
|
serde = { workspace = true }
|
||||||
|
schemars = { workspace = true }
|
||||||
|
strum = { workspace = true }
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
@@ -0,0 +1,49 @@
|
|||||||
|
//! This module implements a cached file system allowing for results to be stored in-memory rather
|
||||||
|
//! rather being queried from the file system again.
|
||||||
|
|
||||||
|
use std::fs;
|
||||||
|
use std::io::{Error, Result};
|
||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
|
||||||
|
use moka::sync::Cache;
|
||||||
|
use once_cell::sync::Lazy;
|
||||||
|
|
||||||
|
pub fn read(path: impl AsRef<Path>) -> Result<Vec<u8>> {
|
||||||
|
static READ_CACHE: Lazy<Cache<PathBuf, Vec<u8>>> = Lazy::new(|| Cache::new(10_000));
|
||||||
|
|
||||||
|
let path = path.as_ref().canonicalize()?;
|
||||||
|
match READ_CACHE.get(path.as_path()) {
|
||||||
|
Some(content) => Ok(content),
|
||||||
|
None => {
|
||||||
|
let content = fs::read(path.as_path())?;
|
||||||
|
READ_CACHE.insert(path, content.clone());
|
||||||
|
Ok(content)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn read_to_string(path: impl AsRef<Path>) -> Result<String> {
|
||||||
|
let content = read(path)?;
|
||||||
|
String::from_utf8(content).map_err(|_| {
|
||||||
|
Error::new(
|
||||||
|
std::io::ErrorKind::InvalidData,
|
||||||
|
"The contents of the file are not valid UTF8",
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn read_dir(path: impl AsRef<Path>) -> Result<Box<dyn Iterator<Item = Result<PathBuf>>>> {
|
||||||
|
static READ_DIR_CACHE: Lazy<Cache<PathBuf, Vec<PathBuf>>> = Lazy::new(|| Cache::new(10_000));
|
||||||
|
|
||||||
|
let path = path.as_ref().canonicalize()?;
|
||||||
|
match READ_DIR_CACHE.get(path.as_path()) {
|
||||||
|
Some(entries) => Ok(Box::new(entries.into_iter().map(Ok)) as Box<_>),
|
||||||
|
None => {
|
||||||
|
let entries = fs::read_dir(path.as_path())?
|
||||||
|
.flat_map(|maybe_entry| maybe_entry.map(|entry| entry.path()))
|
||||||
|
.collect();
|
||||||
|
READ_DIR_CACHE.insert(path.clone(), entries);
|
||||||
|
Ok(read_dir(path).unwrap())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,31 @@
|
|||||||
|
use std::{
|
||||||
|
fs::{read_dir, remove_dir_all, remove_file},
|
||||||
|
path::Path,
|
||||||
|
};
|
||||||
|
|
||||||
|
use anyhow::{Context, Result};
|
||||||
|
|
||||||
|
/// This method clears the passed directory of all of the files and directories contained within
|
||||||
|
/// without deleting the directory.
|
||||||
|
pub fn clear_directory(path: impl AsRef<Path>) -> Result<()> {
|
||||||
|
for entry in read_dir(path.as_ref())
|
||||||
|
.with_context(|| format!("Failed to read directory: {}", path.as_ref().display()))?
|
||||||
|
{
|
||||||
|
let entry = entry.with_context(|| {
|
||||||
|
format!(
|
||||||
|
"Failed to read an entry in directory: {}",
|
||||||
|
path.as_ref().display()
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
let entry_path = entry.path();
|
||||||
|
|
||||||
|
if entry_path.is_file() {
|
||||||
|
remove_file(&entry_path)
|
||||||
|
.with_context(|| format!("Failed to remove file: {}", entry_path.display()))?
|
||||||
|
} else {
|
||||||
|
remove_dir_all(&entry_path)
|
||||||
|
.with_context(|| format!("Failed to remove directory: {}", entry_path.display()))?
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
@@ -0,0 +1,3 @@
|
|||||||
|
mod clear_dir;
|
||||||
|
|
||||||
|
pub use clear_dir::*;
|
||||||
@@ -0,0 +1,21 @@
|
|||||||
|
/// An iterator that could be either of two iterators.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub enum EitherIter<A, B> {
|
||||||
|
A(A),
|
||||||
|
B(B),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<A, B, T> Iterator for EitherIter<A, B>
|
||||||
|
where
|
||||||
|
A: Iterator<Item = T>,
|
||||||
|
B: Iterator<Item = T>,
|
||||||
|
{
|
||||||
|
type Item = T;
|
||||||
|
|
||||||
|
fn next(&mut self) -> Option<Self::Item> {
|
||||||
|
match self {
|
||||||
|
EitherIter::A(iter) => iter.next(),
|
||||||
|
EitherIter::B(iter) => iter.next(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
+27
-9
@@ -1,4 +1,8 @@
|
|||||||
use std::{borrow::Cow, collections::HashSet, path::PathBuf};
|
use std::{
|
||||||
|
borrow::Cow,
|
||||||
|
collections::HashSet,
|
||||||
|
path::{Path, PathBuf},
|
||||||
|
};
|
||||||
|
|
||||||
/// An iterator that finds files of a certain extension in the provided directory. You can think of
|
/// An iterator that finds files of a certain extension in the provided directory. You can think of
|
||||||
/// this a glob pattern similar to: `${path}/**/*.md`
|
/// this a glob pattern similar to: `${path}/**/*.md`
|
||||||
@@ -15,14 +19,20 @@ pub struct FilesWithExtensionIterator {
|
|||||||
/// this vector then they will be returned when the [`Iterator::next`] method is called. If not
|
/// this vector then they will be returned when the [`Iterator::next`] method is called. If not
|
||||||
/// then we visit one of the next directories to visit.
|
/// then we visit one of the next directories to visit.
|
||||||
files_matching_allowed_extensions: Vec<PathBuf>,
|
files_matching_allowed_extensions: Vec<PathBuf>,
|
||||||
|
|
||||||
|
/// This option controls if the the cached file system should be used or not. This could be
|
||||||
|
/// better for certain cases where the entries in the directories do not change and therefore
|
||||||
|
/// caching can be used.
|
||||||
|
use_cached_fs: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FilesWithExtensionIterator {
|
impl FilesWithExtensionIterator {
|
||||||
pub fn new(root_directory: PathBuf) -> Self {
|
pub fn new(root_directory: impl AsRef<Path>) -> Self {
|
||||||
Self {
|
Self {
|
||||||
allowed_extensions: Default::default(),
|
allowed_extensions: Default::default(),
|
||||||
directories_to_search: vec![root_directory],
|
directories_to_search: vec![root_directory.as_ref().to_path_buf()],
|
||||||
files_matching_allowed_extensions: Default::default(),
|
files_matching_allowed_extensions: Default::default(),
|
||||||
|
use_cached_fs: Default::default(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -33,6 +43,11 @@ impl FilesWithExtensionIterator {
|
|||||||
self.allowed_extensions.insert(allowed_extension.into());
|
self.allowed_extensions.insert(allowed_extension.into());
|
||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn with_use_cached_fs(mut self, use_cached_fs: bool) -> Self {
|
||||||
|
self.use_cached_fs = use_cached_fs;
|
||||||
|
self
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Iterator for FilesWithExtensionIterator {
|
impl Iterator for FilesWithExtensionIterator {
|
||||||
@@ -45,16 +60,19 @@ impl Iterator for FilesWithExtensionIterator {
|
|||||||
|
|
||||||
let directory_to_search = self.directories_to_search.pop()?;
|
let directory_to_search = self.directories_to_search.pop()?;
|
||||||
|
|
||||||
// Read all of the entries in the directory. If we failed to read this dir's entires then we
|
let iterator = if self.use_cached_fs {
|
||||||
// elect to just ignore it and look in the next directory, we do that by calling the next
|
let Ok(dir_entries) = crate::cached_fs::read_dir(directory_to_search.as_path()) else {
|
||||||
// method again on the iterator, which is an intentional decision that we made here instead
|
return self.next();
|
||||||
// of panicking.
|
};
|
||||||
|
Box::new(dir_entries) as Box<dyn Iterator<Item = std::io::Result<PathBuf>>>
|
||||||
|
} else {
|
||||||
let Ok(dir_entries) = std::fs::read_dir(directory_to_search) else {
|
let Ok(dir_entries) = std::fs::read_dir(directory_to_search) else {
|
||||||
return self.next();
|
return self.next();
|
||||||
};
|
};
|
||||||
|
Box::new(dir_entries.map(|maybe_entry| maybe_entry.map(|entry| entry.path()))) as Box<_>
|
||||||
|
};
|
||||||
|
|
||||||
for entry in dir_entries.flatten() {
|
for entry_path in iterator.flatten() {
|
||||||
let entry_path = entry.path();
|
|
||||||
if entry_path.is_dir() {
|
if entry_path.is_dir() {
|
||||||
self.directories_to_search.push(entry_path)
|
self.directories_to_search.push(entry_path)
|
||||||
} else if entry_path.is_file()
|
} else if entry_path.is_file()
|
||||||
@@ -0,0 +1,5 @@
|
|||||||
|
mod either_iter;
|
||||||
|
mod files_with_extension_iterator;
|
||||||
|
|
||||||
|
pub use either_iter::*;
|
||||||
|
pub use files_with_extension_iterator::*;
|
||||||
@@ -0,0 +1,8 @@
|
|||||||
|
//! This crate provides common concepts, functionality, types, macros, and more that other crates in
|
||||||
|
//! the workspace can benefit from.
|
||||||
|
|
||||||
|
pub mod cached_fs;
|
||||||
|
pub mod fs;
|
||||||
|
pub mod iterators;
|
||||||
|
pub mod macros;
|
||||||
|
pub mod types;
|
||||||
@@ -1,3 +1,25 @@
|
|||||||
|
#[macro_export]
|
||||||
|
macro_rules! impl_for_wrapper {
|
||||||
|
(Display, $ident: ident) => {
|
||||||
|
#[automatically_derived]
|
||||||
|
impl std::fmt::Display for $ident {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
std::fmt::Display::fmt(&self.0, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
(FromStr, $ident: ident) => {
|
||||||
|
#[automatically_derived]
|
||||||
|
impl std::str::FromStr for $ident {
|
||||||
|
type Err = anyhow::Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> anyhow::Result<Self> {
|
||||||
|
s.parse().map(Self).map_err(Into::into)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
/// Defines wrappers around types.
|
/// Defines wrappers around types.
|
||||||
///
|
///
|
||||||
/// For example, the macro invocation seen below:
|
/// For example, the macro invocation seen below:
|
||||||
@@ -12,11 +34,9 @@
|
|||||||
/// pub struct CaseId(usize);
|
/// pub struct CaseId(usize);
|
||||||
/// ```
|
/// ```
|
||||||
///
|
///
|
||||||
/// And would also implement a number of methods on this type making it easier
|
/// And would also implement a number of methods on this type making it easier to use.
|
||||||
/// to use.
|
|
||||||
///
|
///
|
||||||
/// These wrapper types become very useful as they make the code a lot easier
|
/// These wrapper types become very useful as they make the code a lot easier to read.
|
||||||
/// to read.
|
|
||||||
///
|
///
|
||||||
/// Take the following as an example:
|
/// Take the following as an example:
|
||||||
///
|
///
|
||||||
@@ -26,33 +46,37 @@
|
|||||||
/// }
|
/// }
|
||||||
/// ```
|
/// ```
|
||||||
///
|
///
|
||||||
/// In the above code it's hard to understand what the various types refer to or
|
/// In the above code it's hard to understand what the various types refer to or what to expect them
|
||||||
/// what to expect them to contain.
|
/// to contain.
|
||||||
///
|
///
|
||||||
/// With these wrapper types we're able to create code that's self-documenting
|
/// With these wrapper types we're able to create code that's self-documenting in that the types
|
||||||
/// in that the types tell us what the code is referring to. The above code is
|
/// tell us what the code is referring to. The above code is transformed into
|
||||||
/// transformed into
|
|
||||||
///
|
///
|
||||||
/// ```rust,ignore
|
/// ```rust,ignore
|
||||||
/// struct State {
|
/// struct State {
|
||||||
/// contracts: HashMap<CaseId, HashMap<ContractName, ContractByteCode>>
|
/// contracts: HashMap<CaseId, HashMap<ContractName, ContractByteCode>>
|
||||||
/// }
|
/// }
|
||||||
/// ```
|
/// ```
|
||||||
|
///
|
||||||
|
/// Note that we follow the same syntax for defining wrapper structs but we do not permit the use of
|
||||||
|
/// generics.
|
||||||
#[macro_export]
|
#[macro_export]
|
||||||
macro_rules! define_wrapper_type {
|
macro_rules! define_wrapper_type {
|
||||||
(
|
(
|
||||||
$(#[$meta: meta])*
|
$(#[$meta: meta])*
|
||||||
$ident: ident($ty: ty) $(;)?
|
$vis:vis struct $ident: ident($ty: ty)
|
||||||
|
|
||||||
|
$(
|
||||||
|
impl $($trait_ident: ident),*
|
||||||
|
)?
|
||||||
|
|
||||||
|
;
|
||||||
) => {
|
) => {
|
||||||
$(#[$meta])*
|
$(#[$meta])*
|
||||||
pub struct $ident($ty);
|
$vis struct $ident($ty);
|
||||||
|
|
||||||
impl $ident {
|
impl $ident {
|
||||||
pub fn new(value: $ty) -> Self {
|
pub fn new(value: impl Into<$ty>) -> Self {
|
||||||
Self(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn new_from<T: Into<$ty>>(value: T) -> Self {
|
|
||||||
Self(value.into())
|
Self(value.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -102,5 +126,15 @@ macro_rules! define_wrapper_type {
|
|||||||
value.0
|
value.0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
$(
|
||||||
|
$(
|
||||||
|
$crate::macros::impl_for_wrapper!($trait_ident, $ident);
|
||||||
|
)*
|
||||||
|
)?
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Technically not needed but this allows for the macro to be found in the `macros` module of the
|
||||||
|
/// crate in addition to being found in the root of the crate.
|
||||||
|
pub use {define_wrapper_type, impl_for_wrapper};
|
||||||
@@ -0,0 +1,3 @@
|
|||||||
|
mod define_wrapper_type;
|
||||||
|
|
||||||
|
pub use define_wrapper_type::*;
|
||||||
@@ -0,0 +1,136 @@
|
|||||||
|
use clap::ValueEnum;
|
||||||
|
use schemars::JsonSchema;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
|
||||||
|
|
||||||
|
/// An enum of the platform identifiers of all of the platforms supported by this framework. This
|
||||||
|
/// could be thought of like the target triple from Rust and LLVM where it specifies the platform
|
||||||
|
/// completely starting with the node, the vm, and finally the compiler used for this combination.
|
||||||
|
#[derive(
|
||||||
|
Clone,
|
||||||
|
Copy,
|
||||||
|
Debug,
|
||||||
|
PartialEq,
|
||||||
|
Eq,
|
||||||
|
PartialOrd,
|
||||||
|
Ord,
|
||||||
|
Hash,
|
||||||
|
Serialize,
|
||||||
|
Deserialize,
|
||||||
|
ValueEnum,
|
||||||
|
EnumString,
|
||||||
|
Display,
|
||||||
|
AsRefStr,
|
||||||
|
IntoStaticStr,
|
||||||
|
JsonSchema,
|
||||||
|
)]
|
||||||
|
#[serde(rename_all = "kebab-case")]
|
||||||
|
#[strum(serialize_all = "kebab-case")]
|
||||||
|
pub enum PlatformIdentifier {
|
||||||
|
/// The Go-ethereum reference full node EVM implementation with the solc compiler.
|
||||||
|
GethEvmSolc,
|
||||||
|
/// The Lighthouse Go-ethereum reference full node EVM implementation with the solc compiler.
|
||||||
|
LighthouseGethEvmSolc,
|
||||||
|
/// The revive dev node with the PolkaVM backend with the resolc compiler.
|
||||||
|
ReviveDevNodePolkavmResolc,
|
||||||
|
/// The revive dev node with the REVM backend with the solc compiler.
|
||||||
|
ReviveDevNodeRevmSolc,
|
||||||
|
/// A zombienet based Substrate/Polkadot node with the PolkaVM backend with the resolc compiler.
|
||||||
|
ZombienetPolkavmResolc,
|
||||||
|
/// A zombienet based Substrate/Polkadot node with the REVM backend with the solc compiler.
|
||||||
|
ZombienetRevmSolc,
|
||||||
|
/// A polkadot-omni-chain based node with a custom runtime with the PolkaVM backend and the
|
||||||
|
/// resolc compiler.
|
||||||
|
PolkadotOmniNodePolkavmResolc,
|
||||||
|
/// A polkadot-omni-chain based node with a custom runtime with the REVM backend and the solc
|
||||||
|
/// compiler.
|
||||||
|
PolkadotOmniNodeRevmSolc,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An enum of the platform identifiers of all of the platforms supported by this framework.
|
||||||
|
#[derive(
|
||||||
|
Clone,
|
||||||
|
Copy,
|
||||||
|
Debug,
|
||||||
|
PartialEq,
|
||||||
|
Eq,
|
||||||
|
PartialOrd,
|
||||||
|
Ord,
|
||||||
|
Hash,
|
||||||
|
Serialize,
|
||||||
|
Deserialize,
|
||||||
|
ValueEnum,
|
||||||
|
EnumString,
|
||||||
|
Display,
|
||||||
|
AsRefStr,
|
||||||
|
IntoStaticStr,
|
||||||
|
JsonSchema,
|
||||||
|
)]
|
||||||
|
pub enum CompilerIdentifier {
|
||||||
|
/// The solc compiler.
|
||||||
|
Solc,
|
||||||
|
/// The resolc compiler.
|
||||||
|
Resolc,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An enum representing the identifiers of the supported nodes.
|
||||||
|
#[derive(
|
||||||
|
Clone,
|
||||||
|
Copy,
|
||||||
|
Debug,
|
||||||
|
PartialEq,
|
||||||
|
Eq,
|
||||||
|
PartialOrd,
|
||||||
|
Ord,
|
||||||
|
Hash,
|
||||||
|
Serialize,
|
||||||
|
Deserialize,
|
||||||
|
ValueEnum,
|
||||||
|
EnumString,
|
||||||
|
Display,
|
||||||
|
AsRefStr,
|
||||||
|
IntoStaticStr,
|
||||||
|
JsonSchema,
|
||||||
|
)]
|
||||||
|
pub enum NodeIdentifier {
|
||||||
|
/// The go-ethereum node implementation.
|
||||||
|
Geth,
|
||||||
|
/// The go-ethereum node implementation.
|
||||||
|
LighthouseGeth,
|
||||||
|
/// The revive dev node implementation.
|
||||||
|
ReviveDevNode,
|
||||||
|
/// A zombienet spawned nodes
|
||||||
|
Zombienet,
|
||||||
|
/// The polkadot-omni-node.
|
||||||
|
PolkadotOmniNode,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An enum representing the identifiers of the supported VMs.
|
||||||
|
#[derive(
|
||||||
|
Clone,
|
||||||
|
Copy,
|
||||||
|
Debug,
|
||||||
|
PartialEq,
|
||||||
|
Eq,
|
||||||
|
PartialOrd,
|
||||||
|
Ord,
|
||||||
|
Hash,
|
||||||
|
Serialize,
|
||||||
|
Deserialize,
|
||||||
|
ValueEnum,
|
||||||
|
EnumString,
|
||||||
|
Display,
|
||||||
|
AsRefStr,
|
||||||
|
IntoStaticStr,
|
||||||
|
JsonSchema,
|
||||||
|
)]
|
||||||
|
#[serde(rename_all = "lowercase")]
|
||||||
|
#[strum(serialize_all = "lowercase")]
|
||||||
|
pub enum VmIdentifier {
|
||||||
|
/// The ethereum virtual machine.
|
||||||
|
Evm,
|
||||||
|
/// The EraVM virtual machine.
|
||||||
|
EraVM,
|
||||||
|
/// Polkadot's PolaVM Risc-v based virtual machine.
|
||||||
|
PolkaVM,
|
||||||
|
}
|
||||||
@@ -0,0 +1,13 @@
|
|||||||
|
mod identifiers;
|
||||||
|
mod mode;
|
||||||
|
mod parsed_test_specifier;
|
||||||
|
mod private_key_allocator;
|
||||||
|
mod round_robin_pool;
|
||||||
|
mod version_or_requirement;
|
||||||
|
|
||||||
|
pub use identifiers::*;
|
||||||
|
pub use mode::*;
|
||||||
|
pub use parsed_test_specifier::*;
|
||||||
|
pub use private_key_allocator::*;
|
||||||
|
pub use round_robin_pool::*;
|
||||||
|
pub use version_or_requirement::*;
|
||||||
@@ -0,0 +1,450 @@
|
|||||||
|
use crate::iterators::EitherIter;
|
||||||
|
use crate::types::VersionOrRequirement;
|
||||||
|
use anyhow::{Context as _, bail};
|
||||||
|
use regex::Regex;
|
||||||
|
use schemars::JsonSchema;
|
||||||
|
use semver::Version;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::collections::HashSet;
|
||||||
|
use std::fmt::Display;
|
||||||
|
use std::str::FromStr;
|
||||||
|
use std::sync::LazyLock;
|
||||||
|
|
||||||
|
/// This represents a mode that a given test should be run with, if possible.
|
||||||
|
///
|
||||||
|
/// We obtain this by taking a [`ParsedMode`], which may be looser or more strict
|
||||||
|
/// in its requirements, and then expanding it out into a list of [`Mode`]s.
|
||||||
|
///
|
||||||
|
/// Use [`ParsedMode::to_test_modes()`] to do this.
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||||
|
pub struct Mode {
|
||||||
|
pub pipeline: ModePipeline,
|
||||||
|
pub optimize_setting: ModeOptimizerSetting,
|
||||||
|
pub version: Option<semver::VersionReq>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Ord for Mode {
|
||||||
|
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
||||||
|
self.to_string().cmp(&other.to_string())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PartialOrd for Mode {
|
||||||
|
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
|
||||||
|
Some(self.cmp(other))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Display for Mode {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
self.pipeline.fmt(f)?;
|
||||||
|
f.write_str(" ")?;
|
||||||
|
self.optimize_setting.fmt(f)?;
|
||||||
|
|
||||||
|
if let Some(version) = &self.version {
|
||||||
|
f.write_str(" ")?;
|
||||||
|
version.fmt(f)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for Mode {
|
||||||
|
type Err = anyhow::Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
let parsed_mode = ParsedMode::from_str(s)?;
|
||||||
|
let mut iter = parsed_mode.to_modes();
|
||||||
|
let (Some(mode), None) = (iter.next(), iter.next()) else {
|
||||||
|
bail!("Failed to parse the mode")
|
||||||
|
};
|
||||||
|
Ok(mode)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Mode {
|
||||||
|
/// Return all of the available mode combinations.
|
||||||
|
pub fn all() -> impl Iterator<Item = &'static Mode> {
|
||||||
|
static ALL_MODES: LazyLock<Vec<Mode>> = LazyLock::new(|| {
|
||||||
|
ModePipeline::test_cases()
|
||||||
|
.flat_map(|pipeline| {
|
||||||
|
ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode {
|
||||||
|
pipeline,
|
||||||
|
optimize_setting,
|
||||||
|
version: None,
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
});
|
||||||
|
ALL_MODES.iter()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Resolves the [`Mode`]'s solidity version requirement into a [`VersionOrRequirement`] if
|
||||||
|
/// the requirement is present on the object. Otherwise, the passed default version is used.
|
||||||
|
pub fn compiler_version_to_use(&self, default: Version) -> VersionOrRequirement {
|
||||||
|
match self.version {
|
||||||
|
Some(ref requirement) => requirement.clone().into(),
|
||||||
|
None => default.into(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// What do we want the compiler to do?
|
||||||
|
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
|
||||||
|
pub enum ModePipeline {
|
||||||
|
/// Compile Solidity code via Yul IR
|
||||||
|
ViaYulIR,
|
||||||
|
/// Compile Solidity direct to assembly
|
||||||
|
ViaEVMAssembly,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for ModePipeline {
|
||||||
|
type Err = anyhow::Error;
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
match s {
|
||||||
|
// via Yul IR
|
||||||
|
"Y" => Ok(ModePipeline::ViaYulIR),
|
||||||
|
// Don't go via Yul IR
|
||||||
|
"E" => Ok(ModePipeline::ViaEVMAssembly),
|
||||||
|
// Anything else that we see isn't a mode at all
|
||||||
|
_ => Err(anyhow::anyhow!(
|
||||||
|
"Unsupported pipeline '{s}': expected 'Y' or 'E'"
|
||||||
|
)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Display for ModePipeline {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
match self {
|
||||||
|
ModePipeline::ViaYulIR => f.write_str("Y"),
|
||||||
|
ModePipeline::ViaEVMAssembly => f.write_str("E"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ModePipeline {
|
||||||
|
/// Should we go via Yul IR?
|
||||||
|
pub fn via_yul_ir(&self) -> bool {
|
||||||
|
matches!(self, ModePipeline::ViaYulIR)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An iterator over the available pipelines that we'd like to test,
|
||||||
|
/// when an explicit pipeline was not specified.
|
||||||
|
pub fn test_cases() -> impl Iterator<Item = ModePipeline> + Clone {
|
||||||
|
[ModePipeline::ViaYulIR, ModePipeline::ViaEVMAssembly].into_iter()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
|
||||||
|
pub enum ModeOptimizerSetting {
|
||||||
|
/// 0 / -: Don't apply any optimizations
|
||||||
|
M0,
|
||||||
|
/// 1: Apply less than default optimizations
|
||||||
|
M1,
|
||||||
|
/// 2: Apply the default optimizations
|
||||||
|
M2,
|
||||||
|
/// 3 / +: Apply aggressive optimizations
|
||||||
|
M3,
|
||||||
|
/// s: Optimize for size
|
||||||
|
Ms,
|
||||||
|
/// z: Aggressively optimize for size
|
||||||
|
Mz,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for ModeOptimizerSetting {
|
||||||
|
type Err = anyhow::Error;
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
match s {
|
||||||
|
"M0" => Ok(ModeOptimizerSetting::M0),
|
||||||
|
"M1" => Ok(ModeOptimizerSetting::M1),
|
||||||
|
"M2" => Ok(ModeOptimizerSetting::M2),
|
||||||
|
"M3" => Ok(ModeOptimizerSetting::M3),
|
||||||
|
"Ms" => Ok(ModeOptimizerSetting::Ms),
|
||||||
|
"Mz" => Ok(ModeOptimizerSetting::Mz),
|
||||||
|
_ => Err(anyhow::anyhow!(
|
||||||
|
"Unsupported optimizer setting '{s}': expected 'M0', 'M1', 'M2', 'M3', 'Ms' or 'Mz'"
|
||||||
|
)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Display for ModeOptimizerSetting {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
match self {
|
||||||
|
ModeOptimizerSetting::M0 => f.write_str("M0"),
|
||||||
|
ModeOptimizerSetting::M1 => f.write_str("M1"),
|
||||||
|
ModeOptimizerSetting::M2 => f.write_str("M2"),
|
||||||
|
ModeOptimizerSetting::M3 => f.write_str("M3"),
|
||||||
|
ModeOptimizerSetting::Ms => f.write_str("Ms"),
|
||||||
|
ModeOptimizerSetting::Mz => f.write_str("Mz"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ModeOptimizerSetting {
|
||||||
|
/// An iterator over the available optimizer settings that we'd like to test,
|
||||||
|
/// when an explicit optimizer setting was not specified.
|
||||||
|
pub fn test_cases() -> impl Iterator<Item = ModeOptimizerSetting> + Clone {
|
||||||
|
[
|
||||||
|
// No optimizations:
|
||||||
|
ModeOptimizerSetting::M0,
|
||||||
|
// Aggressive optimizations:
|
||||||
|
ModeOptimizerSetting::M3,
|
||||||
|
]
|
||||||
|
.into_iter()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Are any optimizations enabled?
|
||||||
|
pub fn optimizations_enabled(&self) -> bool {
|
||||||
|
!matches!(self, ModeOptimizerSetting::M0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This represents a mode that has been parsed from test metadata.
|
||||||
|
///
|
||||||
|
/// Mode strings can take the following form (in pseudo-regex):
|
||||||
|
///
|
||||||
|
/// ```text
|
||||||
|
/// [YEILV][+-]? (M[0123sz])? <semver>?
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
|
||||||
|
#[serde(try_from = "String", into = "String")]
|
||||||
|
pub struct ParsedMode {
|
||||||
|
pub pipeline: Option<ModePipeline>,
|
||||||
|
pub optimize_flag: Option<bool>,
|
||||||
|
pub optimize_setting: Option<ModeOptimizerSetting>,
|
||||||
|
pub version: Option<semver::VersionReq>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for ParsedMode {
|
||||||
|
type Err = anyhow::Error;
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
static REGEX: LazyLock<Regex> = LazyLock::new(|| {
|
||||||
|
Regex::new(r"(?x)
|
||||||
|
^
|
||||||
|
(?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use eg Y, E+, E-
|
||||||
|
\s*
|
||||||
|
(?P<optimize_setting>M[a-zA-Z0-9])? # Optimize setting eg M0, Ms, Mz
|
||||||
|
\s*
|
||||||
|
(?P<version>[>=<^]*\d+(?:\.\d+)*)? # Optional semver version eg >=0.8.0, 0.7, <0.8
|
||||||
|
$
|
||||||
|
").unwrap()
|
||||||
|
});
|
||||||
|
|
||||||
|
let Some(caps) = REGEX.captures(s) else {
|
||||||
|
anyhow::bail!("Cannot parse mode '{s}' from string");
|
||||||
|
};
|
||||||
|
|
||||||
|
let pipeline = match caps.name("pipeline") {
|
||||||
|
Some(m) => Some(
|
||||||
|
ModePipeline::from_str(m.as_str())
|
||||||
|
.context("Failed to parse mode pipeline from string")?,
|
||||||
|
),
|
||||||
|
None => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
|
||||||
|
|
||||||
|
let optimize_setting = match caps.name("optimize_setting") {
|
||||||
|
Some(m) => Some(
|
||||||
|
ModeOptimizerSetting::from_str(m.as_str())
|
||||||
|
.context("Failed to parse optimizer setting from string")?,
|
||||||
|
),
|
||||||
|
None => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
let version = match caps.name("version") {
|
||||||
|
Some(m) => Some(
|
||||||
|
semver::VersionReq::parse(m.as_str())
|
||||||
|
.map_err(|e| {
|
||||||
|
anyhow::anyhow!(
|
||||||
|
"Cannot parse the version requirement '{}': {e}",
|
||||||
|
m.as_str()
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.context("Failed to parse semver requirement from mode string")?,
|
||||||
|
),
|
||||||
|
None => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(ParsedMode {
|
||||||
|
pipeline,
|
||||||
|
optimize_flag,
|
||||||
|
optimize_setting,
|
||||||
|
version,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Display for ParsedMode {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
let mut has_written = false;
|
||||||
|
|
||||||
|
if let Some(pipeline) = self.pipeline {
|
||||||
|
pipeline.fmt(f)?;
|
||||||
|
if let Some(optimize_flag) = self.optimize_flag {
|
||||||
|
f.write_str(if optimize_flag { "+" } else { "-" })?;
|
||||||
|
}
|
||||||
|
has_written = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(optimize_setting) = self.optimize_setting {
|
||||||
|
if has_written {
|
||||||
|
f.write_str(" ")?;
|
||||||
|
}
|
||||||
|
optimize_setting.fmt(f)?;
|
||||||
|
has_written = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(version) = &self.version {
|
||||||
|
if has_written {
|
||||||
|
f.write_str(" ")?;
|
||||||
|
}
|
||||||
|
version.fmt(f)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<ParsedMode> for String {
|
||||||
|
fn from(parsed_mode: ParsedMode) -> Self {
|
||||||
|
parsed_mode.to_string()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TryFrom<String> for ParsedMode {
|
||||||
|
type Error = anyhow::Error;
|
||||||
|
fn try_from(value: String) -> Result<Self, Self::Error> {
|
||||||
|
ParsedMode::from_str(&value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ParsedMode {
|
||||||
|
/// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
|
||||||
|
pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
|
||||||
|
let pipeline_iter = self.pipeline.as_ref().map_or_else(
|
||||||
|
|| EitherIter::A(ModePipeline::test_cases()),
|
||||||
|
|p| EitherIter::B(std::iter::once(*p)),
|
||||||
|
);
|
||||||
|
|
||||||
|
let optimize_flag_setting = self.optimize_flag.map(|flag| {
|
||||||
|
if flag {
|
||||||
|
ModeOptimizerSetting::M3
|
||||||
|
} else {
|
||||||
|
ModeOptimizerSetting::M0
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let optimize_flag_iter = match optimize_flag_setting {
|
||||||
|
Some(setting) => EitherIter::A(std::iter::once(setting)),
|
||||||
|
None => EitherIter::B(ModeOptimizerSetting::test_cases()),
|
||||||
|
};
|
||||||
|
|
||||||
|
let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
|
||||||
|
|| EitherIter::A(optimize_flag_iter),
|
||||||
|
|s| EitherIter::B(std::iter::once(*s)),
|
||||||
|
);
|
||||||
|
|
||||||
|
pipeline_iter.flat_map(move |pipeline| {
|
||||||
|
optimize_settings_iter
|
||||||
|
.clone()
|
||||||
|
.map(move |optimize_setting| Mode {
|
||||||
|
pipeline,
|
||||||
|
optimize_setting,
|
||||||
|
version: self.version.clone(),
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
|
||||||
|
/// This avoids any duplicate entries.
|
||||||
|
pub fn many_to_modes<'a>(
|
||||||
|
parsed: impl Iterator<Item = &'a ParsedMode>,
|
||||||
|
) -> impl Iterator<Item = Mode> {
|
||||||
|
let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
|
||||||
|
modes.into_iter()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_parsed_mode_from_str() {
|
||||||
|
let strings = vec![
|
||||||
|
("Mz", "Mz"),
|
||||||
|
("Y", "Y"),
|
||||||
|
("Y+", "Y+"),
|
||||||
|
("Y-", "Y-"),
|
||||||
|
("E", "E"),
|
||||||
|
("E+", "E+"),
|
||||||
|
("E-", "E-"),
|
||||||
|
("Y M0", "Y M0"),
|
||||||
|
("Y M1", "Y M1"),
|
||||||
|
("Y M2", "Y M2"),
|
||||||
|
("Y M3", "Y M3"),
|
||||||
|
("Y Ms", "Y Ms"),
|
||||||
|
("Y Mz", "Y Mz"),
|
||||||
|
("E M0", "E M0"),
|
||||||
|
("E M1", "E M1"),
|
||||||
|
("E M2", "E M2"),
|
||||||
|
("E M3", "E M3"),
|
||||||
|
("E Ms", "E Ms"),
|
||||||
|
("E Mz", "E Mz"),
|
||||||
|
// When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
|
||||||
|
("Y 0.8.0", "Y ^0.8.0"),
|
||||||
|
("E+ 0.8.0", "E+ ^0.8.0"),
|
||||||
|
("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
|
||||||
|
("E Mz <0.7.0", "E Mz <0.7.0"),
|
||||||
|
// We can parse +- _and_ M1/M2 but the latter takes priority.
|
||||||
|
("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
|
||||||
|
("E- M2 0.7.0", "E- M2 ^0.7.0"),
|
||||||
|
// We don't see this in the wild but it is parsed.
|
||||||
|
("<=0.8", "<=0.8"),
|
||||||
|
];
|
||||||
|
|
||||||
|
for (actual, expected) in strings {
|
||||||
|
let parsed = ParsedMode::from_str(actual)
|
||||||
|
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
|
||||||
|
assert_eq!(
|
||||||
|
expected,
|
||||||
|
parsed.to_string(),
|
||||||
|
"Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_parsed_mode_to_test_modes() {
|
||||||
|
let strings = vec![
|
||||||
|
("Mz", vec!["Y Mz", "E Mz"]),
|
||||||
|
("Y", vec!["Y M0", "Y M3"]),
|
||||||
|
("E", vec!["E M0", "E M3"]),
|
||||||
|
("Y+", vec!["Y M3"]),
|
||||||
|
("Y-", vec!["Y M0"]),
|
||||||
|
("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
|
||||||
|
(
|
||||||
|
"<=0.8",
|
||||||
|
vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
|
||||||
|
),
|
||||||
|
];
|
||||||
|
|
||||||
|
for (actual, expected) in strings {
|
||||||
|
let parsed = ParsedMode::from_str(actual)
|
||||||
|
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
|
||||||
|
let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
|
||||||
|
let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
expected_set, actual_set,
|
||||||
|
"Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,173 @@
|
|||||||
|
use std::{
|
||||||
|
fmt::Display,
|
||||||
|
path::{Path, PathBuf},
|
||||||
|
str::FromStr,
|
||||||
|
};
|
||||||
|
|
||||||
|
use anyhow::{Context as _, bail};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use crate::types::Mode;
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||||
|
pub enum ParsedTestSpecifier {
|
||||||
|
/// All of the test cases in the file should be ran across all of the specified modes
|
||||||
|
FileOrDirectory {
|
||||||
|
/// The path of the metadata file containing the test cases.
|
||||||
|
metadata_or_directory_file_path: PathBuf,
|
||||||
|
},
|
||||||
|
/// Only a specific case within the metadata file should be ran across all of the modes in the
|
||||||
|
/// file.
|
||||||
|
Case {
|
||||||
|
/// The path of the metadata file containing the test cases.
|
||||||
|
metadata_file_path: PathBuf,
|
||||||
|
|
||||||
|
/// The index of the specific case to run.
|
||||||
|
case_idx: usize,
|
||||||
|
},
|
||||||
|
/// A specific case and a specific mode should be ran. This is the most specific out of all of
|
||||||
|
/// the specifier types.
|
||||||
|
CaseWithMode {
|
||||||
|
/// The path of the metadata file containing the test cases.
|
||||||
|
metadata_file_path: PathBuf,
|
||||||
|
|
||||||
|
/// The index of the specific case to run.
|
||||||
|
case_idx: usize,
|
||||||
|
|
||||||
|
/// The parsed mode that the test should be run in.
|
||||||
|
mode: Mode,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ParsedTestSpecifier {
|
||||||
|
pub fn metadata_path(&self) -> &Path {
|
||||||
|
match self {
|
||||||
|
ParsedTestSpecifier::FileOrDirectory {
|
||||||
|
metadata_or_directory_file_path: metadata_file_path,
|
||||||
|
}
|
||||||
|
| ParsedTestSpecifier::Case {
|
||||||
|
metadata_file_path, ..
|
||||||
|
}
|
||||||
|
| ParsedTestSpecifier::CaseWithMode {
|
||||||
|
metadata_file_path, ..
|
||||||
|
} => metadata_file_path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Display for ParsedTestSpecifier {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
match self {
|
||||||
|
ParsedTestSpecifier::FileOrDirectory {
|
||||||
|
metadata_or_directory_file_path,
|
||||||
|
} => {
|
||||||
|
write!(f, "{}", metadata_or_directory_file_path.display())
|
||||||
|
}
|
||||||
|
ParsedTestSpecifier::Case {
|
||||||
|
metadata_file_path,
|
||||||
|
case_idx,
|
||||||
|
} => {
|
||||||
|
write!(f, "{}::{}", metadata_file_path.display(), case_idx)
|
||||||
|
}
|
||||||
|
ParsedTestSpecifier::CaseWithMode {
|
||||||
|
metadata_file_path,
|
||||||
|
case_idx,
|
||||||
|
mode,
|
||||||
|
} => {
|
||||||
|
write!(
|
||||||
|
f,
|
||||||
|
"{}::{}::{}",
|
||||||
|
metadata_file_path.display(),
|
||||||
|
case_idx,
|
||||||
|
mode
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for ParsedTestSpecifier {
|
||||||
|
type Err = anyhow::Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
let mut split_iter = s.split("::");
|
||||||
|
|
||||||
|
let Some(path_string) = split_iter.next() else {
|
||||||
|
bail!("Could not find the path in the test specifier")
|
||||||
|
};
|
||||||
|
let path = PathBuf::from(path_string)
|
||||||
|
.canonicalize()
|
||||||
|
.context("Failed to canonicalize the path of the test")?;
|
||||||
|
|
||||||
|
let Some(case_idx_string) = split_iter.next() else {
|
||||||
|
return Ok(Self::FileOrDirectory {
|
||||||
|
metadata_or_directory_file_path: path,
|
||||||
|
});
|
||||||
|
};
|
||||||
|
let case_idx = usize::from_str(case_idx_string)
|
||||||
|
.context("Failed to parse the case idx of the test specifier from string")?;
|
||||||
|
|
||||||
|
// At this point the provided path must be a file.
|
||||||
|
if !path.is_file() {
|
||||||
|
bail!(
|
||||||
|
"Test specifier with a path and case idx must point to a file and not a directory"
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
let Some(mode_string) = split_iter.next() else {
|
||||||
|
return Ok(Self::Case {
|
||||||
|
metadata_file_path: path,
|
||||||
|
case_idx,
|
||||||
|
});
|
||||||
|
};
|
||||||
|
let mode = Mode::from_str(mode_string)
|
||||||
|
.context("Failed to parse the mode string in the parsed test specifier")?;
|
||||||
|
|
||||||
|
Ok(Self::CaseWithMode {
|
||||||
|
metadata_file_path: path,
|
||||||
|
case_idx,
|
||||||
|
mode,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<ParsedTestSpecifier> for String {
|
||||||
|
fn from(value: ParsedTestSpecifier) -> Self {
|
||||||
|
value.to_string()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TryFrom<String> for ParsedTestSpecifier {
|
||||||
|
type Error = anyhow::Error;
|
||||||
|
|
||||||
|
fn try_from(value: String) -> Result<Self, Self::Error> {
|
||||||
|
value.parse()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TryFrom<&str> for ParsedTestSpecifier {
|
||||||
|
type Error = anyhow::Error;
|
||||||
|
|
||||||
|
fn try_from(value: &str) -> Result<Self, Self::Error> {
|
||||||
|
value.parse()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Serialize for ParsedTestSpecifier {
|
||||||
|
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||||
|
where
|
||||||
|
S: serde::Serializer,
|
||||||
|
{
|
||||||
|
self.to_string().serialize(serializer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'de> Deserialize<'de> for ParsedTestSpecifier {
|
||||||
|
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||||
|
where
|
||||||
|
D: serde::Deserializer<'de>,
|
||||||
|
{
|
||||||
|
let string = String::deserialize(deserializer)?;
|
||||||
|
string.parse().map_err(serde::de::Error::custom)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,36 @@
|
|||||||
|
use alloy::primitives::U256;
|
||||||
|
use alloy::signers::local::PrivateKeySigner;
|
||||||
|
use anyhow::{Context, Result, bail};
|
||||||
|
|
||||||
|
/// This is a sequential private key allocator. When instantiated, it allocated private keys in
|
||||||
|
/// sequentially and in order until the maximum private key specified is reached.
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||||
|
pub struct PrivateKeyAllocator {
|
||||||
|
/// The next private key to be returned by the allocator when requested.
|
||||||
|
next_private_key: U256,
|
||||||
|
|
||||||
|
/// The highest private key (exclusive) that can be returned by this allocator.
|
||||||
|
highest_private_key_inclusive: U256,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PrivateKeyAllocator {
|
||||||
|
/// Creates a new instance of the private key allocator.
|
||||||
|
pub fn new(highest_private_key_inclusive: U256) -> Self {
|
||||||
|
Self {
|
||||||
|
next_private_key: U256::ONE,
|
||||||
|
highest_private_key_inclusive,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Allocates a new private key and errors out if the maximum private key has been reached.
|
||||||
|
pub fn allocate(&mut self) -> Result<PrivateKeySigner> {
|
||||||
|
if self.next_private_key > self.highest_private_key_inclusive {
|
||||||
|
bail!("Attempted to allocate a private key but failed since all have been allocated");
|
||||||
|
};
|
||||||
|
let private_key =
|
||||||
|
PrivateKeySigner::from_slice(self.next_private_key.to_be_bytes::<32>().as_slice())
|
||||||
|
.context("Failed to convert the private key digits into a private key")?;
|
||||||
|
self.next_private_key += U256::ONE;
|
||||||
|
Ok(private_key)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,24 @@
|
|||||||
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||||
|
|
||||||
|
pub struct RoundRobinPool<T> {
|
||||||
|
next_index: AtomicUsize,
|
||||||
|
items: Vec<T>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> RoundRobinPool<T> {
|
||||||
|
pub fn new(items: Vec<T>) -> Self {
|
||||||
|
Self {
|
||||||
|
next_index: Default::default(),
|
||||||
|
items,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn round_robin(&self) -> &T {
|
||||||
|
let current = self.next_index.fetch_add(1, Ordering::SeqCst) % self.items.len();
|
||||||
|
self.items.get(current).unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn iter(&self) -> impl Iterator<Item = &T> {
|
||||||
|
self.items.iter()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,41 @@
|
|||||||
|
use semver::{Version, VersionReq};
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub enum VersionOrRequirement {
|
||||||
|
Version(Version),
|
||||||
|
Requirement(VersionReq),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<Version> for VersionOrRequirement {
|
||||||
|
fn from(value: Version) -> Self {
|
||||||
|
Self::Version(value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<VersionReq> for VersionOrRequirement {
|
||||||
|
fn from(value: VersionReq) -> Self {
|
||||||
|
Self::Requirement(value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TryFrom<VersionOrRequirement> for Version {
|
||||||
|
type Error = anyhow::Error;
|
||||||
|
|
||||||
|
fn try_from(value: VersionOrRequirement) -> Result<Self, Self::Error> {
|
||||||
|
let VersionOrRequirement::Version(version) = value else {
|
||||||
|
anyhow::bail!("Version or requirement was not a version");
|
||||||
|
};
|
||||||
|
Ok(version)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TryFrom<VersionOrRequirement> for VersionReq {
|
||||||
|
type Error = anyhow::Error;
|
||||||
|
|
||||||
|
fn try_from(value: VersionOrRequirement) -> Result<Self, Self::Error> {
|
||||||
|
let VersionOrRequirement::Requirement(requirement) = value else {
|
||||||
|
anyhow::bail!("Version or requirement was not a requirement");
|
||||||
|
};
|
||||||
|
Ok(requirement)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -9,11 +9,21 @@ repository.workspace = true
|
|||||||
rust-version.workspace = true
|
rust-version.workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow = { workspace = true }
|
|
||||||
revive-solc-json-interface = { workspace = true }
|
revive-solc-json-interface = { workspace = true }
|
||||||
|
revive-dt-common = { workspace = true }
|
||||||
revive-dt-config = { workspace = true }
|
revive-dt-config = { workspace = true }
|
||||||
revive-dt-solc-binaries = { workspace = true }
|
revive-dt-solc-binaries = { workspace = true }
|
||||||
revive-common = { workspace = true }
|
revive-common = { workspace = true }
|
||||||
|
|
||||||
|
alloy = { workspace = true }
|
||||||
|
anyhow = { workspace = true }
|
||||||
|
dashmap = { workspace = true }
|
||||||
|
foundry-compilers-artifacts = { workspace = true }
|
||||||
semver = { workspace = true }
|
semver = { workspace = true }
|
||||||
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
tracing = { workspace = true }
|
tracing = { workspace = true }
|
||||||
|
tokio = { workspace = true }
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
|||||||
+114
-112
@@ -4,20 +4,23 @@
|
|||||||
//! - Polkadot revive Wasm compiler
|
//! - Polkadot revive Wasm compiler
|
||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
fs::read_to_string,
|
collections::HashMap,
|
||||||
hash::Hash,
|
hash::Hash,
|
||||||
path::{Path, PathBuf},
|
path::{Path, PathBuf},
|
||||||
|
pin::Pin,
|
||||||
};
|
};
|
||||||
|
|
||||||
use revive_dt_config::Arguments;
|
use alloy::json_abi::JsonAbi;
|
||||||
|
use alloy::primitives::Address;
|
||||||
|
use anyhow::{Context as _, Result};
|
||||||
|
use semver::Version;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use revive_common::EVMVersion;
|
use revive_common::EVMVersion;
|
||||||
use revive_solc_json_interface::{
|
use revive_dt_common::cached_fs::read_to_string;
|
||||||
SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
|
|
||||||
SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
|
// Re-export this as it's a part of the compiler interface.
|
||||||
SolcStandardJsonOutput,
|
pub use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
|
||||||
};
|
|
||||||
use semver::Version;
|
|
||||||
|
|
||||||
pub mod revive_js;
|
pub mod revive_js;
|
||||||
pub mod revive_resolc;
|
pub mod revive_resolc;
|
||||||
@@ -25,150 +28,149 @@ pub mod solc;
|
|||||||
|
|
||||||
/// A common interface for all supported Solidity compilers.
|
/// A common interface for all supported Solidity compilers.
|
||||||
pub trait SolidityCompiler {
|
pub trait SolidityCompiler {
|
||||||
/// Extra options specific to the compiler.
|
/// Returns the version of the compiler.
|
||||||
type Options: Default + PartialEq + Eq + Hash;
|
fn version(&self) -> &Version;
|
||||||
|
|
||||||
|
/// Returns the path of the compiler executable.
|
||||||
|
fn path(&self) -> &Path;
|
||||||
|
|
||||||
/// The low-level compiler interface.
|
/// The low-level compiler interface.
|
||||||
fn build(
|
fn build(
|
||||||
&self,
|
&self,
|
||||||
input: CompilerInput<Self::Options>,
|
input: CompilerInput,
|
||||||
) -> anyhow::Result<CompilerOutput<Self::Options>>;
|
) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>>;
|
||||||
|
|
||||||
fn new(solc_executable: PathBuf) -> Self;
|
/// Does the compiler support the provided mode and version settings.
|
||||||
|
fn supports_mode(
|
||||||
fn get_compiler_executable(config: &Arguments, version: Version) -> anyhow::Result<PathBuf>;
|
&self,
|
||||||
|
optimizer_setting: ModeOptimizerSetting,
|
||||||
|
pipeline: ModePipeline,
|
||||||
|
) -> bool;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The generic compilation input configuration.
|
/// The generic compilation input configuration.
|
||||||
#[derive(Debug)]
|
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||||
pub struct CompilerInput<T: PartialEq + Eq + Hash> {
|
pub struct CompilerInput {
|
||||||
pub extra_options: T,
|
pub pipeline: Option<ModePipeline>,
|
||||||
pub input: SolcStandardJsonInput,
|
pub optimization: Option<ModeOptimizerSetting>,
|
||||||
|
pub evm_version: Option<EVMVersion>,
|
||||||
pub allow_paths: Vec<PathBuf>,
|
pub allow_paths: Vec<PathBuf>,
|
||||||
pub base_path: Option<PathBuf>,
|
pub base_path: Option<PathBuf>,
|
||||||
|
pub sources: HashMap<PathBuf, String>,
|
||||||
|
pub libraries: HashMap<PathBuf, HashMap<String, Address>>,
|
||||||
|
pub revert_string_handling: Option<RevertString>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The generic compilation output configuration.
|
/// The generic compilation output configuration.
|
||||||
#[derive(Debug)]
|
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||||
pub struct CompilerOutput<T: PartialEq + Eq + Hash> {
|
pub struct CompilerOutput {
|
||||||
/// The solc standard JSON input.
|
/// The compiled contracts. The bytecode of the contract is kept as a string in case linking is
|
||||||
pub input: CompilerInput<T>,
|
/// required and the compiled source has placeholders.
|
||||||
/// The produced solc standard JSON output.
|
pub contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
|
||||||
pub output: SolcStandardJsonOutput,
|
|
||||||
/// The error message in case the compiler returns abnormally.
|
|
||||||
pub error: Option<String>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T> PartialEq for CompilerInput<T>
|
/// A generic builder style interface for configuring the supported compiler options.
|
||||||
where
|
#[derive(Default)]
|
||||||
T: PartialEq + Eq + Hash,
|
pub struct Compiler {
|
||||||
{
|
input: CompilerInput,
|
||||||
fn eq(&self, other: &Self) -> bool {
|
|
||||||
let self_input = serde_json::to_vec(&self.input).unwrap_or_default();
|
|
||||||
let other_input = serde_json::to_vec(&self.input).unwrap_or_default();
|
|
||||||
self.extra_options.eq(&other.extra_options) && self_input == other_input
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T> Eq for CompilerInput<T> where T: PartialEq + Eq + Hash {}
|
impl Compiler {
|
||||||
|
|
||||||
impl<T> Hash for CompilerInput<T>
|
|
||||||
where
|
|
||||||
T: PartialEq + Eq + Hash,
|
|
||||||
{
|
|
||||||
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
|
|
||||||
self.extra_options.hash(state);
|
|
||||||
state.write(&serde_json::to_vec(&self.input).unwrap_or_default());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A generic builder style interface for configuring all compiler options.
|
|
||||||
pub struct Compiler<T: SolidityCompiler> {
|
|
||||||
input: SolcStandardJsonInput,
|
|
||||||
extra_options: T::Options,
|
|
||||||
allow_paths: Vec<PathBuf>,
|
|
||||||
base_path: Option<PathBuf>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for Compiler<solc::Solc> {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self::new()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Compiler<T>
|
|
||||||
where
|
|
||||||
T: SolidityCompiler,
|
|
||||||
{
|
|
||||||
pub fn new() -> Self {
|
pub fn new() -> Self {
|
||||||
Self {
|
Self {
|
||||||
input: SolcStandardJsonInput {
|
input: CompilerInput {
|
||||||
language: SolcStandardJsonInputLanguage::Solidity,
|
pipeline: Default::default(),
|
||||||
sources: Default::default(),
|
optimization: Default::default(),
|
||||||
settings: SolcStandardJsonInputSettings::new(
|
evm_version: Default::default(),
|
||||||
None,
|
|
||||||
Default::default(),
|
|
||||||
None,
|
|
||||||
SolcStandardJsonInputSettingsSelection::new_required(),
|
|
||||||
SolcStandardJsonInputSettingsOptimizer::new(
|
|
||||||
false,
|
|
||||||
None,
|
|
||||||
&Version::new(0, 0, 0),
|
|
||||||
false,
|
|
||||||
),
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
),
|
|
||||||
},
|
|
||||||
extra_options: Default::default(),
|
|
||||||
allow_paths: Default::default(),
|
allow_paths: Default::default(),
|
||||||
base_path: None,
|
base_path: Default::default(),
|
||||||
|
sources: Default::default(),
|
||||||
|
libraries: Default::default(),
|
||||||
|
revert_string_handling: Default::default(),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn solc_optimizer(mut self, enabled: bool) -> Self {
|
pub fn with_optimization(mut self, value: impl Into<Option<ModeOptimizerSetting>>) -> Self {
|
||||||
self.input.settings.optimizer.enabled = enabled;
|
self.input.optimization = value.into();
|
||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn with_source(mut self, path: &Path) -> anyhow::Result<Self> {
|
pub fn with_pipeline(mut self, value: impl Into<Option<ModePipeline>>) -> Self {
|
||||||
self.input
|
self.input.pipeline = value.into();
|
||||||
.sources
|
self
|
||||||
.insert(path.display().to_string(), read_to_string(path)?.into());
|
}
|
||||||
|
|
||||||
|
pub fn with_evm_version(mut self, version: impl Into<Option<EVMVersion>>) -> Self {
|
||||||
|
self.input.evm_version = version.into();
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_allow_path(mut self, path: impl AsRef<Path>) -> Self {
|
||||||
|
self.input.allow_paths.push(path.as_ref().into());
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_base_path(mut self, path: impl Into<Option<PathBuf>>) -> Self {
|
||||||
|
self.input.base_path = path.into();
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_source(mut self, path: impl AsRef<Path>) -> Result<Self> {
|
||||||
|
self.input.sources.insert(
|
||||||
|
path.as_ref().to_path_buf(),
|
||||||
|
read_to_string(path.as_ref()).context("Failed to read the contract source")?,
|
||||||
|
);
|
||||||
Ok(self)
|
Ok(self)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn evm_version(mut self, evm_version: EVMVersion) -> Self {
|
pub fn with_library(
|
||||||
self.input.settings.evm_version = Some(evm_version);
|
mut self,
|
||||||
|
path: impl AsRef<Path>,
|
||||||
|
name: impl AsRef<str>,
|
||||||
|
address: Address,
|
||||||
|
) -> Self {
|
||||||
|
self.input
|
||||||
|
.libraries
|
||||||
|
.entry(path.as_ref().to_path_buf())
|
||||||
|
.or_default()
|
||||||
|
.insert(name.as_ref().into(), address);
|
||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn extra_options(mut self, extra_options: T::Options) -> Self {
|
pub fn with_revert_string_handling(
|
||||||
self.extra_options = extra_options;
|
mut self,
|
||||||
|
revert_string_handling: impl Into<Option<RevertString>>,
|
||||||
|
) -> Self {
|
||||||
|
self.input.revert_string_handling = revert_string_handling.into();
|
||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn allow_path(mut self, path: PathBuf) -> Self {
|
pub fn then(self, callback: impl FnOnce(Self) -> Self) -> Self {
|
||||||
self.allow_paths.push(path);
|
callback(self)
|
||||||
self
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn base_path(mut self, base_path: PathBuf) -> Self {
|
pub fn try_then<E>(self, callback: impl FnOnce(Self) -> Result<Self, E>) -> Result<Self, E> {
|
||||||
self.base_path = Some(base_path);
|
callback(self)
|
||||||
self
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn try_build(self, solc_path: PathBuf) -> anyhow::Result<CompilerOutput<T::Options>> {
|
pub async fn try_build(self, compiler: &dyn SolidityCompiler) -> Result<CompilerOutput> {
|
||||||
T::new(solc_path).build(CompilerInput {
|
compiler.build(self.input).await
|
||||||
extra_options: self.extra_options,
|
|
||||||
input: self.input,
|
|
||||||
allow_paths: self.allow_paths,
|
|
||||||
base_path: self.base_path,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the compiler JSON input.
|
pub fn input(&self) -> &CompilerInput {
|
||||||
pub fn input(&self) -> SolcStandardJsonInput {
|
&self.input
|
||||||
self.input.clone()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Defines how the compiler should handle revert strings.
|
||||||
|
#[derive(
|
||||||
|
Clone, Debug, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize,
|
||||||
|
)]
|
||||||
|
pub enum RevertString {
|
||||||
|
#[default]
|
||||||
|
Default,
|
||||||
|
Debug,
|
||||||
|
Strip,
|
||||||
|
VerboseDebug,
|
||||||
|
}
|
||||||
|
|||||||
@@ -3,152 +3,361 @@
|
|||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
path::PathBuf,
|
path::PathBuf,
|
||||||
process::{Command, Stdio},
|
pin::Pin,
|
||||||
|
process::Stdio,
|
||||||
|
sync::{Arc, LazyLock},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{CompilerInput, CompilerOutput, SolidityCompiler};
|
use dashmap::DashMap;
|
||||||
use revive_dt_config::Arguments;
|
use revive_dt_common::types::VersionOrRequirement;
|
||||||
use revive_solc_json_interface::SolcStandardJsonOutput;
|
use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
|
||||||
|
use revive_solc_json_interface::{
|
||||||
|
PolkaVMDefaultHeapMemorySize, PolkaVMDefaultStackMemorySize, SolcStandardJsonInput,
|
||||||
|
SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
|
||||||
|
SolcStandardJsonInputSettingsLibraries, SolcStandardJsonInputSettingsMetadata,
|
||||||
|
SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsPolkaVM,
|
||||||
|
SolcStandardJsonInputSettingsPolkaVMMemory, SolcStandardJsonInputSettingsSelection,
|
||||||
|
SolcStandardJsonOutput, standard_json::input::settings::optimizer::Optimizer,
|
||||||
|
standard_json::input::settings::optimizer::details::Details,
|
||||||
|
};
|
||||||
|
use tracing::{Span, field::display};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler, solc::Solc,
|
||||||
|
};
|
||||||
|
|
||||||
|
use alloy::json_abi::JsonAbi;
|
||||||
|
use anyhow::{Context as _, Result};
|
||||||
|
use semver::Version;
|
||||||
|
use std::collections::BTreeSet;
|
||||||
|
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
|
||||||
|
|
||||||
/// A wrapper around the `resolc` binary, emitting PVM-compatible bytecode.
|
/// A wrapper around the `resolc` binary, emitting PVM-compatible bytecode.
|
||||||
#[derive(Debug)]
|
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||||
pub struct Resolc {
|
pub struct Resolc(Arc<ResolcInner>);
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||||
|
struct ResolcInner {
|
||||||
|
/// The internal solc compiler that the resolc compiler uses as a compiler frontend.
|
||||||
|
solc: Solc,
|
||||||
/// Path to the `resolc` executable
|
/// Path to the `resolc` executable
|
||||||
resolc_path: PathBuf,
|
resolc_path: PathBuf,
|
||||||
|
/// The PVM heap size in bytes.
|
||||||
|
pvm_heap_size: u32,
|
||||||
|
/// The PVM stack size in bytes.
|
||||||
|
pvm_stack_size: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Resolc {
|
||||||
|
pub async fn new(
|
||||||
|
context: impl AsRef<SolcConfiguration>
|
||||||
|
+ AsRef<ResolcConfiguration>
|
||||||
|
+ AsRef<WorkingDirectoryConfiguration>,
|
||||||
|
version: impl Into<Option<VersionOrRequirement>>,
|
||||||
|
) -> Result<Self> {
|
||||||
|
/// This is a cache of all of the resolc compiler objects. Since we do not currently support
|
||||||
|
/// multiple resolc compiler versions, so our cache is just keyed by the solc compiler and
|
||||||
|
/// its version to the resolc compiler.
|
||||||
|
static COMPILERS_CACHE: LazyLock<DashMap<Solc, Resolc>> = LazyLock::new(Default::default);
|
||||||
|
|
||||||
|
let resolc_configuration = AsRef::<ResolcConfiguration>::as_ref(&context);
|
||||||
|
|
||||||
|
let solc = Solc::new(&context, version)
|
||||||
|
.await
|
||||||
|
.context("Failed to create the solc compiler frontend for resolc")?;
|
||||||
|
|
||||||
|
Ok(COMPILERS_CACHE
|
||||||
|
.entry(solc.clone())
|
||||||
|
.or_insert_with(|| {
|
||||||
|
Self(Arc::new(ResolcInner {
|
||||||
|
solc,
|
||||||
|
resolc_path: resolc_configuration.path.clone(),
|
||||||
|
pvm_heap_size: resolc_configuration
|
||||||
|
.heap_size
|
||||||
|
.unwrap_or(PolkaVMDefaultHeapMemorySize),
|
||||||
|
pvm_stack_size: resolc_configuration
|
||||||
|
.stack_size
|
||||||
|
.unwrap_or(PolkaVMDefaultStackMemorySize),
|
||||||
|
}))
|
||||||
|
})
|
||||||
|
.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn polkavm_settings(&self) -> SolcStandardJsonInputSettingsPolkaVM {
|
||||||
|
SolcStandardJsonInputSettingsPolkaVM::new(
|
||||||
|
Some(SolcStandardJsonInputSettingsPolkaVMMemory::new(
|
||||||
|
Some(self.0.pvm_heap_size),
|
||||||
|
Some(self.0.pvm_stack_size),
|
||||||
|
)),
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn inject_polkavm_settings(&self, input: &SolcStandardJsonInput) -> Result<serde_json::Value> {
|
||||||
|
let mut input_value = serde_json::to_value(input)
|
||||||
|
.context("Failed to serialize Standard JSON input for resolc")?;
|
||||||
|
if let Some(settings) = input_value.get_mut("settings") {
|
||||||
|
settings["polkavm"] = serde_json::to_value(self.polkavm_settings()).unwrap();
|
||||||
|
}
|
||||||
|
Ok(input_value)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SolidityCompiler for Resolc {
|
impl SolidityCompiler for Resolc {
|
||||||
type Options = Vec<String>;
|
fn version(&self) -> &Version {
|
||||||
|
// We currently return the solc compiler version since we do not support multiple resolc
|
||||||
|
// compiler versions.
|
||||||
|
SolidityCompiler::version(&self.0.solc)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn path(&self) -> &std::path::Path {
|
||||||
|
&self.0.resolc_path
|
||||||
|
}
|
||||||
|
|
||||||
#[tracing::instrument(level = "debug", ret)]
|
#[tracing::instrument(level = "debug", ret)]
|
||||||
|
#[tracing::instrument(
|
||||||
|
level = "error",
|
||||||
|
skip_all,
|
||||||
|
fields(
|
||||||
|
resolc_version = %self.version(),
|
||||||
|
solc_version = %self.0.solc.version(),
|
||||||
|
json_in = tracing::field::Empty
|
||||||
|
),
|
||||||
|
err(Debug)
|
||||||
|
)]
|
||||||
fn build(
|
fn build(
|
||||||
&self,
|
&self,
|
||||||
input: CompilerInput<Self::Options>,
|
CompilerInput {
|
||||||
) -> anyhow::Result<CompilerOutput<Self::Options>> {
|
pipeline,
|
||||||
let mut command = Command::new(&self.resolc_path);
|
optimization,
|
||||||
|
evm_version,
|
||||||
|
allow_paths,
|
||||||
|
base_path,
|
||||||
|
sources,
|
||||||
|
libraries,
|
||||||
|
// TODO: this is currently not being handled since there is no way to pass it into
|
||||||
|
// resolc. So, we need to go back to this later once it's supported.
|
||||||
|
revert_string_handling: _,
|
||||||
|
}: CompilerInput,
|
||||||
|
) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) {
|
||||||
|
anyhow::bail!(
|
||||||
|
"Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let input = SolcStandardJsonInput {
|
||||||
|
language: SolcStandardJsonInputLanguage::Solidity,
|
||||||
|
sources: sources
|
||||||
|
.into_iter()
|
||||||
|
.map(|(path, source)| (path.display().to_string(), source.into()))
|
||||||
|
.collect(),
|
||||||
|
settings: SolcStandardJsonInputSettings {
|
||||||
|
evm_version,
|
||||||
|
libraries: SolcStandardJsonInputSettingsLibraries {
|
||||||
|
inner: libraries
|
||||||
|
.into_iter()
|
||||||
|
.map(|(source_code, libraries_map)| {
|
||||||
|
(
|
||||||
|
source_code.display().to_string(),
|
||||||
|
libraries_map
|
||||||
|
.into_iter()
|
||||||
|
.map(|(library_ident, library_address)| {
|
||||||
|
(library_ident, library_address.to_string())
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
},
|
||||||
|
remappings: BTreeSet::<String>::new(),
|
||||||
|
output_selection: SolcStandardJsonInputSettingsSelection::new_required(),
|
||||||
|
via_ir: Some(true),
|
||||||
|
optimizer: SolcStandardJsonInputSettingsOptimizer::new(
|
||||||
|
optimization
|
||||||
|
.unwrap_or(ModeOptimizerSetting::M0)
|
||||||
|
.optimizations_enabled(),
|
||||||
|
Optimizer::default_mode(),
|
||||||
|
Details::disabled(&Version::new(0, 0, 0)),
|
||||||
|
),
|
||||||
|
polkavm: self.polkavm_settings(),
|
||||||
|
metadata: SolcStandardJsonInputSettingsMetadata::default(),
|
||||||
|
detect_missing_libraries: false,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
// Manually inject polkavm settings since it's marked skip_serializing in the upstream crate
|
||||||
|
let std_input_json = self.inject_polkavm_settings(&input)?;
|
||||||
|
|
||||||
|
Span::current().record(
|
||||||
|
"json_in",
|
||||||
|
display(serde_json::to_string(&std_input_json).unwrap()),
|
||||||
|
);
|
||||||
|
|
||||||
|
let path = &self.0.resolc_path;
|
||||||
|
let mut command = AsyncCommand::new(path);
|
||||||
command
|
command
|
||||||
.stdin(Stdio::piped())
|
.stdin(Stdio::piped())
|
||||||
.stdout(Stdio::piped())
|
.stdout(Stdio::piped())
|
||||||
.stderr(Stdio::piped())
|
.stderr(Stdio::piped())
|
||||||
|
.arg("--solc")
|
||||||
|
.arg(self.0.solc.path())
|
||||||
.arg("--standard-json");
|
.arg("--standard-json");
|
||||||
|
|
||||||
if let Some(ref base_path) = input.base_path {
|
if let Some(ref base_path) = base_path {
|
||||||
command.arg("--base-path").arg(base_path);
|
command.arg("--base-path").arg(base_path);
|
||||||
}
|
}
|
||||||
if !input.allow_paths.is_empty() {
|
if !allow_paths.is_empty() {
|
||||||
command.arg("--allow-paths").arg(
|
command.arg("--allow-paths").arg(
|
||||||
input
|
allow_paths
|
||||||
.allow_paths
|
|
||||||
.iter()
|
.iter()
|
||||||
.map(|path| path.display().to_string())
|
.map(|path| path.display().to_string())
|
||||||
.collect::<Vec<_>>()
|
.collect::<Vec<_>>()
|
||||||
.join(","),
|
.join(","),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
let mut child = command.spawn()?;
|
let mut child = command
|
||||||
|
.spawn()
|
||||||
|
.with_context(|| format!("Failed to spawn resolc at {}", path.display()))?;
|
||||||
|
|
||||||
let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
|
let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
|
||||||
serde_json::to_writer(stdin_pipe, &input.input)?;
|
let serialized_input = serde_json::to_vec(&std_input_json)
|
||||||
|
.context("Failed to serialize Standard JSON input for resolc")?;
|
||||||
|
|
||||||
let json_in = serde_json::to_string_pretty(&input.input)?;
|
stdin_pipe
|
||||||
|
.write_all(&serialized_input)
|
||||||
|
.await
|
||||||
|
.context("Failed to write Standard JSON to resolc stdin")?;
|
||||||
|
|
||||||
let output = child.wait_with_output()?;
|
let output = child
|
||||||
|
.wait_with_output()
|
||||||
|
.await
|
||||||
|
.context("Failed while waiting for resolc process to finish")?;
|
||||||
let stdout = output.stdout;
|
let stdout = output.stdout;
|
||||||
let stderr = output.stderr;
|
let stderr = output.stderr;
|
||||||
|
|
||||||
if !output.status.success() {
|
if !output.status.success() {
|
||||||
|
let json_in = serde_json::to_string_pretty(&input)
|
||||||
|
.context("Failed to pretty-print Standard JSON input for logging")?;
|
||||||
let message = String::from_utf8_lossy(&stderr);
|
let message = String::from_utf8_lossy(&stderr);
|
||||||
tracing::error!(
|
tracing::error!(
|
||||||
"resolc failed exit={} stderr={} JSON-in={} ",
|
status = %output.status,
|
||||||
output.status,
|
message = %message,
|
||||||
&message,
|
json_input = json_in,
|
||||||
json_in,
|
"Compilation using resolc failed"
|
||||||
);
|
);
|
||||||
return Ok(CompilerOutput {
|
anyhow::bail!("Compilation failed with an error: {message}");
|
||||||
input,
|
|
||||||
output: Default::default(),
|
|
||||||
error: Some(message.into()),
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut parsed =
|
let parsed: SolcStandardJsonOutput = {
|
||||||
serde_json::from_slice::<SolcStandardJsonOutput>(&stdout).map_err(|e| {
|
let mut deserializer = serde_json::Deserializer::from_slice(&stdout);
|
||||||
|
deserializer.disable_recursion_limit();
|
||||||
|
serde::de::Deserialize::deserialize(&mut deserializer)
|
||||||
|
.map_err(|e| {
|
||||||
anyhow::anyhow!(
|
anyhow::anyhow!(
|
||||||
"failed to parse resolc JSON output: {e}\nstderr: {}",
|
"failed to parse resolc JSON output: {e}\nstderr: {}",
|
||||||
String::from_utf8_lossy(&stderr)
|
String::from_utf8_lossy(&stderr)
|
||||||
)
|
)
|
||||||
})?;
|
})
|
||||||
|
.context("Failed to parse resolc standard JSON output")?
|
||||||
// Detecting if the compiler output contained errors and reporting them through logs and
|
|
||||||
// errors instead of returning the compiler output that might contain errors.
|
|
||||||
for error in parsed.errors.iter().flatten() {
|
|
||||||
if error.severity == "error" {
|
|
||||||
tracing::error!(?error, ?input, "Encountered an error in the compilation");
|
|
||||||
anyhow::bail!("Encountered an error in the compilation: {error}")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// We need to do some post processing on the output to make it in the same format that solc
|
|
||||||
// outputs. More specifically, for each contract, the `.metadata` field should be replaced
|
|
||||||
// with the `.metadata.solc_metadata` field which contains the ABI and other information
|
|
||||||
// about the compiled contracts. We do this because we do not want any downstream logic to
|
|
||||||
// need to differentiate between which compiler is being used when extracting the ABI of the
|
|
||||||
// contracts.
|
|
||||||
if let Some(ref mut contracts) = parsed.contracts {
|
|
||||||
for (contract_path, contracts_map) in contracts.iter_mut() {
|
|
||||||
for (contract_name, contract_info) in contracts_map.iter_mut() {
|
|
||||||
let Some(metadata) = contract_info.metadata.take() else {
|
|
||||||
continue;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Get the `solc_metadata` in the metadata of the contract.
|
|
||||||
let Some(solc_metadata) = metadata
|
|
||||||
.get("solc_metadata")
|
|
||||||
.and_then(|metadata| metadata.as_str())
|
|
||||||
else {
|
|
||||||
tracing::error!(
|
|
||||||
contract_path,
|
|
||||||
contract_name,
|
|
||||||
metadata = serde_json::to_string(&metadata).unwrap(),
|
|
||||||
"Encountered a contract compiled with resolc that has no solc_metadata"
|
|
||||||
);
|
|
||||||
anyhow::bail!(
|
|
||||||
"Contract {} compiled with resolc that has no solc_metadata",
|
|
||||||
contract_name
|
|
||||||
);
|
|
||||||
};
|
|
||||||
|
|
||||||
// Replace the original metadata with the new solc_metadata.
|
|
||||||
contract_info.metadata =
|
|
||||||
Some(serde_json::Value::String(solc_metadata.to_string()));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
tracing::debug!(
|
tracing::debug!(
|
||||||
output = %serde_json::to_string(&parsed).unwrap(),
|
output = %serde_json::to_string(&parsed).unwrap(),
|
||||||
"Compiled successfully"
|
"Compiled successfully"
|
||||||
);
|
);
|
||||||
|
|
||||||
Ok(CompilerOutput {
|
// Detecting if the compiler output contained errors and reporting them through logs and
|
||||||
input,
|
// errors instead of returning the compiler output that might contain errors.
|
||||||
output: parsed,
|
for error in parsed.errors.iter() {
|
||||||
error: None,
|
if error.severity == "error" {
|
||||||
|
tracing::error!(
|
||||||
|
?error,
|
||||||
|
?input,
|
||||||
|
output = %serde_json::to_string(&parsed).unwrap(),
|
||||||
|
"Encountered an error in the compilation"
|
||||||
|
);
|
||||||
|
anyhow::bail!("Encountered an error in the compilation: {error}")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if parsed.contracts.is_empty() {
|
||||||
|
anyhow::bail!("Unexpected error - resolc output doesn't have a contracts section");
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut compiler_output = CompilerOutput::default();
|
||||||
|
for (source_path, contracts) in parsed.contracts.into_iter() {
|
||||||
|
let src_for_msg = source_path.clone();
|
||||||
|
let source_path = PathBuf::from(source_path)
|
||||||
|
.canonicalize()
|
||||||
|
.with_context(|| format!("Failed to canonicalize path {src_for_msg}"))?;
|
||||||
|
|
||||||
|
let map = compiler_output.contracts.entry(source_path).or_default();
|
||||||
|
for (contract_name, contract_information) in contracts.into_iter() {
|
||||||
|
let Some(bytecode) = contract_information
|
||||||
|
.evm
|
||||||
|
.and_then(|evm| evm.bytecode.clone())
|
||||||
|
else {
|
||||||
|
tracing::debug!(
|
||||||
|
"Skipping abstract or interface contract {} - no bytecode",
|
||||||
|
contract_name
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
let abi = {
|
||||||
|
let metadata = &contract_information.metadata;
|
||||||
|
if metadata.is_null() {
|
||||||
|
anyhow::bail!("No metadata found for the contract");
|
||||||
|
}
|
||||||
|
|
||||||
|
let solc_metadata_str = match metadata {
|
||||||
|
serde_json::Value::String(solc_metadata_str) => {
|
||||||
|
solc_metadata_str.as_str()
|
||||||
|
}
|
||||||
|
serde_json::Value::Object(metadata_object) => {
|
||||||
|
let solc_metadata_value = metadata_object
|
||||||
|
.get("solc_metadata")
|
||||||
|
.context("Contract doesn't have a 'solc_metadata' field")?;
|
||||||
|
solc_metadata_value
|
||||||
|
.as_str()
|
||||||
|
.context("The 'solc_metadata' field is not a string")?
|
||||||
|
}
|
||||||
|
serde_json::Value::Null
|
||||||
|
| serde_json::Value::Bool(_)
|
||||||
|
| serde_json::Value::Number(_)
|
||||||
|
| serde_json::Value::Array(_) => {
|
||||||
|
anyhow::bail!("Unsupported type of metadata {metadata:?}")
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let solc_metadata = serde_json::from_str::<serde_json::Value>(
|
||||||
|
solc_metadata_str,
|
||||||
|
)
|
||||||
|
.context(
|
||||||
|
"Failed to deserialize the solc_metadata as a serde_json generic value",
|
||||||
|
)?;
|
||||||
|
let output_value = solc_metadata
|
||||||
|
.get("output")
|
||||||
|
.context("solc_metadata doesn't have an output field")?;
|
||||||
|
let abi_value = output_value
|
||||||
|
.get("abi")
|
||||||
|
.context("solc_metadata output doesn't contain an abi field")?;
|
||||||
|
serde_json::from_value::<JsonAbi>(abi_value.clone())
|
||||||
|
.context("ABI found in solc_metadata output is not valid ABI")?
|
||||||
|
};
|
||||||
|
map.insert(contract_name, (bytecode.object, abi));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(compiler_output)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn new(resolc_path: PathBuf) -> Self {
|
fn supports_mode(
|
||||||
Resolc { resolc_path }
|
&self,
|
||||||
}
|
optimize_setting: ModeOptimizerSetting,
|
||||||
|
pipeline: ModePipeline,
|
||||||
fn get_compiler_executable(
|
) -> bool {
|
||||||
config: &Arguments,
|
pipeline == ModePipeline::ViaYulIR
|
||||||
_version: semver::Version,
|
&& SolidityCompiler::supports_mode(&self.0.solc, optimize_setting, pipeline)
|
||||||
) -> anyhow::Result<PathBuf> {
|
|
||||||
if !config.resolc.as_os_str().is_empty() {
|
|
||||||
return Ok(config.resolc.clone());
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(PathBuf::from("resolc"))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
+241
-45
@@ -3,75 +3,241 @@
|
|||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
path::PathBuf,
|
path::PathBuf,
|
||||||
process::{Command, Stdio},
|
pin::Pin,
|
||||||
|
process::Stdio,
|
||||||
|
sync::{Arc, LazyLock},
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::{CompilerInput, CompilerOutput, SolidityCompiler};
|
use dashmap::DashMap;
|
||||||
use revive_dt_config::Arguments;
|
use revive_dt_common::types::VersionOrRequirement;
|
||||||
|
use revive_dt_config::{SolcConfiguration, WorkingDirectoryConfiguration};
|
||||||
use revive_dt_solc_binaries::download_solc;
|
use revive_dt_solc_binaries::download_solc;
|
||||||
use revive_solc_json_interface::SolcStandardJsonOutput;
|
use tracing::{Span, field::display, info};
|
||||||
|
|
||||||
#[derive(Debug)]
|
use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler};
|
||||||
pub struct Solc {
|
|
||||||
|
use anyhow::{Context as _, Result};
|
||||||
|
use foundry_compilers_artifacts::{
|
||||||
|
output_selection::{
|
||||||
|
BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
|
||||||
|
},
|
||||||
|
solc::CompilerOutput as SolcOutput,
|
||||||
|
solc::*,
|
||||||
|
};
|
||||||
|
use semver::Version;
|
||||||
|
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||||
|
pub struct Solc(Arc<SolcInner>);
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||||
|
struct SolcInner {
|
||||||
|
/// The path of the solidity compiler executable that this object uses.
|
||||||
solc_path: PathBuf,
|
solc_path: PathBuf,
|
||||||
|
/// The version of the solidity compiler executable that this object uses.
|
||||||
|
solc_version: Version,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Solc {
|
||||||
|
pub async fn new(
|
||||||
|
context: impl AsRef<SolcConfiguration> + AsRef<WorkingDirectoryConfiguration>,
|
||||||
|
version: impl Into<Option<VersionOrRequirement>>,
|
||||||
|
) -> Result<Self> {
|
||||||
|
// This is a cache for the compiler objects so that whenever the same compiler version is
|
||||||
|
// requested the same object is returned. We do this as we do not want to keep cloning the
|
||||||
|
// compiler around.
|
||||||
|
static COMPILERS_CACHE: LazyLock<DashMap<(PathBuf, Version), Solc>> =
|
||||||
|
LazyLock::new(Default::default);
|
||||||
|
|
||||||
|
let working_directory_configuration =
|
||||||
|
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
|
||||||
|
let solc_configuration = AsRef::<SolcConfiguration>::as_ref(&context);
|
||||||
|
|
||||||
|
// We attempt to download the solc binary. Note the following: this call does the version
|
||||||
|
// resolution for us. Therefore, even if the download didn't proceed, this function will
|
||||||
|
// resolve the version requirement into a canonical version of the compiler. It's then up
|
||||||
|
// to us to either use the provided path or not.
|
||||||
|
let version = version
|
||||||
|
.into()
|
||||||
|
.unwrap_or_else(|| solc_configuration.version.clone().into());
|
||||||
|
let (version, path) =
|
||||||
|
download_solc(working_directory_configuration.as_path(), version, false)
|
||||||
|
.await
|
||||||
|
.context("Failed to download/get path to solc binary")?;
|
||||||
|
|
||||||
|
Ok(COMPILERS_CACHE
|
||||||
|
.entry((path.clone(), version.clone()))
|
||||||
|
.or_insert_with(|| {
|
||||||
|
info!(
|
||||||
|
solc_path = %path.display(),
|
||||||
|
solc_version = %version,
|
||||||
|
"Created a new solc compiler object"
|
||||||
|
);
|
||||||
|
Self(Arc::new(SolcInner {
|
||||||
|
solc_path: path,
|
||||||
|
solc_version: version,
|
||||||
|
}))
|
||||||
|
})
|
||||||
|
.clone())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SolidityCompiler for Solc {
|
impl SolidityCompiler for Solc {
|
||||||
type Options = ();
|
fn version(&self) -> &Version {
|
||||||
|
&self.0.solc_version
|
||||||
|
}
|
||||||
|
|
||||||
|
fn path(&self) -> &std::path::Path {
|
||||||
|
&self.0.solc_path
|
||||||
|
}
|
||||||
|
|
||||||
#[tracing::instrument(level = "debug", ret)]
|
#[tracing::instrument(level = "debug", ret)]
|
||||||
|
#[tracing::instrument(
|
||||||
|
level = "error",
|
||||||
|
skip_all,
|
||||||
|
fields(json_in = tracing::field::Empty),
|
||||||
|
err(Debug)
|
||||||
|
)]
|
||||||
fn build(
|
fn build(
|
||||||
&self,
|
&self,
|
||||||
input: CompilerInput<Self::Options>,
|
CompilerInput {
|
||||||
) -> anyhow::Result<CompilerOutput<Self::Options>> {
|
pipeline,
|
||||||
let mut command = Command::new(&self.solc_path);
|
optimization,
|
||||||
|
evm_version,
|
||||||
|
allow_paths,
|
||||||
|
base_path,
|
||||||
|
sources,
|
||||||
|
libraries,
|
||||||
|
revert_string_handling,
|
||||||
|
}: CompilerInput,
|
||||||
|
) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
// Be careful to entirely omit the viaIR field if the compiler does not support it,
|
||||||
|
// as it will error if you provide fields it does not know about. Because
|
||||||
|
// `supports_mode` is called prior to instantiating a compiler, we should never
|
||||||
|
// ask for something which is invalid.
|
||||||
|
let via_ir = match (pipeline, self.compiler_supports_yul()) {
|
||||||
|
(pipeline, true) => pipeline.map(|p| p.via_yul_ir()),
|
||||||
|
(_pipeline, false) => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
let input = SolcInput {
|
||||||
|
language: SolcLanguage::Solidity,
|
||||||
|
sources: Sources(
|
||||||
|
sources
|
||||||
|
.into_iter()
|
||||||
|
.map(|(source_path, source_code)| (source_path, Source::new(source_code)))
|
||||||
|
.collect(),
|
||||||
|
),
|
||||||
|
settings: Settings {
|
||||||
|
optimizer: Optimizer {
|
||||||
|
enabled: optimization.map(|o| o.optimizations_enabled()),
|
||||||
|
details: Some(Default::default()),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
output_selection: OutputSelection::common_output_selection(
|
||||||
|
[
|
||||||
|
ContractOutputSelection::Abi,
|
||||||
|
ContractOutputSelection::Evm(EvmOutputSelection::ByteCode(
|
||||||
|
BytecodeOutputSelection::Object,
|
||||||
|
)),
|
||||||
|
]
|
||||||
|
.into_iter()
|
||||||
|
.map(|item| item.to_string()),
|
||||||
|
),
|
||||||
|
evm_version: evm_version.map(|version| version.to_string().parse().unwrap()),
|
||||||
|
via_ir,
|
||||||
|
libraries: Libraries {
|
||||||
|
libs: libraries
|
||||||
|
.into_iter()
|
||||||
|
.map(|(file_path, libraries)| {
|
||||||
|
(
|
||||||
|
file_path,
|
||||||
|
libraries
|
||||||
|
.into_iter()
|
||||||
|
.map(|(library_name, library_address)| {
|
||||||
|
(library_name, library_address.to_string())
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
},
|
||||||
|
debug: revert_string_handling.map(|revert_string_handling| DebuggingSettings {
|
||||||
|
revert_strings: match revert_string_handling {
|
||||||
|
crate::RevertString::Default => Some(RevertStrings::Default),
|
||||||
|
crate::RevertString::Debug => Some(RevertStrings::Debug),
|
||||||
|
crate::RevertString::Strip => Some(RevertStrings::Strip),
|
||||||
|
crate::RevertString::VerboseDebug => Some(RevertStrings::VerboseDebug),
|
||||||
|
},
|
||||||
|
debug_info: Default::default(),
|
||||||
|
}),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
Span::current().record("json_in", display(serde_json::to_string(&input).unwrap()));
|
||||||
|
|
||||||
|
let path = &self.0.solc_path;
|
||||||
|
let mut command = AsyncCommand::new(path);
|
||||||
command
|
command
|
||||||
.stdin(Stdio::piped())
|
.stdin(Stdio::piped())
|
||||||
.stdout(Stdio::piped())
|
.stdout(Stdio::piped())
|
||||||
.stderr(Stdio::piped())
|
.stderr(Stdio::null())
|
||||||
.arg("--standard-json");
|
.arg("--standard-json");
|
||||||
|
|
||||||
if let Some(ref base_path) = input.base_path {
|
if let Some(ref base_path) = base_path {
|
||||||
command.arg("--base-path").arg(base_path);
|
command.arg("--base-path").arg(base_path);
|
||||||
}
|
}
|
||||||
if !input.allow_paths.is_empty() {
|
if !allow_paths.is_empty() {
|
||||||
command.arg("--allow-paths").arg(
|
command.arg("--allow-paths").arg(
|
||||||
input
|
allow_paths
|
||||||
.allow_paths
|
|
||||||
.iter()
|
.iter()
|
||||||
.map(|path| path.display().to_string())
|
.map(|path| path.display().to_string())
|
||||||
.collect::<Vec<_>>()
|
.collect::<Vec<_>>()
|
||||||
.join(","),
|
.join(","),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
let mut child = command.spawn()?;
|
let mut child = command
|
||||||
|
.spawn()
|
||||||
|
.with_context(|| format!("Failed to spawn solc at {}", path.display()))?;
|
||||||
|
|
||||||
let stdin = child.stdin.as_mut().expect("should be piped");
|
let stdin = child.stdin.as_mut().expect("should be piped");
|
||||||
serde_json::to_writer(stdin, &input.input)?;
|
let serialized_input = serde_json::to_vec(&input)
|
||||||
let output = child.wait_with_output()?;
|
.context("Failed to serialize Standard JSON input for solc")?;
|
||||||
|
stdin
|
||||||
|
.write_all(&serialized_input)
|
||||||
|
.await
|
||||||
|
.context("Failed to write Standard JSON to solc stdin")?;
|
||||||
|
let output = child
|
||||||
|
.wait_with_output()
|
||||||
|
.await
|
||||||
|
.context("Failed while waiting for solc process to finish")?;
|
||||||
|
|
||||||
if !output.status.success() {
|
if !output.status.success() {
|
||||||
let message = String::from_utf8_lossy(&output.stderr);
|
let json_in = serde_json::to_string_pretty(&input)
|
||||||
tracing::error!("solc failed exit={} stderr={}", output.status, &message);
|
.context("Failed to pretty-print Standard JSON input for logging")?;
|
||||||
return Ok(CompilerOutput {
|
tracing::error!(
|
||||||
input,
|
status = %output.status,
|
||||||
output: Default::default(),
|
json_input = json_in,
|
||||||
error: Some(message.into()),
|
"Compilation using solc failed"
|
||||||
});
|
);
|
||||||
|
anyhow::bail!("Compilation failed");
|
||||||
}
|
}
|
||||||
|
|
||||||
let parsed =
|
let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout)
|
||||||
serde_json::from_slice::<SolcStandardJsonOutput>(&output.stdout).map_err(|e| {
|
.map_err(|e| {
|
||||||
anyhow::anyhow!(
|
anyhow::anyhow!(
|
||||||
"failed to parse resolc JSON output: {e}\nstderr: {}",
|
"failed to parse resolc JSON output: {e}\nstdout: {}",
|
||||||
String::from_utf8_lossy(&output.stdout)
|
String::from_utf8_lossy(&output.stdout)
|
||||||
)
|
)
|
||||||
})?;
|
})
|
||||||
|
.context("Failed to parse solc standard JSON output")?;
|
||||||
|
|
||||||
// Detecting if the compiler output contained errors and reporting them through logs and
|
// Detecting if the compiler output contained errors and reporting them through logs and
|
||||||
// errors instead of returning the compiler output that might contain errors.
|
// errors instead of returning the compiler output that might contain errors.
|
||||||
for error in parsed.errors.iter().flatten() {
|
for error in parsed.errors.iter() {
|
||||||
if error.severity == "error" {
|
if error.severity == Severity::Error {
|
||||||
tracing::error!(?error, ?input, "Encountered an error in the compilation");
|
tracing::error!(?error, ?input, "Encountered an error in the compilation");
|
||||||
anyhow::bail!("Encountered an error in the compilation: {error}")
|
anyhow::bail!("Encountered an error in the compilation: {error}")
|
||||||
}
|
}
|
||||||
@@ -82,22 +248,52 @@ impl SolidityCompiler for Solc {
|
|||||||
"Compiled successfully"
|
"Compiled successfully"
|
||||||
);
|
);
|
||||||
|
|
||||||
Ok(CompilerOutput {
|
let mut compiler_output = CompilerOutput::default();
|
||||||
input,
|
for (contract_path, contracts) in parsed.contracts {
|
||||||
output: parsed,
|
let map = compiler_output
|
||||||
error: None,
|
.contracts
|
||||||
|
.entry(contract_path.canonicalize().with_context(|| {
|
||||||
|
format!(
|
||||||
|
"Failed to canonicalize contract path {}",
|
||||||
|
contract_path.display()
|
||||||
|
)
|
||||||
|
})?)
|
||||||
|
.or_default();
|
||||||
|
for (contract_name, contract_info) in contracts.into_iter() {
|
||||||
|
let source_code = contract_info
|
||||||
|
.evm
|
||||||
|
.and_then(|evm| evm.bytecode)
|
||||||
|
.map(|bytecode| match bytecode.object {
|
||||||
|
BytecodeObject::Bytecode(bytecode) => bytecode.to_string(),
|
||||||
|
BytecodeObject::Unlinked(unlinked) => unlinked,
|
||||||
|
})
|
||||||
|
.context("Unexpected - contract compiled with solc has no source code")?;
|
||||||
|
let abi = contract_info
|
||||||
|
.abi
|
||||||
|
.context("Unexpected - contract compiled with solc as no ABI")?;
|
||||||
|
map.insert(contract_name, (source_code, abi));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(compiler_output)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn new(solc_path: PathBuf) -> Self {
|
fn supports_mode(
|
||||||
Self { solc_path }
|
&self,
|
||||||
}
|
_optimize_setting: ModeOptimizerSetting,
|
||||||
|
pipeline: ModePipeline,
|
||||||
fn get_compiler_executable(
|
) -> bool {
|
||||||
config: &Arguments,
|
// solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support mode E
|
||||||
version: semver::Version,
|
// (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler is new enough.
|
||||||
) -> anyhow::Result<PathBuf> {
|
pipeline == ModePipeline::ViaEVMAssembly
|
||||||
let path = download_solc(config.directory(), version, config.wasm)?;
|
|| (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul())
|
||||||
Ok(path)
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Solc {
|
||||||
|
fn compiler_supports_yul(&self) -> bool {
|
||||||
|
const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);
|
||||||
|
SolidityCompiler::version(self) >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,9 @@
|
|||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
|
pragma solidity >=0.6.9;
|
||||||
|
|
||||||
|
contract Callable {
|
||||||
|
function f(uint[1] memory p1) public pure returns(uint) {
|
||||||
|
return p1[0];
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,16 @@
|
|||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
|
// Report https://linear.app/matterlabs/issue/CPR-269/call-with-calldata-variable-bug
|
||||||
|
|
||||||
|
pragma solidity >=0.6.9;
|
||||||
|
|
||||||
|
import "./callable.sol";
|
||||||
|
|
||||||
|
contract Main {
|
||||||
|
function main(
|
||||||
|
uint[1] calldata p1,
|
||||||
|
Callable callable
|
||||||
|
) public pure returns (uint) {
|
||||||
|
return callable.f(p1);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,21 @@
|
|||||||
|
{ "cases": [ {
|
||||||
|
"name": "first",
|
||||||
|
"inputs": [
|
||||||
|
{
|
||||||
|
"instance": "Main",
|
||||||
|
"method": "main",
|
||||||
|
"calldata": [
|
||||||
|
"1",
|
||||||
|
"Callable.address"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"expected": [
|
||||||
|
"1"
|
||||||
|
]
|
||||||
|
} ],
|
||||||
|
"contracts": {
|
||||||
|
"Main": "main.sol:Main",
|
||||||
|
"Callable": "callable.sol:Callable"
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,88 @@
|
|||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
use revive_dt_common::types::VersionOrRequirement;
|
||||||
|
use revive_dt_compiler::{Compiler, revive_resolc::Resolc, solc::Solc};
|
||||||
|
use revive_dt_config::TestExecutionContext;
|
||||||
|
use semver::Version;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn contracts_can_be_compiled_with_solc() {
|
||||||
|
// Arrange
|
||||||
|
let args = TestExecutionContext::default();
|
||||||
|
let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// Act
|
||||||
|
let output = Compiler::new()
|
||||||
|
.with_source("./tests/assets/array_one_element/callable.sol")
|
||||||
|
.unwrap()
|
||||||
|
.with_source("./tests/assets/array_one_element/main.sol")
|
||||||
|
.unwrap()
|
||||||
|
.try_build(&solc)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
// Assert
|
||||||
|
let output = output.expect("Failed to compile");
|
||||||
|
assert_eq!(output.contracts.len(), 2);
|
||||||
|
|
||||||
|
let main_file_contracts = output
|
||||||
|
.contracts
|
||||||
|
.get(
|
||||||
|
&PathBuf::from("./tests/assets/array_one_element/main.sol")
|
||||||
|
.canonicalize()
|
||||||
|
.unwrap(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
let callable_file_contracts = output
|
||||||
|
.contracts
|
||||||
|
.get(
|
||||||
|
&PathBuf::from("./tests/assets/array_one_element/callable.sol")
|
||||||
|
.canonicalize()
|
||||||
|
.unwrap(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
assert!(main_file_contracts.contains_key("Main"));
|
||||||
|
assert!(callable_file_contracts.contains_key("Callable"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn contracts_can_be_compiled_with_resolc() {
|
||||||
|
// Arrange
|
||||||
|
let args = TestExecutionContext::default();
|
||||||
|
let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// Act
|
||||||
|
let output = Compiler::new()
|
||||||
|
.with_source("./tests/assets/array_one_element/callable.sol")
|
||||||
|
.unwrap()
|
||||||
|
.with_source("./tests/assets/array_one_element/main.sol")
|
||||||
|
.unwrap()
|
||||||
|
.try_build(&resolc)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
// Assert
|
||||||
|
let output = output.expect("Failed to compile");
|
||||||
|
assert_eq!(output.contracts.len(), 2);
|
||||||
|
|
||||||
|
let main_file_contracts = output
|
||||||
|
.contracts
|
||||||
|
.get(
|
||||||
|
&PathBuf::from("./tests/assets/array_one_element/main.sol")
|
||||||
|
.canonicalize()
|
||||||
|
.unwrap(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
let callable_file_contracts = output
|
||||||
|
.contracts
|
||||||
|
.get(
|
||||||
|
&PathBuf::from("./tests/assets/array_one_element/callable.sol")
|
||||||
|
.canonicalize()
|
||||||
|
.unwrap(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
assert!(main_file_contracts.contains_key("Main"));
|
||||||
|
assert!(callable_file_contracts.contains_key("Callable"));
|
||||||
|
}
|
||||||
@@ -9,9 +9,17 @@ repository.workspace = true
|
|||||||
rust-version.workspace = true
|
rust-version.workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
revive-dt-common = { workspace = true }
|
||||||
|
|
||||||
alloy = { workspace = true }
|
alloy = { workspace = true }
|
||||||
|
anyhow = { workspace = true }
|
||||||
clap = { workspace = true }
|
clap = { workspace = true }
|
||||||
semver = { workspace = true }
|
semver = { workspace = true }
|
||||||
temp-dir = { workspace = true }
|
temp-dir = { workspace = true }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
|
serde_json = { workspace = true }
|
||||||
|
serde_with = { workspace = true }
|
||||||
|
strum = { workspace = true }
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
|||||||
+1240
-118
File diff suppressed because it is too large
Load Diff
+14
-3
@@ -13,6 +13,7 @@ name = "retester"
|
|||||||
path = "src/main.rs"
|
path = "src/main.rs"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
revive-dt-common = { workspace = true }
|
||||||
revive-dt-compiler = { workspace = true }
|
revive-dt-compiler = { workspace = true }
|
||||||
revive-dt-config = { workspace = true }
|
revive-dt-config = { workspace = true }
|
||||||
revive-dt-format = { workspace = true }
|
revive-dt-format = { workspace = true }
|
||||||
@@ -20,13 +21,23 @@ revive-dt-node = { workspace = true }
|
|||||||
revive-dt-node-interaction = { workspace = true }
|
revive-dt-node-interaction = { workspace = true }
|
||||||
revive-dt-report = { workspace = true }
|
revive-dt-report = { workspace = true }
|
||||||
|
|
||||||
|
ansi_term = { workspace = true }
|
||||||
alloy = { workspace = true }
|
alloy = { workspace = true }
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
|
bson = { workspace = true }
|
||||||
|
cacache = { workspace = true }
|
||||||
clap = { workspace = true }
|
clap = { workspace = true }
|
||||||
|
futures = { workspace = true }
|
||||||
indexmap = { workspace = true }
|
indexmap = { workspace = true }
|
||||||
|
tokio = { workspace = true }
|
||||||
tracing = { workspace = true }
|
tracing = { workspace = true }
|
||||||
|
tracing-appender = { workspace = true }
|
||||||
tracing-subscriber = { workspace = true }
|
tracing-subscriber = { workspace = true }
|
||||||
rayon = { workspace = true }
|
schemars = { workspace = true }
|
||||||
revive-solc-json-interface = { workspace = true }
|
semver = { workspace = true }
|
||||||
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
temp-dir = { workspace = true }
|
subxt = { workspace = true }
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
|||||||
@@ -0,0 +1,749 @@
|
|||||||
|
use std::{
|
||||||
|
collections::HashMap,
|
||||||
|
sync::{
|
||||||
|
Arc,
|
||||||
|
atomic::{AtomicUsize, Ordering},
|
||||||
|
},
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
|
||||||
|
use alloy::{
|
||||||
|
hex,
|
||||||
|
json_abi::JsonAbi,
|
||||||
|
network::{Ethereum, TransactionBuilder},
|
||||||
|
primitives::{Address, TxHash, U256},
|
||||||
|
providers::Provider,
|
||||||
|
rpc::types::{
|
||||||
|
TransactionReceipt, TransactionRequest,
|
||||||
|
trace::geth::{
|
||||||
|
CallFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType,
|
||||||
|
GethDebugTracingOptions,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
use anyhow::{Context as _, Result, bail};
|
||||||
|
use futures::{FutureExt as _, TryFutureExt};
|
||||||
|
use indexmap::IndexMap;
|
||||||
|
use revive_dt_common::types::PrivateKeyAllocator;
|
||||||
|
use revive_dt_format::{
|
||||||
|
metadata::{ContractInstance, ContractPathAndIdent},
|
||||||
|
steps::{
|
||||||
|
AllocateAccountStep, Calldata, EtherValue, FunctionCallStep, Method, RepeatStep, Step,
|
||||||
|
StepIdx, StepPath,
|
||||||
|
},
|
||||||
|
traits::{ResolutionContext, ResolverApi},
|
||||||
|
};
|
||||||
|
use tokio::sync::{Mutex, OnceCell, mpsc::UnboundedSender};
|
||||||
|
use tracing::{Span, debug, error, field::display, info, instrument};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
differential_benchmarks::{ExecutionState, WatcherEvent},
|
||||||
|
helpers::{CachedCompiler, TestDefinition, TestPlatformInformation},
|
||||||
|
};
|
||||||
|
|
||||||
|
static DRIVER_COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||||
|
|
||||||
|
/// The differential tests driver for a single platform.
|
||||||
|
pub struct Driver<'a, I> {
|
||||||
|
/// The id of the driver.
|
||||||
|
driver_id: usize,
|
||||||
|
|
||||||
|
/// The information of the platform that this driver is for.
|
||||||
|
platform_information: &'a TestPlatformInformation<'a>,
|
||||||
|
|
||||||
|
/// The resolver of the platform.
|
||||||
|
resolver: Arc<dyn ResolverApi + 'a>,
|
||||||
|
|
||||||
|
/// The definition of the test that the driver is instructed to execute.
|
||||||
|
test_definition: &'a TestDefinition<'a>,
|
||||||
|
|
||||||
|
/// The private key allocator used by this driver and other drivers when account allocations are
|
||||||
|
/// needed.
|
||||||
|
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
|
||||||
|
|
||||||
|
/// The execution state associated with the platform.
|
||||||
|
execution_state: ExecutionState,
|
||||||
|
|
||||||
|
/// The send side of the watcher's unbounded channel associated with this driver.
|
||||||
|
watcher_tx: UnboundedSender<WatcherEvent>,
|
||||||
|
|
||||||
|
/// The number of steps that were executed on the driver.
|
||||||
|
steps_executed: usize,
|
||||||
|
|
||||||
|
/// This function controls if the driver should wait for transactions to be included in a block
|
||||||
|
/// or not before proceeding forward.
|
||||||
|
await_transaction_inclusion: bool,
|
||||||
|
|
||||||
|
/// This is the queue of steps that are to be executed by the driver for this test case. Each
|
||||||
|
/// time `execute_step` is called one of the steps is executed.
|
||||||
|
steps_iterator: I,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a, I> Driver<'a, I>
|
||||||
|
where
|
||||||
|
I: Iterator<Item = (StepPath, Step)>,
|
||||||
|
{
|
||||||
|
// region:Constructors & Initialization
|
||||||
|
pub async fn new(
|
||||||
|
platform_information: &'a TestPlatformInformation<'a>,
|
||||||
|
test_definition: &'a TestDefinition<'a>,
|
||||||
|
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
|
||||||
|
cached_compiler: &CachedCompiler<'a>,
|
||||||
|
watcher_tx: UnboundedSender<WatcherEvent>,
|
||||||
|
await_transaction_inclusion: bool,
|
||||||
|
steps: I,
|
||||||
|
) -> Result<Self> {
|
||||||
|
let mut this = Driver {
|
||||||
|
driver_id: DRIVER_COUNT.fetch_add(1, Ordering::SeqCst),
|
||||||
|
platform_information,
|
||||||
|
resolver: platform_information
|
||||||
|
.node
|
||||||
|
.resolver()
|
||||||
|
.await
|
||||||
|
.context("Failed to create resolver")?,
|
||||||
|
test_definition,
|
||||||
|
private_key_allocator,
|
||||||
|
execution_state: ExecutionState::empty(),
|
||||||
|
steps_executed: 0,
|
||||||
|
steps_iterator: steps,
|
||||||
|
await_transaction_inclusion,
|
||||||
|
watcher_tx,
|
||||||
|
};
|
||||||
|
this.init_execution_state(cached_compiler)
|
||||||
|
.await
|
||||||
|
.context("Failed to initialize the execution state of the platform")?;
|
||||||
|
Ok(this)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn init_execution_state(&mut self, cached_compiler: &CachedCompiler<'a>) -> Result<()> {
|
||||||
|
let compiler_output = cached_compiler
|
||||||
|
.compile_contracts(
|
||||||
|
self.test_definition.metadata,
|
||||||
|
self.test_definition.metadata_file_path,
|
||||||
|
self.test_definition.mode.clone(),
|
||||||
|
None,
|
||||||
|
self.platform_information.compiler.as_ref(),
|
||||||
|
self.platform_information.platform,
|
||||||
|
&self.platform_information.reporter,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.inspect_err(|err| error!(?err, "Pre-linking compilation failed"))
|
||||||
|
.context("Failed to produce the pre-linking compiled contracts")?;
|
||||||
|
|
||||||
|
let deployer_address = self.test_definition.case.deployer_address();
|
||||||
|
|
||||||
|
let mut deployed_libraries = None::<HashMap<_, _>>;
|
||||||
|
let mut contract_sources = self
|
||||||
|
.test_definition
|
||||||
|
.metadata
|
||||||
|
.contract_sources()
|
||||||
|
.inspect_err(|err| error!(?err, "Failed to retrieve contract sources from metadata"))
|
||||||
|
.context("Failed to get the contract instances from the metadata file")?;
|
||||||
|
for library_instance in self
|
||||||
|
.test_definition
|
||||||
|
.metadata
|
||||||
|
.libraries
|
||||||
|
.iter()
|
||||||
|
.flatten()
|
||||||
|
.flat_map(|(_, map)| map.values())
|
||||||
|
{
|
||||||
|
debug!(%library_instance, "Deploying Library Instance");
|
||||||
|
|
||||||
|
let ContractPathAndIdent {
|
||||||
|
contract_source_path: library_source_path,
|
||||||
|
contract_ident: library_ident,
|
||||||
|
} = contract_sources
|
||||||
|
.remove(library_instance)
|
||||||
|
.context("Failed to get the contract sources of the contract instance")?;
|
||||||
|
|
||||||
|
let (code, abi) = compiler_output
|
||||||
|
.contracts
|
||||||
|
.get(&library_source_path)
|
||||||
|
.and_then(|contracts| contracts.get(library_ident.as_str()))
|
||||||
|
.context("Failed to get the code and abi for the instance")?;
|
||||||
|
|
||||||
|
let code = alloy::hex::decode(code)?;
|
||||||
|
|
||||||
|
let tx = TransactionBuilder::<Ethereum>::with_deploy_code(
|
||||||
|
TransactionRequest::default().from(deployer_address),
|
||||||
|
code,
|
||||||
|
);
|
||||||
|
let receipt = self
|
||||||
|
.execute_transaction(tx, None, Duration::from_secs(5 * 60))
|
||||||
|
.and_then(|(_, receipt_fut)| receipt_fut)
|
||||||
|
.await
|
||||||
|
.inspect_err(|err| {
|
||||||
|
error!(
|
||||||
|
?err,
|
||||||
|
%library_instance,
|
||||||
|
"Failed to deploy the library"
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
debug!(?library_instance, "Deployed library");
|
||||||
|
|
||||||
|
let library_address = receipt
|
||||||
|
.contract_address
|
||||||
|
.expect("Failed to deploy the library");
|
||||||
|
|
||||||
|
deployed_libraries.get_or_insert_default().insert(
|
||||||
|
library_instance.clone(),
|
||||||
|
(library_ident.clone(), library_address, abi.clone()),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let compiler_output = cached_compiler
|
||||||
|
.compile_contracts(
|
||||||
|
self.test_definition.metadata,
|
||||||
|
self.test_definition.metadata_file_path,
|
||||||
|
self.test_definition.mode.clone(),
|
||||||
|
deployed_libraries.as_ref(),
|
||||||
|
self.platform_information.compiler.as_ref(),
|
||||||
|
self.platform_information.platform,
|
||||||
|
&self.platform_information.reporter,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.inspect_err(|err| error!(?err, "Post-linking compilation failed"))
|
||||||
|
.context("Failed to compile the post-link contracts")?;
|
||||||
|
|
||||||
|
for (contract_path, contract_name_to_info_mapping) in compiler_output.contracts.iter() {
|
||||||
|
for (contract_name, (contract_bytecode, _)) in contract_name_to_info_mapping.iter() {
|
||||||
|
let contract_bytecode = hex::decode(contract_bytecode)
|
||||||
|
.expect("Impossible for us to get an undecodable bytecode after linking");
|
||||||
|
|
||||||
|
self.platform_information
|
||||||
|
.reporter
|
||||||
|
.report_contract_information_event(
|
||||||
|
contract_path.to_path_buf(),
|
||||||
|
contract_name.clone(),
|
||||||
|
contract_bytecode.len(),
|
||||||
|
)
|
||||||
|
.expect("Should not fail");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
self.execution_state = ExecutionState::new(
|
||||||
|
compiler_output.contracts,
|
||||||
|
deployed_libraries.unwrap_or_default(),
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
// endregion:Constructors & Initialization
|
||||||
|
|
||||||
|
// region:Step Handling
|
||||||
|
pub async fn execute_all(mut self) -> Result<usize> {
|
||||||
|
while let Some(result) = self.execute_next_step().await {
|
||||||
|
result?
|
||||||
|
}
|
||||||
|
Ok(self.steps_executed)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn execute_next_step(&mut self) -> Option<Result<()>> {
|
||||||
|
let (step_path, step) = self.steps_iterator.next()?;
|
||||||
|
info!(%step_path, "Executing Step");
|
||||||
|
Some(
|
||||||
|
self.execute_step(&step_path, &step)
|
||||||
|
.await
|
||||||
|
.inspect(|_| info!(%step_path, "Step execution succeeded"))
|
||||||
|
.inspect_err(|err| error!(%step_path, ?err, "Step execution failed")),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(
|
||||||
|
level = "info",
|
||||||
|
skip_all,
|
||||||
|
fields(
|
||||||
|
driver_id = self.driver_id,
|
||||||
|
%step_path,
|
||||||
|
),
|
||||||
|
err(Debug),
|
||||||
|
)]
|
||||||
|
async fn execute_step(&mut self, step_path: &StepPath, step: &Step) -> Result<()> {
|
||||||
|
let steps_executed = match step {
|
||||||
|
Step::FunctionCall(step) => self
|
||||||
|
.execute_function_call(step_path, step.as_ref())
|
||||||
|
.await
|
||||||
|
.context("Function call step Failed"),
|
||||||
|
Step::Repeat(step) => self
|
||||||
|
.execute_repeat_step(step_path, step.as_ref())
|
||||||
|
.await
|
||||||
|
.context("Repetition Step Failed"),
|
||||||
|
Step::AllocateAccount(step) => self
|
||||||
|
.execute_account_allocation(step_path, step.as_ref())
|
||||||
|
.await
|
||||||
|
.context("Account Allocation Step Failed"),
|
||||||
|
// The following steps are disabled in the benchmarking driver.
|
||||||
|
Step::BalanceAssertion(..) | Step::StorageEmptyAssertion(..) => Ok(0),
|
||||||
|
}?;
|
||||||
|
self.steps_executed += steps_executed;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id))]
|
||||||
|
pub async fn execute_function_call(
|
||||||
|
&mut self,
|
||||||
|
step_path: &StepPath,
|
||||||
|
step: &FunctionCallStep,
|
||||||
|
) -> Result<usize> {
|
||||||
|
let deployment_receipts = self
|
||||||
|
.handle_function_call_contract_deployment(step_path, step)
|
||||||
|
.await
|
||||||
|
.context("Failed to deploy contracts for the function call step")?;
|
||||||
|
let transaction_hash = self
|
||||||
|
.handle_function_call_execution(step_path, step, deployment_receipts)
|
||||||
|
.await
|
||||||
|
.context("Failed to handle the function call execution")?;
|
||||||
|
self.handle_function_call_variable_assignment(step, transaction_hash)
|
||||||
|
.await
|
||||||
|
.context("Failed to handle function call variable assignment")?;
|
||||||
|
Ok(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_function_call_contract_deployment(
|
||||||
|
&mut self,
|
||||||
|
step_path: &StepPath,
|
||||||
|
step: &FunctionCallStep,
|
||||||
|
) -> Result<HashMap<ContractInstance, TransactionReceipt>> {
|
||||||
|
let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
|
||||||
|
for instance in step.find_all_contract_instances().into_iter() {
|
||||||
|
if !self
|
||||||
|
.execution_state
|
||||||
|
.deployed_contracts
|
||||||
|
.contains_key(&instance)
|
||||||
|
{
|
||||||
|
instances_we_must_deploy.entry(instance).or_insert(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if let Method::Deployer = step.method {
|
||||||
|
instances_we_must_deploy.swap_remove(&step.instance);
|
||||||
|
instances_we_must_deploy.insert(step.instance.clone(), true);
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut receipts = HashMap::new();
|
||||||
|
for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() {
|
||||||
|
let calldata = deploy_with_constructor_arguments.then_some(&step.calldata);
|
||||||
|
let value = deploy_with_constructor_arguments
|
||||||
|
.then_some(step.value)
|
||||||
|
.flatten();
|
||||||
|
|
||||||
|
let caller = {
|
||||||
|
let context = self.default_resolution_context();
|
||||||
|
step.caller
|
||||||
|
.resolve_address(self.resolver.as_ref(), context)
|
||||||
|
.await?
|
||||||
|
};
|
||||||
|
if let (_, _, Some(receipt)) = self
|
||||||
|
.get_or_deploy_contract_instance(
|
||||||
|
&instance,
|
||||||
|
caller,
|
||||||
|
calldata,
|
||||||
|
value,
|
||||||
|
Some(step_path),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.context("Failed to get or deploy contract instance during input execution")?
|
||||||
|
{
|
||||||
|
receipts.insert(instance.clone(), receipt);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(receipts)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_function_call_execution(
|
||||||
|
&mut self,
|
||||||
|
step_path: &StepPath,
|
||||||
|
step: &FunctionCallStep,
|
||||||
|
mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
|
||||||
|
) -> Result<TxHash> {
|
||||||
|
match step.method {
|
||||||
|
// This step was already executed when `handle_step` was called. We just need to
|
||||||
|
// lookup the transaction receipt in this case and continue on.
|
||||||
|
Method::Deployer => deployment_receipts
|
||||||
|
.remove(&step.instance)
|
||||||
|
.context("Failed to find deployment receipt for constructor call")
|
||||||
|
.map(|receipt| receipt.transaction_hash),
|
||||||
|
Method::Fallback | Method::FunctionName(_) => {
|
||||||
|
let tx = step
|
||||||
|
.as_transaction(self.resolver.as_ref(), self.default_resolution_context())
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let (tx_hash, receipt_future) = self
|
||||||
|
.execute_transaction(tx.clone(), Some(step_path), Duration::from_secs(30 * 60))
|
||||||
|
.await?;
|
||||||
|
if self.await_transaction_inclusion {
|
||||||
|
let receipt = receipt_future
|
||||||
|
.await
|
||||||
|
.context("Failed while waiting for transaction inclusion in block")?;
|
||||||
|
|
||||||
|
if !receipt.status() {
|
||||||
|
error!(
|
||||||
|
?tx,
|
||||||
|
tx.hash = %receipt.transaction_hash,
|
||||||
|
?receipt,
|
||||||
|
"Encountered a failing benchmark transaction"
|
||||||
|
);
|
||||||
|
bail!(
|
||||||
|
"Encountered a failing transaction in benchmarks: {}",
|
||||||
|
receipt.transaction_hash
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(tx_hash)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_function_call_call_frame_tracing(
|
||||||
|
&mut self,
|
||||||
|
tx_hash: TxHash,
|
||||||
|
) -> Result<CallFrame> {
|
||||||
|
self.platform_information
|
||||||
|
.node
|
||||||
|
.trace_transaction(
|
||||||
|
tx_hash,
|
||||||
|
GethDebugTracingOptions {
|
||||||
|
tracer: Some(GethDebugTracerType::BuiltInTracer(
|
||||||
|
GethDebugBuiltInTracerType::CallTracer,
|
||||||
|
)),
|
||||||
|
tracer_config: GethDebugTracerConfig(serde_json::json! {{
|
||||||
|
"onlyTopCall": true,
|
||||||
|
"withLog": false,
|
||||||
|
"withStorage": false,
|
||||||
|
"withMemory": false,
|
||||||
|
"withStack": false,
|
||||||
|
"withReturnData": true
|
||||||
|
}}),
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map(|trace| {
|
||||||
|
trace
|
||||||
|
.try_into_call_frame()
|
||||||
|
.expect("Impossible - we requested a callframe trace so we must get it back")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_function_call_variable_assignment(
|
||||||
|
&mut self,
|
||||||
|
step: &FunctionCallStep,
|
||||||
|
tx_hash: TxHash,
|
||||||
|
) -> Result<()> {
|
||||||
|
let Some(ref assignments) = step.variable_assignments else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
|
||||||
|
// Handling the return data variable assignments.
|
||||||
|
let callframe = OnceCell::new();
|
||||||
|
for (variable_name, output_word) in assignments.return_data.iter().zip(
|
||||||
|
callframe
|
||||||
|
.get_or_try_init(|| self.handle_function_call_call_frame_tracing(tx_hash))
|
||||||
|
.await
|
||||||
|
.context("Failed to get the callframe trace for transaction")?
|
||||||
|
.output
|
||||||
|
.as_ref()
|
||||||
|
.unwrap_or_default()
|
||||||
|
.to_vec()
|
||||||
|
.chunks(32),
|
||||||
|
) {
|
||||||
|
let value = U256::from_be_slice(output_word);
|
||||||
|
self.execution_state
|
||||||
|
.variables
|
||||||
|
.insert(variable_name.clone(), value);
|
||||||
|
tracing::info!(
|
||||||
|
variable_name,
|
||||||
|
variable_value = hex::encode(value.to_be_bytes::<32>()),
|
||||||
|
"Assigned variable"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id), err(Debug))]
|
||||||
|
async fn execute_repeat_step(
|
||||||
|
&mut self,
|
||||||
|
step_path: &StepPath,
|
||||||
|
step: &RepeatStep,
|
||||||
|
) -> Result<usize> {
|
||||||
|
let tasks = (0..step.repeat)
|
||||||
|
.map(|_| Driver {
|
||||||
|
driver_id: DRIVER_COUNT.fetch_add(1, Ordering::SeqCst),
|
||||||
|
platform_information: self.platform_information,
|
||||||
|
resolver: self.resolver.clone(),
|
||||||
|
test_definition: self.test_definition,
|
||||||
|
private_key_allocator: self.private_key_allocator.clone(),
|
||||||
|
execution_state: self.execution_state.clone(),
|
||||||
|
steps_executed: 0,
|
||||||
|
steps_iterator: {
|
||||||
|
let steps = step
|
||||||
|
.steps
|
||||||
|
.iter()
|
||||||
|
.cloned()
|
||||||
|
.enumerate()
|
||||||
|
.map(|(step_idx, step)| {
|
||||||
|
let step_idx = StepIdx::new(step_idx);
|
||||||
|
let step_path = step_path.append(step_idx);
|
||||||
|
(step_path, step)
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
steps.into_iter()
|
||||||
|
},
|
||||||
|
await_transaction_inclusion: self.await_transaction_inclusion,
|
||||||
|
watcher_tx: self.watcher_tx.clone(),
|
||||||
|
})
|
||||||
|
.map(|driver| driver.execute_all());
|
||||||
|
|
||||||
|
// TODO: Determine how we want to know the `ignore_block_before` and if it's through the
|
||||||
|
// receipt and how this would impact the architecture and the possibility of us not waiting
|
||||||
|
// for receipts in the future.
|
||||||
|
self.watcher_tx
|
||||||
|
.send(WatcherEvent::RepetitionStartEvent {
|
||||||
|
ignore_block_before: 0,
|
||||||
|
})
|
||||||
|
.context("Failed to send message on the watcher's tx")?;
|
||||||
|
|
||||||
|
let res = futures::future::try_join_all(tasks)
|
||||||
|
.await
|
||||||
|
.context("Repetition execution failed")?;
|
||||||
|
Ok(res.into_iter().sum())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", fields(driver_id = self.driver_id), skip_all, err(Debug))]
|
||||||
|
pub async fn execute_account_allocation(
|
||||||
|
&mut self,
|
||||||
|
_: &StepPath,
|
||||||
|
step: &AllocateAccountStep,
|
||||||
|
) -> Result<usize> {
|
||||||
|
let Some(variable_name) = step.variable_name.strip_prefix("$VARIABLE:") else {
|
||||||
|
bail!("Account allocation must start with $VARIABLE:");
|
||||||
|
};
|
||||||
|
|
||||||
|
let private_key = self
|
||||||
|
.private_key_allocator
|
||||||
|
.lock()
|
||||||
|
.await
|
||||||
|
.allocate()
|
||||||
|
.context("Account allocation through the private key allocator failed")?;
|
||||||
|
let account = private_key.address();
|
||||||
|
let variable = U256::from_be_slice(account.0.as_slice());
|
||||||
|
|
||||||
|
self.execution_state
|
||||||
|
.variables
|
||||||
|
.insert(variable_name.to_string(), variable);
|
||||||
|
|
||||||
|
Ok(1)
|
||||||
|
}
|
||||||
|
// endregion:Step Handling
|
||||||
|
|
||||||
|
// region:Contract Deployment
|
||||||
|
#[instrument(
|
||||||
|
level = "info",
|
||||||
|
skip_all,
|
||||||
|
fields(
|
||||||
|
driver_id = self.driver_id,
|
||||||
|
%contract_instance,
|
||||||
|
%deployer
|
||||||
|
),
|
||||||
|
err(Debug),
|
||||||
|
)]
|
||||||
|
async fn get_or_deploy_contract_instance(
|
||||||
|
&mut self,
|
||||||
|
contract_instance: &ContractInstance,
|
||||||
|
deployer: Address,
|
||||||
|
calldata: Option<&Calldata>,
|
||||||
|
value: Option<EtherValue>,
|
||||||
|
step_path: Option<&StepPath>,
|
||||||
|
) -> Result<(Address, JsonAbi, Option<TransactionReceipt>)> {
|
||||||
|
if let Some((_, address, abi)) = self
|
||||||
|
.execution_state
|
||||||
|
.deployed_contracts
|
||||||
|
.get(contract_instance)
|
||||||
|
{
|
||||||
|
info!(
|
||||||
|
|
||||||
|
%address,
|
||||||
|
"Contract instance already deployed."
|
||||||
|
);
|
||||||
|
Ok((*address, abi.clone(), None))
|
||||||
|
} else {
|
||||||
|
info!("Contract instance requires deployment.");
|
||||||
|
let (address, abi, receipt) = self
|
||||||
|
.deploy_contract(contract_instance, deployer, calldata, value, step_path)
|
||||||
|
.await
|
||||||
|
.context("Failed to deploy contract")?;
|
||||||
|
info!(
|
||||||
|
%address,
|
||||||
|
"Contract instance has been deployed."
|
||||||
|
);
|
||||||
|
Ok((address, abi, Some(receipt)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(
|
||||||
|
level = "info",
|
||||||
|
skip_all,
|
||||||
|
fields(
|
||||||
|
driver_id = self.driver_id,
|
||||||
|
%contract_instance,
|
||||||
|
%deployer
|
||||||
|
),
|
||||||
|
err(Debug),
|
||||||
|
)]
|
||||||
|
async fn deploy_contract(
|
||||||
|
&mut self,
|
||||||
|
contract_instance: &ContractInstance,
|
||||||
|
deployer: Address,
|
||||||
|
calldata: Option<&Calldata>,
|
||||||
|
value: Option<EtherValue>,
|
||||||
|
step_path: Option<&StepPath>,
|
||||||
|
) -> Result<(Address, JsonAbi, TransactionReceipt)> {
|
||||||
|
let Some(ContractPathAndIdent {
|
||||||
|
contract_source_path,
|
||||||
|
contract_ident,
|
||||||
|
}) = self
|
||||||
|
.test_definition
|
||||||
|
.metadata
|
||||||
|
.contract_sources()?
|
||||||
|
.remove(contract_instance)
|
||||||
|
else {
|
||||||
|
anyhow::bail!(
|
||||||
|
"Contract source not found for instance {:?}",
|
||||||
|
contract_instance
|
||||||
|
)
|
||||||
|
};
|
||||||
|
|
||||||
|
let Some((code, abi)) = self
|
||||||
|
.execution_state
|
||||||
|
.compiled_contracts
|
||||||
|
.get(&contract_source_path)
|
||||||
|
.and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref()))
|
||||||
|
.cloned()
|
||||||
|
else {
|
||||||
|
anyhow::bail!(
|
||||||
|
"Failed to find information for contract {:?}",
|
||||||
|
contract_instance
|
||||||
|
)
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut code = match alloy::hex::decode(&code) {
|
||||||
|
Ok(code) => code,
|
||||||
|
Err(error) => {
|
||||||
|
tracing::error!(
|
||||||
|
?error,
|
||||||
|
contract_source_path = contract_source_path.display().to_string(),
|
||||||
|
contract_ident = contract_ident.as_ref(),
|
||||||
|
"Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking"
|
||||||
|
);
|
||||||
|
anyhow::bail!("Failed to hex-decode the byte code {}", error)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(calldata) = calldata {
|
||||||
|
let calldata = calldata
|
||||||
|
.calldata(self.resolver.as_ref(), self.default_resolution_context())
|
||||||
|
.await?;
|
||||||
|
code.extend(calldata);
|
||||||
|
}
|
||||||
|
|
||||||
|
let tx = {
|
||||||
|
let tx = TransactionRequest::default().from(deployer);
|
||||||
|
let tx = match value {
|
||||||
|
Some(ref value) => tx.value(value.into_inner()),
|
||||||
|
_ => tx,
|
||||||
|
};
|
||||||
|
TransactionBuilder::<Ethereum>::with_deploy_code(tx, code)
|
||||||
|
};
|
||||||
|
|
||||||
|
let receipt = match self
|
||||||
|
.execute_transaction(tx, step_path, Duration::from_secs(5 * 60))
|
||||||
|
.and_then(|(_, receipt_fut)| receipt_fut)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(receipt) => receipt,
|
||||||
|
Err(error) => {
|
||||||
|
tracing::error!(?error, "Contract deployment transaction failed.");
|
||||||
|
return Err(error);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let Some(address) = receipt.contract_address else {
|
||||||
|
anyhow::bail!("Contract deployment didn't return an address");
|
||||||
|
};
|
||||||
|
tracing::info!(
|
||||||
|
instance_name = ?contract_instance,
|
||||||
|
instance_address = ?address,
|
||||||
|
"Deployed contract"
|
||||||
|
);
|
||||||
|
self.platform_information
|
||||||
|
.reporter
|
||||||
|
.report_contract_deployed_event(contract_instance.clone(), address)?;
|
||||||
|
|
||||||
|
self.execution_state.deployed_contracts.insert(
|
||||||
|
contract_instance.clone(),
|
||||||
|
(contract_ident, address, abi.clone()),
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok((address, abi, receipt))
|
||||||
|
}
|
||||||
|
// endregion:Contract Deployment
|
||||||
|
|
||||||
|
// region:Resolution & Resolver
|
||||||
|
fn default_resolution_context(&self) -> ResolutionContext<'_> {
|
||||||
|
ResolutionContext::default()
|
||||||
|
.with_deployed_contracts(&self.execution_state.deployed_contracts)
|
||||||
|
.with_variables(&self.execution_state.variables)
|
||||||
|
}
|
||||||
|
// endregion:Resolution & Resolver
|
||||||
|
|
||||||
|
// region:Transaction Execution
|
||||||
|
/// Executes the transaction on the driver's node with some custom waiting logic for the receipt
|
||||||
|
#[instrument(
|
||||||
|
level = "info",
|
||||||
|
skip_all,
|
||||||
|
fields(
|
||||||
|
driver_id = self.driver_id,
|
||||||
|
transaction = ?transaction,
|
||||||
|
transaction_hash = tracing::field::Empty
|
||||||
|
),
|
||||||
|
err(Debug)
|
||||||
|
)]
|
||||||
|
async fn execute_transaction(
|
||||||
|
&self,
|
||||||
|
transaction: TransactionRequest,
|
||||||
|
step_path: Option<&StepPath>,
|
||||||
|
receipt_wait_duration: Duration,
|
||||||
|
) -> anyhow::Result<(TxHash, impl Future<Output = Result<TransactionReceipt>>)> {
|
||||||
|
let node = self.platform_information.node;
|
||||||
|
let provider = node.provider().await.context("Creating provider failed")?;
|
||||||
|
|
||||||
|
let pending_transaction_builder = provider
|
||||||
|
.send_transaction(transaction)
|
||||||
|
.await
|
||||||
|
.context("Failed to submit transaction")?;
|
||||||
|
|
||||||
|
let transaction_hash = *pending_transaction_builder.tx_hash();
|
||||||
|
let receipt_future = pending_transaction_builder
|
||||||
|
.with_timeout(Some(receipt_wait_duration))
|
||||||
|
.with_required_confirmations(2)
|
||||||
|
.get_receipt()
|
||||||
|
.map(|res| res.context("Failed to get the receipt of the transaction"));
|
||||||
|
Span::current().record("transaction_hash", display(transaction_hash));
|
||||||
|
|
||||||
|
info!("Submitted transaction");
|
||||||
|
if let Some(step_path) = step_path {
|
||||||
|
self.watcher_tx
|
||||||
|
.send(WatcherEvent::SubmittedTransaction {
|
||||||
|
transaction_hash,
|
||||||
|
step_path: step_path.clone(),
|
||||||
|
})
|
||||||
|
.context("Failed to send the transaction hash to the watcher")?;
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok((transaction_hash, receipt_future))
|
||||||
|
}
|
||||||
|
// endregion:Transaction Execution
|
||||||
|
}
|
||||||
@@ -0,0 +1,195 @@
|
|||||||
|
//! The main entry point for differential benchmarking.
|
||||||
|
|
||||||
|
use std::{collections::BTreeMap, sync::Arc};
|
||||||
|
|
||||||
|
use anyhow::Context as _;
|
||||||
|
use futures::{FutureExt, StreamExt};
|
||||||
|
use revive_dt_common::types::PrivateKeyAllocator;
|
||||||
|
use revive_dt_core::Platform;
|
||||||
|
use revive_dt_format::{
|
||||||
|
corpus::Corpus,
|
||||||
|
steps::{Step, StepIdx, StepPath},
|
||||||
|
};
|
||||||
|
use tokio::sync::Mutex;
|
||||||
|
use tracing::{Instrument, error, info, info_span, instrument, warn};
|
||||||
|
|
||||||
|
use revive_dt_config::{BenchmarkingContext, Context};
|
||||||
|
use revive_dt_report::Reporter;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
differential_benchmarks::{Driver, Watcher, WatcherEvent},
|
||||||
|
helpers::{CachedCompiler, NodePool, create_test_definitions_stream},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Handles the differential testing executing it according to the information defined in the
|
||||||
|
/// context
|
||||||
|
#[instrument(level = "info", err(Debug), skip_all)]
|
||||||
|
pub async fn handle_differential_benchmarks(
|
||||||
|
mut context: BenchmarkingContext,
|
||||||
|
reporter: Reporter,
|
||||||
|
) -> anyhow::Result<()> {
|
||||||
|
// A bit of a hack but we need to override the number of nodes specified through the CLI since
|
||||||
|
// benchmarks can only be run on a single node. Perhaps in the future we'd have a cleaner way to
|
||||||
|
// do this. But, for the time being, we need to override the cli arguments.
|
||||||
|
if context.concurrency_configuration.number_of_nodes != 1 {
|
||||||
|
warn!(
|
||||||
|
specified_number_of_nodes = context.concurrency_configuration.number_of_nodes,
|
||||||
|
updated_number_of_nodes = 1,
|
||||||
|
"Invalid number of nodes specified through the CLI. Benchmarks can only be run on a single node. Updated the arguments."
|
||||||
|
);
|
||||||
|
context.concurrency_configuration.number_of_nodes = 1;
|
||||||
|
};
|
||||||
|
let full_context = Context::Benchmark(Box::new(context.clone()));
|
||||||
|
|
||||||
|
// Discover all of the metadata files that are defined in the context.
|
||||||
|
let corpus = context
|
||||||
|
.corpus_configuration
|
||||||
|
.test_specifiers
|
||||||
|
.clone()
|
||||||
|
.into_iter()
|
||||||
|
.try_fold(Corpus::default(), Corpus::with_test_specifier)
|
||||||
|
.context("Failed to parse the test corpus")?;
|
||||||
|
info!(
|
||||||
|
len = corpus.metadata_file_count(),
|
||||||
|
"Discovered metadata files"
|
||||||
|
);
|
||||||
|
|
||||||
|
// Discover the list of platforms that the tests should run on based on the context.
|
||||||
|
let platforms = context
|
||||||
|
.platforms
|
||||||
|
.iter()
|
||||||
|
.copied()
|
||||||
|
.map(Into::<&dyn Platform>::into)
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
// Starting the nodes of the various platforms specified in the context. Note that we use the
|
||||||
|
// node pool since it contains all of the code needed to spawn nodes from A to Z and therefore
|
||||||
|
// it's the preferred way for us to start nodes even when we're starting just a single node. The
|
||||||
|
// added overhead from it is quite small (performance wise) since it's involved only when we're
|
||||||
|
// creating the test definitions, but it might have other maintenance overhead as it obscures
|
||||||
|
// the fact that only a single node is spawned.
|
||||||
|
let platforms_and_nodes = {
|
||||||
|
let mut map = BTreeMap::new();
|
||||||
|
|
||||||
|
for platform in platforms.iter() {
|
||||||
|
let platform_identifier = platform.platform_identifier();
|
||||||
|
|
||||||
|
let node_pool = NodePool::new(full_context.clone(), *platform)
|
||||||
|
.await
|
||||||
|
.inspect_err(|err| {
|
||||||
|
error!(
|
||||||
|
?err,
|
||||||
|
%platform_identifier,
|
||||||
|
"Failed to initialize the node pool for the platform."
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.context("Failed to initialize the node pool")?;
|
||||||
|
|
||||||
|
map.insert(platform_identifier, (*platform, node_pool));
|
||||||
|
}
|
||||||
|
|
||||||
|
map
|
||||||
|
};
|
||||||
|
info!("Spawned the platform nodes");
|
||||||
|
|
||||||
|
// Preparing test definitions for the execution.
|
||||||
|
let test_definitions = create_test_definitions_stream(
|
||||||
|
&full_context,
|
||||||
|
&corpus,
|
||||||
|
&platforms_and_nodes,
|
||||||
|
None,
|
||||||
|
reporter.clone(),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.await;
|
||||||
|
info!(len = test_definitions.len(), "Created test definitions");
|
||||||
|
|
||||||
|
// Creating the objects that will be shared between the various runs. The cached compiler is the
|
||||||
|
// only one at the current moment of time that's safe to share between runs.
|
||||||
|
let cached_compiler = CachedCompiler::new(
|
||||||
|
context
|
||||||
|
.working_directory
|
||||||
|
.as_path()
|
||||||
|
.join("compilation_cache"),
|
||||||
|
context
|
||||||
|
.compilation_configuration
|
||||||
|
.invalidate_compilation_cache,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map(Arc::new)
|
||||||
|
.context("Failed to initialize cached compiler")?;
|
||||||
|
|
||||||
|
// Note: we do not want to run all of the workloads concurrently on all platforms. Rather, we'd
|
||||||
|
// like to run all of the workloads for one platform, and then the next sequentially as we'd
|
||||||
|
// like for the effect of concurrency to be minimized when we're doing the benchmarking.
|
||||||
|
for platform in platforms.iter() {
|
||||||
|
let platform_identifier = platform.platform_identifier();
|
||||||
|
|
||||||
|
let span = info_span!("Benchmarking for the platform", %platform_identifier);
|
||||||
|
let _guard = span.enter();
|
||||||
|
|
||||||
|
for test_definition in test_definitions.iter() {
|
||||||
|
let platform_information = &test_definition.platforms[&platform_identifier];
|
||||||
|
|
||||||
|
let span = info_span!(
|
||||||
|
"Executing workload",
|
||||||
|
metadata_file_path = %test_definition.metadata_file_path.display(),
|
||||||
|
case_idx = %test_definition.case_idx,
|
||||||
|
mode = %test_definition.mode,
|
||||||
|
);
|
||||||
|
let _guard = span.enter();
|
||||||
|
|
||||||
|
// Initializing all of the components requires to execute this particular workload.
|
||||||
|
let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
|
||||||
|
context.wallet_configuration.highest_private_key_exclusive(),
|
||||||
|
)));
|
||||||
|
let (watcher, watcher_tx) = Watcher::new(
|
||||||
|
platform_information
|
||||||
|
.node
|
||||||
|
.subscribe_to_full_blocks_information()
|
||||||
|
.await
|
||||||
|
.context("Failed to subscribe to full blocks information from the node")?,
|
||||||
|
test_definition
|
||||||
|
.reporter
|
||||||
|
.execution_specific_reporter(0usize, platform_identifier),
|
||||||
|
);
|
||||||
|
let driver = Driver::new(
|
||||||
|
platform_information,
|
||||||
|
test_definition,
|
||||||
|
private_key_allocator,
|
||||||
|
cached_compiler.as_ref(),
|
||||||
|
watcher_tx.clone(),
|
||||||
|
context.await_transaction_inclusion,
|
||||||
|
test_definition
|
||||||
|
.case
|
||||||
|
.steps_iterator_for_benchmarks(context.default_repetition_count)
|
||||||
|
.enumerate()
|
||||||
|
.map(|(step_idx, step)| -> (StepPath, Step) {
|
||||||
|
(StepPath::new(vec![StepIdx::new(step_idx)]), step)
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.context("Failed to create the benchmarks driver")?;
|
||||||
|
|
||||||
|
futures::future::try_join(
|
||||||
|
watcher.run(),
|
||||||
|
driver
|
||||||
|
.execute_all()
|
||||||
|
.instrument(info_span!("Executing Benchmarks", %platform_identifier))
|
||||||
|
.inspect(|_| {
|
||||||
|
info!("All transactions submitted - driver completed execution");
|
||||||
|
watcher_tx
|
||||||
|
.send(WatcherEvent::AllTransactionsSubmitted)
|
||||||
|
.unwrap()
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.context("Failed to run the driver and executor")
|
||||||
|
.inspect(|(_, steps_executed)| info!(steps_executed, "Workload Execution Succeeded"))
|
||||||
|
.inspect_err(|err| error!(?err, "Workload Execution Failed"))?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
@@ -0,0 +1,43 @@
|
|||||||
|
use std::{collections::HashMap, path::PathBuf};
|
||||||
|
|
||||||
|
use alloy::{
|
||||||
|
json_abi::JsonAbi,
|
||||||
|
primitives::{Address, U256},
|
||||||
|
};
|
||||||
|
|
||||||
|
use revive_dt_format::metadata::{ContractIdent, ContractInstance};
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
/// The state associated with the test execution of one of the workloads.
|
||||||
|
pub struct ExecutionState {
|
||||||
|
/// The compiled contracts, these contracts have been compiled and have had the libraries linked
|
||||||
|
/// against them and therefore they're ready to be deployed on-demand.
|
||||||
|
pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
|
||||||
|
|
||||||
|
/// A map of all of the deployed contracts and information about them.
|
||||||
|
pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
|
||||||
|
|
||||||
|
/// This map stores the variables used for each one of the cases contained in the metadata file.
|
||||||
|
pub variables: HashMap<String, U256>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ExecutionState {
|
||||||
|
pub fn new(
|
||||||
|
compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
|
||||||
|
deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
compiled_contracts,
|
||||||
|
deployed_contracts,
|
||||||
|
variables: Default::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn empty() -> Self {
|
||||||
|
Self {
|
||||||
|
compiled_contracts: Default::default(),
|
||||||
|
deployed_contracts: Default::default(),
|
||||||
|
variables: Default::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,9 @@
|
|||||||
|
mod driver;
|
||||||
|
mod entry_point;
|
||||||
|
mod execution_state;
|
||||||
|
mod watcher;
|
||||||
|
|
||||||
|
pub use driver::*;
|
||||||
|
pub use entry_point::*;
|
||||||
|
pub use execution_state::*;
|
||||||
|
pub use watcher::*;
|
||||||
@@ -0,0 +1,218 @@
|
|||||||
|
use std::{
|
||||||
|
collections::HashMap,
|
||||||
|
pin::Pin,
|
||||||
|
sync::Arc,
|
||||||
|
time::{SystemTime, UNIX_EPOCH},
|
||||||
|
};
|
||||||
|
|
||||||
|
use alloy::primitives::{BlockNumber, TxHash};
|
||||||
|
use anyhow::Result;
|
||||||
|
use futures::{Stream, StreamExt};
|
||||||
|
use revive_dt_format::steps::StepPath;
|
||||||
|
use revive_dt_report::{ExecutionSpecificReporter, MinedBlockInformation, TransactionInformation};
|
||||||
|
use tokio::sync::{
|
||||||
|
RwLock,
|
||||||
|
mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
|
||||||
|
};
|
||||||
|
use tracing::{info, instrument};
|
||||||
|
|
||||||
|
/// This struct defines the watcher used in the benchmarks. A watcher is only valid for 1 workload
|
||||||
|
/// and MUST NOT be re-used between workloads since it holds important internal state for a given
|
||||||
|
/// workload and is not designed for reuse.
|
||||||
|
pub struct Watcher {
|
||||||
|
/// The receive side of the channel that all of the drivers and various other parts of the code
|
||||||
|
/// send events to the watcher on.
|
||||||
|
rx: UnboundedReceiver<WatcherEvent>,
|
||||||
|
|
||||||
|
/// This is a stream of the blocks that were mined by the node. This is for a single platform
|
||||||
|
/// and a single node from that platform.
|
||||||
|
blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
|
||||||
|
|
||||||
|
/// The reporter used to send events to the report aggregator.
|
||||||
|
reporter: ExecutionSpecificReporter,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Watcher {
|
||||||
|
pub fn new(
|
||||||
|
blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
|
||||||
|
reporter: ExecutionSpecificReporter,
|
||||||
|
) -> (Self, UnboundedSender<WatcherEvent>) {
|
||||||
|
let (tx, rx) = unbounded_channel::<WatcherEvent>();
|
||||||
|
(
|
||||||
|
Self {
|
||||||
|
rx,
|
||||||
|
blocks_stream,
|
||||||
|
reporter,
|
||||||
|
},
|
||||||
|
tx,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all)]
|
||||||
|
pub async fn run(mut self) -> Result<()> {
|
||||||
|
// The first event that the watcher receives must be a `RepetitionStartEvent` that informs
|
||||||
|
// the watcher of the last block number that it should ignore and what the block number is
|
||||||
|
// for the first important block that it should look for.
|
||||||
|
let ignore_block_before = loop {
|
||||||
|
let Some(WatcherEvent::RepetitionStartEvent {
|
||||||
|
ignore_block_before,
|
||||||
|
}) = self.rx.recv().await
|
||||||
|
else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
break ignore_block_before;
|
||||||
|
};
|
||||||
|
|
||||||
|
// This is the set of the transaction hashes that the watcher should be looking for and
|
||||||
|
// watch for them in the blocks. The watcher will keep watching for blocks until it sees
|
||||||
|
// that all of the transactions that it was watching for has been seen in the mined blocks.
|
||||||
|
let watch_for_transaction_hashes =
|
||||||
|
Arc::new(RwLock::new(HashMap::<TxHash, (StepPath, SystemTime)>::new()));
|
||||||
|
|
||||||
|
// A boolean that keeps track of whether all of the transactions were submitted or if more
|
||||||
|
// txs are expected to come through the receive side of the channel. We do not want to rely
|
||||||
|
// on the channel closing alone for the watcher to know that all of the transactions were
|
||||||
|
// submitted and for there to be an explicit event sent by the core orchestrator that
|
||||||
|
// informs the watcher that no further transactions are to be expected and that it can
|
||||||
|
// safely ignore the channel.
|
||||||
|
let all_transactions_submitted = Arc::new(RwLock::new(false));
|
||||||
|
|
||||||
|
let watcher_event_watching_task = {
|
||||||
|
let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
|
||||||
|
let all_transactions_submitted = all_transactions_submitted.clone();
|
||||||
|
async move {
|
||||||
|
while let Some(watcher_event) = self.rx.recv().await {
|
||||||
|
match watcher_event {
|
||||||
|
// Subsequent repetition starts are ignored since certain workloads can
|
||||||
|
// contain nested repetitions and therefore there's no use in doing any
|
||||||
|
// action if the repetitions are nested.
|
||||||
|
WatcherEvent::RepetitionStartEvent { .. } => {}
|
||||||
|
WatcherEvent::SubmittedTransaction {
|
||||||
|
transaction_hash,
|
||||||
|
step_path,
|
||||||
|
} => {
|
||||||
|
watch_for_transaction_hashes
|
||||||
|
.write()
|
||||||
|
.await
|
||||||
|
.insert(transaction_hash, (step_path, SystemTime::now()));
|
||||||
|
}
|
||||||
|
WatcherEvent::AllTransactionsSubmitted => {
|
||||||
|
*all_transactions_submitted.write().await = true;
|
||||||
|
self.rx.close();
|
||||||
|
info!("Watcher's Events Watching Task Finished");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let reporter = self.reporter.clone();
|
||||||
|
let block_information_watching_task = {
|
||||||
|
let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
|
||||||
|
let all_transactions_submitted = all_transactions_submitted.clone();
|
||||||
|
let mut blocks_information_stream = self.blocks_stream;
|
||||||
|
async move {
|
||||||
|
while let Some(mut block) = blocks_information_stream.next().await {
|
||||||
|
// If the block number is equal to or less than the last block before the
|
||||||
|
// repetition then we ignore it and continue on to the next block.
|
||||||
|
if block.ethereum_block_information.block_number <= ignore_block_before {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let watch_for_transaction_hashes =
|
||||||
|
watch_for_transaction_hashes.read().await;
|
||||||
|
for tx_hash in block.ethereum_block_information.transaction_hashes.iter() {
|
||||||
|
let Some((step_path, _)) = watch_for_transaction_hashes.get(tx_hash)
|
||||||
|
else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
*block.tx_counts.entry(step_path.clone()).or_default() += 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
reporter
|
||||||
|
.report_block_mined_event(block.clone())
|
||||||
|
.expect("Can't fail");
|
||||||
|
|
||||||
|
if *all_transactions_submitted.read().await
|
||||||
|
&& watch_for_transaction_hashes.read().await.is_empty()
|
||||||
|
{
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove all of the transaction hashes observed in this block from the txs we
|
||||||
|
// are currently watching for.
|
||||||
|
let mut watch_for_transaction_hashes =
|
||||||
|
watch_for_transaction_hashes.write().await;
|
||||||
|
let mut relevant_transactions_observed = 0;
|
||||||
|
for tx_hash in block.ethereum_block_information.transaction_hashes.iter() {
|
||||||
|
let Some((step_path, submission_time)) =
|
||||||
|
watch_for_transaction_hashes.remove(tx_hash)
|
||||||
|
else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
relevant_transactions_observed += 1;
|
||||||
|
let transaction_information = TransactionInformation {
|
||||||
|
transaction_hash: *tx_hash,
|
||||||
|
submission_timestamp: submission_time
|
||||||
|
.duration_since(UNIX_EPOCH)
|
||||||
|
.expect("Can't fail")
|
||||||
|
.as_secs() as _,
|
||||||
|
block_timestamp: block.ethereum_block_information.block_timestamp,
|
||||||
|
block_number: block.ethereum_block_information.block_number,
|
||||||
|
};
|
||||||
|
reporter
|
||||||
|
.report_step_transaction_information_event(
|
||||||
|
step_path,
|
||||||
|
transaction_information,
|
||||||
|
)
|
||||||
|
.expect("Can't fail")
|
||||||
|
}
|
||||||
|
|
||||||
|
info!(
|
||||||
|
block_number = block.ethereum_block_information.block_number,
|
||||||
|
block_tx_count = block.ethereum_block_information.transaction_hashes.len(),
|
||||||
|
relevant_transactions_observed,
|
||||||
|
remaining_transactions = watch_for_transaction_hashes.len(),
|
||||||
|
"Observed a block"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("Watcher's Block Watching Task Finished");
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let (_, _) =
|
||||||
|
futures::future::join(watcher_event_watching_task, block_information_watching_task)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||||
|
pub enum WatcherEvent {
|
||||||
|
/// Informs the watcher that it should begin watching for the blocks mined by the platforms.
|
||||||
|
/// Before the watcher receives this event it will not be watching for the mined blocks. The
|
||||||
|
/// reason behind this is that we do not want the initialization transactions (e.g., contract
|
||||||
|
/// deployments) to be included in the overall TPS and GPS measurements since these blocks will
|
||||||
|
/// most likely only contain a single transaction since they're just being used for
|
||||||
|
/// initialization.
|
||||||
|
RepetitionStartEvent {
|
||||||
|
/// This is the block number of the last block seen before the repetition started. This is
|
||||||
|
/// used to instruct the watcher to ignore all block prior to this block when it starts
|
||||||
|
/// streaming the blocks.
|
||||||
|
ignore_block_before: BlockNumber,
|
||||||
|
},
|
||||||
|
/// Informs the watcher that a transaction was submitted and that the watcher should watch for a
|
||||||
|
/// transaction with this hash in the blocks that it watches.
|
||||||
|
SubmittedTransaction {
|
||||||
|
/// The hash of the submitted transaction.
|
||||||
|
transaction_hash: TxHash,
|
||||||
|
/// The step path of the step that the transaction belongs to.
|
||||||
|
step_path: StepPath,
|
||||||
|
},
|
||||||
|
/// Informs the watcher that all of the transactions of this benchmark have been submitted and
|
||||||
|
/// that it can expect to receive no further transaction hashes and not even watch the channel
|
||||||
|
/// any longer.
|
||||||
|
AllTransactionsSubmitted,
|
||||||
|
}
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,371 @@
|
|||||||
|
//! The main entry point into differential testing.
|
||||||
|
|
||||||
|
use std::{
|
||||||
|
collections::{BTreeMap, BTreeSet},
|
||||||
|
io::{BufWriter, Write, stderr},
|
||||||
|
sync::Arc,
|
||||||
|
time::{Duration, Instant},
|
||||||
|
};
|
||||||
|
|
||||||
|
use ansi_term::{ANSIStrings, Color};
|
||||||
|
use anyhow::Context as _;
|
||||||
|
use futures::{FutureExt, StreamExt};
|
||||||
|
use revive_dt_common::{cached_fs::read_to_string, types::PrivateKeyAllocator};
|
||||||
|
use revive_dt_core::Platform;
|
||||||
|
use revive_dt_format::corpus::Corpus;
|
||||||
|
use tokio::sync::{Mutex, RwLock, Semaphore};
|
||||||
|
use tracing::{Instrument, error, info, info_span, instrument};
|
||||||
|
|
||||||
|
use revive_dt_config::{Context, OutputFormat, TestExecutionContext};
|
||||||
|
use revive_dt_report::{Reporter, ReporterEvent, TestCaseStatus};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
differential_tests::Driver,
|
||||||
|
helpers::{CachedCompiler, NodePool, create_test_definitions_stream},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Handles the differential testing executing it according to the information defined in the
|
||||||
|
/// context
|
||||||
|
#[instrument(level = "info", err(Debug), skip_all)]
|
||||||
|
pub async fn handle_differential_tests(
|
||||||
|
context: TestExecutionContext,
|
||||||
|
reporter: Reporter,
|
||||||
|
) -> anyhow::Result<()> {
|
||||||
|
let reporter_clone = reporter.clone();
|
||||||
|
|
||||||
|
// Discover all of the metadata files that are defined in the context.
|
||||||
|
let corpus = context
|
||||||
|
.corpus_configuration
|
||||||
|
.test_specifiers
|
||||||
|
.clone()
|
||||||
|
.into_iter()
|
||||||
|
.try_fold(Corpus::default(), Corpus::with_test_specifier)
|
||||||
|
.context("Failed to parse the test corpus")?;
|
||||||
|
info!(
|
||||||
|
len = corpus.metadata_file_count(),
|
||||||
|
"Discovered metadata files"
|
||||||
|
);
|
||||||
|
|
||||||
|
// Discover the list of platforms that the tests should run on based on the context.
|
||||||
|
let platforms = context
|
||||||
|
.platforms
|
||||||
|
.iter()
|
||||||
|
.copied()
|
||||||
|
.map(Into::<&dyn Platform>::into)
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
// Starting the nodes of the various platforms specified in the context.
|
||||||
|
let platforms_and_nodes = {
|
||||||
|
let mut map = BTreeMap::new();
|
||||||
|
|
||||||
|
for platform in platforms.iter() {
|
||||||
|
let platform_identifier = platform.platform_identifier();
|
||||||
|
|
||||||
|
let context = Context::Test(Box::new(context.clone()));
|
||||||
|
let node_pool = NodePool::new(context, *platform)
|
||||||
|
.await
|
||||||
|
.inspect_err(|err| {
|
||||||
|
error!(
|
||||||
|
?err,
|
||||||
|
%platform_identifier,
|
||||||
|
"Failed to initialize the node pool for the platform."
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.context("Failed to initialize the node pool")?;
|
||||||
|
|
||||||
|
map.insert(platform_identifier, (*platform, node_pool));
|
||||||
|
}
|
||||||
|
|
||||||
|
map
|
||||||
|
};
|
||||||
|
info!("Spawned the platform nodes");
|
||||||
|
|
||||||
|
// Preparing test definitions.
|
||||||
|
let only_execute_failed_tests = match context.ignore_success_configuration.path.as_ref() {
|
||||||
|
Some(path) => {
|
||||||
|
let report = read_to_string(path)
|
||||||
|
.context("Failed to read the report file to ignore the succeeding test cases")?;
|
||||||
|
Some(serde_json::from_str(&report).context("Failed to deserialize report")?)
|
||||||
|
}
|
||||||
|
None => None,
|
||||||
|
};
|
||||||
|
let full_context = Context::Test(Box::new(context.clone()));
|
||||||
|
let test_definitions = create_test_definitions_stream(
|
||||||
|
&full_context,
|
||||||
|
&corpus,
|
||||||
|
&platforms_and_nodes,
|
||||||
|
only_execute_failed_tests.as_ref(),
|
||||||
|
reporter.clone(),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.await;
|
||||||
|
info!(len = test_definitions.len(), "Created test definitions");
|
||||||
|
|
||||||
|
// Creating everything else required for the driver to run.
|
||||||
|
let cached_compiler = CachedCompiler::new(
|
||||||
|
context
|
||||||
|
.working_directory
|
||||||
|
.as_path()
|
||||||
|
.join("compilation_cache"),
|
||||||
|
context
|
||||||
|
.compilation_configuration
|
||||||
|
.invalidate_compilation_cache,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map(Arc::new)
|
||||||
|
.context("Failed to initialize cached compiler")?;
|
||||||
|
let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
|
||||||
|
context.wallet_configuration.highest_private_key_exclusive(),
|
||||||
|
)));
|
||||||
|
|
||||||
|
// Creating the driver and executing all of the steps.
|
||||||
|
let semaphore = context
|
||||||
|
.concurrency_configuration
|
||||||
|
.concurrency_limit()
|
||||||
|
.map(Semaphore::new)
|
||||||
|
.map(Arc::new);
|
||||||
|
let running_task_list = Arc::new(RwLock::new(BTreeSet::<usize>::new()));
|
||||||
|
let driver_task = futures::future::join_all(test_definitions.iter().enumerate().map(
|
||||||
|
|(test_id, test_definition)| {
|
||||||
|
let running_task_list = running_task_list.clone();
|
||||||
|
let semaphore = semaphore.clone();
|
||||||
|
|
||||||
|
let private_key_allocator = private_key_allocator.clone();
|
||||||
|
let cached_compiler = cached_compiler.clone();
|
||||||
|
let mode = test_definition.mode.clone();
|
||||||
|
let span = info_span!(
|
||||||
|
"Executing Test Case",
|
||||||
|
test_id,
|
||||||
|
metadata_file_path = %test_definition.metadata_file_path.display(),
|
||||||
|
case_idx = %test_definition.case_idx,
|
||||||
|
mode = %mode,
|
||||||
|
);
|
||||||
|
async move {
|
||||||
|
let permit = match semaphore.as_ref() {
|
||||||
|
Some(semaphore) => Some(semaphore.acquire().await.expect("Can't fail")),
|
||||||
|
None => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
running_task_list.write().await.insert(test_id);
|
||||||
|
let driver = match Driver::new_root(
|
||||||
|
test_definition,
|
||||||
|
private_key_allocator,
|
||||||
|
&cached_compiler,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(driver) => driver,
|
||||||
|
Err(error) => {
|
||||||
|
test_definition
|
||||||
|
.reporter
|
||||||
|
.report_test_failed_event(format!("{error:#}"))
|
||||||
|
.expect("Can't fail");
|
||||||
|
error!("Test Case Failed");
|
||||||
|
drop(permit);
|
||||||
|
running_task_list.write().await.remove(&test_id);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
info!("Created the driver for the test case");
|
||||||
|
|
||||||
|
match driver.execute_all().await {
|
||||||
|
Ok(steps_executed) => test_definition
|
||||||
|
.reporter
|
||||||
|
.report_test_succeeded_event(steps_executed)
|
||||||
|
.expect("Can't fail"),
|
||||||
|
Err(error) => {
|
||||||
|
test_definition
|
||||||
|
.reporter
|
||||||
|
.report_test_failed_event(format!("{error:#}"))
|
||||||
|
.expect("Can't fail");
|
||||||
|
error!("Test Case Failed");
|
||||||
|
}
|
||||||
|
};
|
||||||
|
info!("Finished the execution of the test case");
|
||||||
|
drop(permit);
|
||||||
|
running_task_list.write().await.remove(&test_id);
|
||||||
|
}
|
||||||
|
.instrument(span)
|
||||||
|
},
|
||||||
|
))
|
||||||
|
.inspect(|_| {
|
||||||
|
info!("Finished executing all test cases");
|
||||||
|
reporter_clone
|
||||||
|
.report_completion_event()
|
||||||
|
.expect("Can't fail")
|
||||||
|
});
|
||||||
|
let cli_reporting_task = start_cli_reporting_task(context.output_format, reporter);
|
||||||
|
|
||||||
|
tokio::task::spawn(async move {
|
||||||
|
loop {
|
||||||
|
let remaining_tasks = running_task_list.read().await;
|
||||||
|
info!(
|
||||||
|
count = remaining_tasks.len(),
|
||||||
|
?remaining_tasks,
|
||||||
|
"Remaining Tests"
|
||||||
|
);
|
||||||
|
drop(remaining_tasks);
|
||||||
|
tokio::time::sleep(Duration::from_secs(10)).await
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
futures::future::join(driver_task, cli_reporting_task).await;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
|
||||||
|
async fn start_cli_reporting_task(output_format: OutputFormat, reporter: Reporter) {
|
||||||
|
let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
|
||||||
|
drop(reporter);
|
||||||
|
|
||||||
|
let start = Instant::now();
|
||||||
|
|
||||||
|
let mut global_success_count = 0;
|
||||||
|
let mut global_failure_count = 0;
|
||||||
|
let mut global_ignore_count = 0;
|
||||||
|
|
||||||
|
let mut buf = BufWriter::new(stderr());
|
||||||
|
while let Ok(event) = aggregator_events_rx.recv().await {
|
||||||
|
let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
|
||||||
|
metadata_file_path,
|
||||||
|
mode,
|
||||||
|
case_status,
|
||||||
|
} = event
|
||||||
|
else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
|
||||||
|
match output_format {
|
||||||
|
OutputFormat::Legacy => {
|
||||||
|
let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
|
||||||
|
for (case_idx, case_status) in case_status.into_iter() {
|
||||||
|
let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
|
||||||
|
let _ = match case_status {
|
||||||
|
TestCaseStatus::Succeeded { steps_executed } => {
|
||||||
|
global_success_count += 1;
|
||||||
|
writeln!(
|
||||||
|
buf,
|
||||||
|
"{}",
|
||||||
|
ANSIStrings(&[
|
||||||
|
Color::Green.bold().paint("Case Succeeded"),
|
||||||
|
Color::Green
|
||||||
|
.paint(format!(" - Steps Executed: {steps_executed}")),
|
||||||
|
])
|
||||||
|
)
|
||||||
|
}
|
||||||
|
TestCaseStatus::Failed { reason } => {
|
||||||
|
global_failure_count += 1;
|
||||||
|
writeln!(
|
||||||
|
buf,
|
||||||
|
"{}",
|
||||||
|
ANSIStrings(&[
|
||||||
|
Color::Red.bold().paint("Case Failed"),
|
||||||
|
Color::Red.paint(format!(" - Reason: {}", reason.trim())),
|
||||||
|
])
|
||||||
|
)
|
||||||
|
}
|
||||||
|
TestCaseStatus::Ignored { reason, .. } => {
|
||||||
|
global_ignore_count += 1;
|
||||||
|
writeln!(
|
||||||
|
buf,
|
||||||
|
"{}",
|
||||||
|
ANSIStrings(&[
|
||||||
|
Color::Yellow.bold().paint("Case Ignored"),
|
||||||
|
Color::Yellow.paint(format!(" - Reason: {}", reason.trim())),
|
||||||
|
])
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
let _ = writeln!(buf);
|
||||||
|
}
|
||||||
|
OutputFormat::CargoTestLike => {
|
||||||
|
writeln!(
|
||||||
|
buf,
|
||||||
|
"\t{} {} - {}\n",
|
||||||
|
Color::Green.paint("Running"),
|
||||||
|
metadata_file_path.display(),
|
||||||
|
mode
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let mut success_count = 0;
|
||||||
|
let mut failure_count = 0;
|
||||||
|
let mut ignored_count = 0;
|
||||||
|
writeln!(buf, "running {} tests", case_status.len()).unwrap();
|
||||||
|
for (case_idx, case_result) in case_status.iter() {
|
||||||
|
let status = match case_result {
|
||||||
|
TestCaseStatus::Succeeded { .. } => {
|
||||||
|
success_count += 1;
|
||||||
|
global_success_count += 1;
|
||||||
|
Color::Green.paint("ok")
|
||||||
|
}
|
||||||
|
TestCaseStatus::Failed { reason } => {
|
||||||
|
failure_count += 1;
|
||||||
|
global_failure_count += 1;
|
||||||
|
Color::Red.paint(format!("FAILED, {reason}"))
|
||||||
|
}
|
||||||
|
TestCaseStatus::Ignored { reason, .. } => {
|
||||||
|
ignored_count += 1;
|
||||||
|
global_ignore_count += 1;
|
||||||
|
Color::Yellow.paint(format!("ignored, {reason:?}"))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
writeln!(buf, "test case_idx_{} ... {}", case_idx, status).unwrap();
|
||||||
|
}
|
||||||
|
writeln!(buf).unwrap();
|
||||||
|
|
||||||
|
let status = if failure_count > 0 {
|
||||||
|
Color::Red.paint("FAILED")
|
||||||
|
} else {
|
||||||
|
Color::Green.paint("ok")
|
||||||
|
};
|
||||||
|
writeln!(
|
||||||
|
buf,
|
||||||
|
"test result: {}. {} passed; {} failed; {} ignored",
|
||||||
|
status, success_count, failure_count, ignored_count,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
writeln!(buf).unwrap();
|
||||||
|
|
||||||
|
if aggregator_events_rx.is_empty() {
|
||||||
|
buf = tokio::task::spawn_blocking(move || {
|
||||||
|
buf.flush().unwrap();
|
||||||
|
buf
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
info!("Aggregator Broadcast Channel Closed");
|
||||||
|
|
||||||
|
// Summary at the end.
|
||||||
|
match output_format {
|
||||||
|
OutputFormat::Legacy => {
|
||||||
|
writeln!(
|
||||||
|
buf,
|
||||||
|
"{} cases: {} cases succeeded, {} cases failed in {} seconds",
|
||||||
|
global_success_count + global_failure_count + global_ignore_count,
|
||||||
|
Color::Green.paint(global_success_count.to_string()),
|
||||||
|
Color::Red.paint(global_failure_count.to_string()),
|
||||||
|
start.elapsed().as_secs()
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
OutputFormat::CargoTestLike => {
|
||||||
|
writeln!(
|
||||||
|
buf,
|
||||||
|
"run finished. {} passed; {} failed; {} ignored; finished in {}s",
|
||||||
|
global_success_count,
|
||||||
|
global_failure_count,
|
||||||
|
global_ignore_count,
|
||||||
|
start.elapsed().as_secs()
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,35 @@
|
|||||||
|
use std::{collections::HashMap, path::PathBuf};
|
||||||
|
|
||||||
|
use alloy::{
|
||||||
|
json_abi::JsonAbi,
|
||||||
|
primitives::{Address, U256},
|
||||||
|
};
|
||||||
|
|
||||||
|
use revive_dt_format::metadata::{ContractIdent, ContractInstance};
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
/// The state associated with the test execution of one of the tests.
|
||||||
|
pub struct ExecutionState {
|
||||||
|
/// The compiled contracts, these contracts have been compiled and have had the libraries linked
|
||||||
|
/// against them and therefore they're ready to be deployed on-demand.
|
||||||
|
pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
|
||||||
|
|
||||||
|
/// A map of all of the deployed contracts and information about them.
|
||||||
|
pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
|
||||||
|
|
||||||
|
/// This map stores the variables used for each one of the cases contained in the metadata file.
|
||||||
|
pub variables: HashMap<String, U256>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ExecutionState {
|
||||||
|
pub fn new(
|
||||||
|
compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
|
||||||
|
deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
compiled_contracts,
|
||||||
|
deployed_contracts,
|
||||||
|
variables: Default::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,11 @@
|
|||||||
|
//! This module contains all of the code responsible for performing differential tests including the
|
||||||
|
//! driver implementation, state implementation, and the core logic that allows for tests to be
|
||||||
|
//! executed.
|
||||||
|
|
||||||
|
mod driver;
|
||||||
|
mod entry_point;
|
||||||
|
mod execution_state;
|
||||||
|
|
||||||
|
pub use driver::*;
|
||||||
|
pub use entry_point::*;
|
||||||
|
pub use execution_state::*;
|
||||||
@@ -1,834 +0,0 @@
|
|||||||
//! The test driver handles the compilation and execution of the test cases.
|
|
||||||
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::marker::PhantomData;
|
|
||||||
|
|
||||||
use alloy::json_abi::JsonAbi;
|
|
||||||
use alloy::network::{Ethereum, TransactionBuilder};
|
|
||||||
use alloy::rpc::types::TransactionReceipt;
|
|
||||||
use alloy::rpc::types::trace::geth::GethTrace;
|
|
||||||
use alloy::{
|
|
||||||
primitives::Address,
|
|
||||||
rpc::types::{
|
|
||||||
TransactionRequest,
|
|
||||||
trace::geth::{AccountState, DiffMode},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
use anyhow::Context;
|
|
||||||
use indexmap::IndexMap;
|
|
||||||
use revive_dt_compiler::{Compiler, SolidityCompiler};
|
|
||||||
use revive_dt_config::Arguments;
|
|
||||||
use revive_dt_format::case::CaseIdx;
|
|
||||||
use revive_dt_format::input::Method;
|
|
||||||
use revive_dt_format::metadata::{ContractInstance, ContractPathAndIdentifier};
|
|
||||||
use revive_dt_format::{input::Input, metadata::Metadata, mode::SolcMode};
|
|
||||||
use revive_dt_node_interaction::EthereumNode;
|
|
||||||
use revive_dt_report::reporter::{CompilationTask, Report, Span};
|
|
||||||
use revive_solc_json_interface::SolcStandardJsonOutput;
|
|
||||||
use serde_json::Value;
|
|
||||||
use std::fmt::Debug;
|
|
||||||
|
|
||||||
use crate::Platform;
|
|
||||||
use crate::common::*;
|
|
||||||
|
|
||||||
pub struct State<'a, T: Platform> {
|
|
||||||
/// The configuration that the framework was started with.
|
|
||||||
///
|
|
||||||
/// This is currently used to get certain information from it such as the solc mode and other
|
|
||||||
/// information used at runtime.
|
|
||||||
config: &'a Arguments,
|
|
||||||
|
|
||||||
/// The [`Span`] used in reporting.
|
|
||||||
span: Span,
|
|
||||||
|
|
||||||
/// A vector of all of the compiled contracts. Each call to [`build_contracts`] adds a new entry
|
|
||||||
/// to this vector.
|
|
||||||
///
|
|
||||||
/// [`build_contracts`]: State::build_contracts
|
|
||||||
contracts: Vec<SolcStandardJsonOutput>,
|
|
||||||
|
|
||||||
/// This map stores the contracts deployments that have been made for each case within a
|
|
||||||
/// metadata file. Note, this means that the state can't be reused between different metadata
|
|
||||||
/// files.
|
|
||||||
deployed_contracts: HashMap<CaseIdx, HashMap<ContractInstance, (Address, JsonAbi)>>,
|
|
||||||
|
|
||||||
phantom: PhantomData<T>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, T> State<'a, T>
|
|
||||||
where
|
|
||||||
T: Platform,
|
|
||||||
{
|
|
||||||
pub fn new(config: &'a Arguments, span: Span) -> Self {
|
|
||||||
Self {
|
|
||||||
config,
|
|
||||||
span,
|
|
||||||
contracts: Default::default(),
|
|
||||||
deployed_contracts: Default::default(),
|
|
||||||
phantom: Default::default(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a copy of the current span.
|
|
||||||
fn span(&self) -> Span {
|
|
||||||
self.span
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn build_contracts(&mut self, mode: &SolcMode, metadata: &Metadata) -> anyhow::Result<()> {
|
|
||||||
let mut span = self.span();
|
|
||||||
span.next_metadata(
|
|
||||||
metadata
|
|
||||||
.file_path
|
|
||||||
.as_ref()
|
|
||||||
.expect("metadata should have been read from a file")
|
|
||||||
.clone(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let Some(version) = mode.last_patch_version(&self.config.solc) else {
|
|
||||||
anyhow::bail!("unsupported solc version: {:?}", &mode.solc_version);
|
|
||||||
};
|
|
||||||
|
|
||||||
let compiler = Compiler::<T::Compiler>::new()
|
|
||||||
.allow_path(metadata.directory()?)
|
|
||||||
.solc_optimizer(mode.solc_optimize());
|
|
||||||
|
|
||||||
let compiler = FilesWithExtensionIterator::new(metadata.directory()?)
|
|
||||||
.with_allowed_extension("sol")
|
|
||||||
.try_fold(compiler, |compiler, path| compiler.with_source(&path))?;
|
|
||||||
|
|
||||||
let mut task = CompilationTask {
|
|
||||||
json_input: compiler.input(),
|
|
||||||
json_output: None,
|
|
||||||
mode: mode.clone(),
|
|
||||||
compiler_version: format!("{}", &version),
|
|
||||||
error: None,
|
|
||||||
};
|
|
||||||
|
|
||||||
let compiler_path = T::Compiler::get_compiler_executable(self.config, version)?;
|
|
||||||
match compiler.try_build(compiler_path) {
|
|
||||||
Ok(output) => {
|
|
||||||
task.json_output = Some(output.output.clone());
|
|
||||||
task.error = output.error;
|
|
||||||
self.contracts.push(output.output);
|
|
||||||
|
|
||||||
if let Some(last_output) = self.contracts.last() {
|
|
||||||
if let Some(contracts) = &last_output.contracts {
|
|
||||||
for (file, contracts_map) in contracts {
|
|
||||||
for contract_name in contracts_map.keys() {
|
|
||||||
tracing::debug!(
|
|
||||||
"Compiled contract: {contract_name} from file: {file}"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
tracing::warn!("Compiled contracts field is None");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Report::compilation(span, T::config_id(), task);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
Err(error) => {
|
|
||||||
tracing::error!("Failed to compile contract: {:?}", error.to_string());
|
|
||||||
task.error = Some(error.to_string());
|
|
||||||
Err(error)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn handle_input(
|
|
||||||
&mut self,
|
|
||||||
metadata: &Metadata,
|
|
||||||
case_idx: CaseIdx,
|
|
||||||
input: &Input,
|
|
||||||
node: &T::Blockchain,
|
|
||||||
) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> {
|
|
||||||
let deployment_receipts =
|
|
||||||
self.handle_contract_deployment(metadata, case_idx, input, node)?;
|
|
||||||
self.handle_input_execution(case_idx, input, deployment_receipts, node)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Handles the contract deployment for a given input performing it if it needs to be performed.
|
|
||||||
fn handle_contract_deployment(
|
|
||||||
&mut self,
|
|
||||||
metadata: &Metadata,
|
|
||||||
case_idx: CaseIdx,
|
|
||||||
input: &Input,
|
|
||||||
node: &T::Blockchain,
|
|
||||||
) -> anyhow::Result<HashMap<ContractInstance, TransactionReceipt>> {
|
|
||||||
let span = tracing::debug_span!(
|
|
||||||
"Handling contract deployment",
|
|
||||||
?case_idx,
|
|
||||||
instance = ?input.instance
|
|
||||||
);
|
|
||||||
let _guard = span.enter();
|
|
||||||
|
|
||||||
let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
|
|
||||||
for instance in input.find_all_contract_instances().into_iter() {
|
|
||||||
if !self
|
|
||||||
.deployed_contracts
|
|
||||||
.entry(case_idx)
|
|
||||||
.or_default()
|
|
||||||
.contains_key(&instance)
|
|
||||||
{
|
|
||||||
instances_we_must_deploy.entry(instance).or_insert(false);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if let Method::Deployer = input.method {
|
|
||||||
instances_we_must_deploy.swap_remove(&input.instance);
|
|
||||||
instances_we_must_deploy.insert(input.instance.clone(), true);
|
|
||||||
}
|
|
||||||
|
|
||||||
tracing::debug!(
|
|
||||||
instances_to_deploy = instances_we_must_deploy.len(),
|
|
||||||
"Computed the number of required deployments for input"
|
|
||||||
);
|
|
||||||
|
|
||||||
let mut receipts = HashMap::new();
|
|
||||||
for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() {
|
|
||||||
// What we have at this moment is just a contract instance which is kind of like a variable
|
|
||||||
// name for an actual underlying contract. So, we need to resolve this instance to the info
|
|
||||||
// of the contract that it belongs to.
|
|
||||||
let Some(ContractPathAndIdentifier {
|
|
||||||
contract_source_path,
|
|
||||||
contract_ident,
|
|
||||||
}) = metadata.contract_sources()?.remove(&instance)
|
|
||||||
else {
|
|
||||||
tracing::error!("Contract source not found for instance");
|
|
||||||
anyhow::bail!("Contract source not found for instance {:?}", instance)
|
|
||||||
};
|
|
||||||
|
|
||||||
let compiled_contract = self.contracts.iter().find_map(|output| {
|
|
||||||
output
|
|
||||||
.contracts
|
|
||||||
.as_ref()?
|
|
||||||
.get(&contract_source_path.display().to_string())
|
|
||||||
.and_then(|source_file_contracts| {
|
|
||||||
source_file_contracts.get(contract_ident.as_ref())
|
|
||||||
})
|
|
||||||
});
|
|
||||||
let Some(code) = compiled_contract
|
|
||||||
.and_then(|contract| contract.evm.as_ref().and_then(|evm| evm.bytecode.as_ref()))
|
|
||||||
else {
|
|
||||||
tracing::error!(
|
|
||||||
contract_source_path = contract_source_path.display().to_string(),
|
|
||||||
contract_ident = contract_ident.as_ref(),
|
|
||||||
"Failed to find bytecode for contract"
|
|
||||||
);
|
|
||||||
anyhow::bail!("Failed to find bytecode for contract {:?}", instance)
|
|
||||||
};
|
|
||||||
|
|
||||||
// TODO: When we want to do linking it would be best to do it at this stage here. We have
|
|
||||||
// the context from the metadata files and therefore know what needs to be linked and in
|
|
||||||
// what order it needs to happen.
|
|
||||||
|
|
||||||
let mut code = match alloy::hex::decode(&code.object) {
|
|
||||||
Ok(code) => code,
|
|
||||||
Err(error) => {
|
|
||||||
tracing::error!(
|
|
||||||
?error,
|
|
||||||
contract_source_path = contract_source_path.display().to_string(),
|
|
||||||
contract_ident = contract_ident.as_ref(),
|
|
||||||
"Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking"
|
|
||||||
);
|
|
||||||
anyhow::bail!("Failed to hex-decode the byte code {}", error)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
if deploy_with_constructor_arguments {
|
|
||||||
let encoded_input = input
|
|
||||||
.encoded_input(self.deployed_contracts.entry(case_idx).or_default(), node)?;
|
|
||||||
code.extend(encoded_input.to_vec());
|
|
||||||
}
|
|
||||||
|
|
||||||
let tx = {
|
|
||||||
let tx = TransactionRequest::default().from(input.caller);
|
|
||||||
TransactionBuilder::<Ethereum>::with_deploy_code(tx, code)
|
|
||||||
};
|
|
||||||
|
|
||||||
let receipt = match node.execute_transaction(tx) {
|
|
||||||
Ok(receipt) => receipt,
|
|
||||||
Err(error) => {
|
|
||||||
tracing::error!(
|
|
||||||
node = std::any::type_name::<T>(),
|
|
||||||
?error,
|
|
||||||
"Contract deployment transaction failed."
|
|
||||||
);
|
|
||||||
return Err(error);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let Some(address) = receipt.contract_address else {
|
|
||||||
tracing::error!("Contract deployment transaction didn't return an address");
|
|
||||||
anyhow::bail!("Contract deployment didn't return an address");
|
|
||||||
};
|
|
||||||
tracing::info!(
|
|
||||||
instance_name = ?instance,
|
|
||||||
instance_address = ?address,
|
|
||||||
"Deployed contract"
|
|
||||||
);
|
|
||||||
|
|
||||||
let Some(Value::String(metadata)) =
|
|
||||||
compiled_contract.and_then(|contract| contract.metadata.as_ref())
|
|
||||||
else {
|
|
||||||
tracing::error!("Contract does not have a metadata field");
|
|
||||||
anyhow::bail!("Contract does not have a metadata field");
|
|
||||||
};
|
|
||||||
|
|
||||||
let Ok(metadata) = serde_json::from_str::<Value>(metadata) else {
|
|
||||||
tracing::error!(%metadata, "Failed to parse solc metadata into a structured value");
|
|
||||||
anyhow::bail!("Failed to parse solc metadata into a structured value {metadata}");
|
|
||||||
};
|
|
||||||
|
|
||||||
let Some(abi) = metadata.get("output").and_then(|value| value.get("abi")) else {
|
|
||||||
tracing::error!(%metadata, "Failed to access the .output.abi field of the solc metadata");
|
|
||||||
anyhow::bail!(
|
|
||||||
"Failed to access the .output.abi field of the solc metadata {metadata}"
|
|
||||||
);
|
|
||||||
};
|
|
||||||
|
|
||||||
let Ok(abi) = serde_json::from_value::<JsonAbi>(abi.clone()) else {
|
|
||||||
tracing::error!(%metadata, "Failed to deserialize ABI into a structured format");
|
|
||||||
anyhow::bail!("Failed to deserialize ABI into a structured format {metadata}");
|
|
||||||
};
|
|
||||||
|
|
||||||
self.deployed_contracts
|
|
||||||
.entry(case_idx)
|
|
||||||
.or_default()
|
|
||||||
.insert(instance.clone(), (address, abi));
|
|
||||||
|
|
||||||
receipts.insert(instance.clone(), receipt);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(receipts)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Handles the execution of the input in terms of the calls that need to be made.
|
|
||||||
fn handle_input_execution(
|
|
||||||
&mut self,
|
|
||||||
case_idx: CaseIdx,
|
|
||||||
input: &Input,
|
|
||||||
deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
|
|
||||||
node: &T::Blockchain,
|
|
||||||
) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> {
|
|
||||||
tracing::trace!("Calling execute_input for input: {input:?}");
|
|
||||||
|
|
||||||
let receipt = match input.method {
|
|
||||||
// This input was already executed when `handle_input` was called. We just need to
|
|
||||||
// lookup the transaction receipt in this case and continue on.
|
|
||||||
Method::Deployer => deployment_receipts
|
|
||||||
.get(&input.instance)
|
|
||||||
.context("Failed to find deployment receipt")?
|
|
||||||
.clone(),
|
|
||||||
Method::Fallback | Method::FunctionName(_) => {
|
|
||||||
let tx = match input
|
|
||||||
.legacy_transaction(self.deployed_contracts.entry(case_idx).or_default(), node)
|
|
||||||
{
|
|
||||||
Ok(tx) => {
|
|
||||||
tracing::debug!("Legacy transaction data: {tx:#?}");
|
|
||||||
tx
|
|
||||||
}
|
|
||||||
Err(err) => {
|
|
||||||
tracing::error!("Failed to construct legacy transaction: {err:?}");
|
|
||||||
return Err(err);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
tracing::trace!("Executing transaction for input: {input:?}");
|
|
||||||
|
|
||||||
match node.execute_transaction(tx) {
|
|
||||||
Ok(receipt) => receipt,
|
|
||||||
Err(err) => {
|
|
||||||
tracing::error!(
|
|
||||||
"Failed to execute transaction when executing the contract: {}, {:?}",
|
|
||||||
&*input.instance,
|
|
||||||
err
|
|
||||||
);
|
|
||||||
return Err(err);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
tracing::trace!(
|
|
||||||
"Transaction receipt for executed contract: {} - {:?}",
|
|
||||||
&*input.instance,
|
|
||||||
receipt,
|
|
||||||
);
|
|
||||||
|
|
||||||
let trace = node.trace_transaction(receipt.clone())?;
|
|
||||||
tracing::trace!(
|
|
||||||
"Trace result for contract: {} - {:?}",
|
|
||||||
&*input.instance,
|
|
||||||
trace
|
|
||||||
);
|
|
||||||
|
|
||||||
let diff = node.state_diff(receipt.clone())?;
|
|
||||||
|
|
||||||
Ok((receipt, trace, diff))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct Driver<'a, Leader: Platform, Follower: Platform> {
|
|
||||||
metadata: &'a Metadata,
|
|
||||||
config: &'a Arguments,
|
|
||||||
leader_node: &'a Leader::Blockchain,
|
|
||||||
follower_node: &'a Follower::Blockchain,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, L, F> Driver<'a, L, F>
|
|
||||||
where
|
|
||||||
L: Platform,
|
|
||||||
F: Platform,
|
|
||||||
{
|
|
||||||
pub fn new(
|
|
||||||
metadata: &'a Metadata,
|
|
||||||
config: &'a Arguments,
|
|
||||||
leader_node: &'a L::Blockchain,
|
|
||||||
follower_node: &'a F::Blockchain,
|
|
||||||
) -> Driver<'a, L, F> {
|
|
||||||
Self {
|
|
||||||
metadata,
|
|
||||||
config,
|
|
||||||
leader_node,
|
|
||||||
follower_node,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn trace_diff_mode(label: &str, diff: &DiffMode) {
|
|
||||||
tracing::trace!("{label} - PRE STATE:");
|
|
||||||
for (addr, state) in &diff.pre {
|
|
||||||
Self::trace_account_state(" [pre]", addr, state);
|
|
||||||
}
|
|
||||||
|
|
||||||
tracing::trace!("{label} - POST STATE:");
|
|
||||||
for (addr, state) in &diff.post {
|
|
||||||
Self::trace_account_state(" [post]", addr, state);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn trace_account_state(prefix: &str, addr: &Address, state: &AccountState) {
|
|
||||||
tracing::trace!("{prefix} 0x{addr:x}");
|
|
||||||
|
|
||||||
if let Some(balance) = &state.balance {
|
|
||||||
tracing::trace!("{prefix} balance: {balance}");
|
|
||||||
}
|
|
||||||
if let Some(nonce) = &state.nonce {
|
|
||||||
tracing::trace!("{prefix} nonce: {nonce}");
|
|
||||||
}
|
|
||||||
if let Some(code) = &state.code {
|
|
||||||
tracing::trace!("{prefix} code: {code}");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A note on this function and the choice of how we handle errors that happen here. This is not
|
|
||||||
// a doc comment since it's a comment for the maintainers of this code and not for the users of
|
|
||||||
// this code.
|
|
||||||
//
|
|
||||||
// This function does a few things: it builds the contracts for the various SOLC modes needed.
|
|
||||||
// It deploys the contracts to the chain, and it executes the various inputs that are specified
|
|
||||||
// for the test cases.
|
|
||||||
//
|
|
||||||
// In most functions in the codebase, it's fine to just say "If we encounter an error just
|
|
||||||
// bubble it up to the caller", but this isn't a good idea to do here and we need an elaborate
|
|
||||||
// way to report errors all while being graceful and continuing execution where we can. For
|
|
||||||
// example, if one of the inputs of one of the cases fail to execute, then we should not just
|
|
||||||
// bubble that error up immediately. Instead, we should note it down and continue to the next
|
|
||||||
// case as the next case might succeed.
|
|
||||||
//
|
|
||||||
// Therefore, this method returns an `ExecutionResult` object, and not just a normal `Result`.
|
|
||||||
// This object is fully typed to contain information about what exactly in the execution was a
|
|
||||||
// success and what failed.
|
|
||||||
//
|
|
||||||
// The above then allows us to have better logging and better information in the caller of this
|
|
||||||
// function as we have a more detailed view of what worked and what didn't.
|
|
||||||
pub fn execute(&mut self, span: Span) -> ExecutionResult {
    // This is the execution result object that all of the execution information will be
    // collected into and returned at the end of the execution.
    let mut execution_result = ExecutionResult::default();

    // Entering the span attributes all tracing records below to this metadata file; the
    // guard must stay alive for the remainder of the function.
    let tracing_span = tracing::info_span!("Handling metadata file");
    let _guard = tracing_span.enter();

    for mode in self.metadata.solc_modes() {
        let tracing_span = tracing::info_span!("With solc mode", solc_mode = ?mode);
        let _guard = tracing_span.enter();

        // Fresh leader/follower state per solc mode so that builds and executions of one
        // mode cannot leak into the next.
        let mut leader_state = State::<L>::new(self.config, span);
        let mut follower_state = State::<F>::new(self.config, span);

        // We build the contracts. If building the contracts for the metadata file fails then we
        // have no other option but to keep note of this error and move on to the next solc mode
        // and NOT just bail out of the execution as a whole.
        let build_result = tracing::info_span!("Building contracts").in_scope(|| {
            match leader_state.build_contracts(&mode, self.metadata) {
                Ok(_) => {
                    tracing::debug!(target = ?Target::Leader, "Contract building succeeded");
                    execution_result.add_successful_build(Target::Leader, mode.clone());
                },
                Err(error) => {
                    tracing::error!(target = ?Target::Leader, ?error, "Contract building failed");
                    execution_result.add_failed_build(Target::Leader, mode.clone(), error);
                    return Err(());
                }
            }
            match follower_state.build_contracts(&mode, self.metadata) {
                Ok(_) => {
                    tracing::debug!(target = ?Target::Follower, "Contract building succeeded");
                    execution_result.add_successful_build(Target::Follower, mode.clone());
                },
                Err(error) => {
                    tracing::error!(target = ?Target::Follower, ?error, "Contract building failed");
                    execution_result.add_failed_build(Target::Follower, mode.clone(), error);
                    return Err(());
                }
            }
            Ok(())
        });
        if build_result.is_err() {
            // Note: We skip to the next solc mode as there's nothing that we can do at this
            // point, the building has failed. We do NOT bail out of the execution as a whole.
            continue;
        }

        // For cases if one of the inputs fail then we move on to the next case and we do NOT
        // bail out of the whole thing.
        'case_loop: for (case_idx, case) in self.metadata.cases.iter().enumerate() {
            let tracing_span = tracing::info_span!(
                "Handling case",
                case_name = case.name,
                case_idx = case_idx
            );
            let _guard = tracing_span.enter();

            let case_idx = CaseIdx::new_from(case_idx);

            // For inputs if one of the inputs fail we move on to the next case (we do not move
            // on to the next input as it doesn't make sense. It depends on the previous one).
            for (input_idx, input) in case.inputs.iter().enumerate() {
                let tracing_span = tracing::info_span!("Handling input", input_idx);
                let _guard = tracing_span.enter();

                // Note: this binding intentionally shadows the outer accumulator for the
                // scope of this input only. On failure, the closure records the error into
                // the outer `execution_result` before returning `Err`.
                let execution_result =
                    tracing::info_span!("Executing input", contract_name = ?input.instance)
                        .in_scope(|| {
                            let (leader_receipt, _, leader_diff) = match leader_state
                                .handle_input(self.metadata, case_idx, input, self.leader_node)
                            {
                                Ok(result) => result,
                                Err(error) => {
                                    tracing::error!(
                                        target = ?Target::Leader,
                                        ?error,
                                        "Contract execution failed"
                                    );
                                    execution_result.add_failed_case(
                                        Target::Leader,
                                        mode.clone(),
                                        case.name
                                            .as_deref()
                                            .unwrap_or("no case name")
                                            .to_owned(),
                                        case_idx,
                                        input_idx,
                                        anyhow::Error::msg(format!("{error}")),
                                    );
                                    return Err(error);
                                }
                            };

                            let (follower_receipt, _, follower_diff) = match follower_state
                                .handle_input(
                                    self.metadata,
                                    case_idx,
                                    input,
                                    self.follower_node,
                                ) {
                                Ok(result) => result,
                                Err(error) => {
                                    tracing::error!(
                                        target = ?Target::Follower,
                                        ?error,
                                        "Contract execution failed"
                                    );
                                    execution_result.add_failed_case(
                                        Target::Follower,
                                        mode.clone(),
                                        case.name
                                            .as_deref()
                                            .unwrap_or("no case name")
                                            .to_owned(),
                                        case_idx,
                                        input_idx,
                                        anyhow::Error::msg(format!("{error}")),
                                    );
                                    return Err(error);
                                }
                            };

                            Ok((leader_receipt, leader_diff, follower_receipt, follower_diff))
                        });
                let Ok((leader_receipt, leader_diff, follower_receipt, follower_diff)) =
                    execution_result
                else {
                    // Noting it again here: if something in the input fails we do not move on
                    // to the next input, we move to the next case completely.
                    continue 'case_loop;
                };

                // Compare the state diffs produced by the two targets; mismatches are logged
                // (with per-target traces) rather than treated as hard failures here.
                if leader_diff == follower_diff {
                    tracing::debug!("State diffs match between leader and follower.");
                } else {
                    tracing::debug!("State diffs mismatch between leader and follower.");
                    Self::trace_diff_mode("Leader", &leader_diff);
                    Self::trace_diff_mode("Follower", &follower_diff);
                }

                // Compare emitted logs/events between the two receipts.
                if leader_receipt.logs() != follower_receipt.logs() {
                    tracing::debug!("Log/event mismatch between leader and follower.");
                    tracing::trace!("Leader logs: {:?}", leader_receipt.logs());
                    tracing::trace!("Follower logs: {:?}", follower_receipt.logs());
                }

                // Compare the transaction status between the two receipts.
                if leader_receipt.status() != follower_receipt.status() {
                    tracing::debug!(
                        "Mismatch in status: leader = {}, follower = {}",
                        leader_receipt.status(),
                        follower_receipt.status()
                    );
                }
            }

            // Note: Only consider the case as having been successful after we have processed
            // all of the inputs and completed the entire loop over the input.
            execution_result.add_successful_case(
                Target::Leader,
                mode.clone(),
                case.name.clone().unwrap_or("no case name".to_owned()),
                case_idx,
            );
            execution_result.add_successful_case(
                Target::Follower,
                mode.clone(),
                case.name.clone().unwrap_or("no case name".to_owned()),
                case_idx,
            );
        }
    }

    execution_result
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Default)]
pub struct ExecutionResult {
    /// Every build and case outcome recorded during execution, in the order it was recorded.
    pub results: Vec<Box<dyn ExecutionResultItem>>,
    /// Incremented once for every call to `add_successful_case`.
    pub successful_cases_count: usize,
    /// Incremented once for every call to `add_failed_case`.
    pub failed_cases_count: usize,
}
|
|
||||||
|
|
||||||
impl ExecutionResult {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
results: Default::default(),
|
|
||||||
successful_cases_count: Default::default(),
|
|
||||||
failed_cases_count: Default::default(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn add_successful_build(&mut self, target: Target, solc_mode: SolcMode) {
|
|
||||||
self.results
|
|
||||||
.push(Box::new(BuildResult::Success { target, solc_mode }));
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn add_failed_build(&mut self, target: Target, solc_mode: SolcMode, error: anyhow::Error) {
|
|
||||||
self.results.push(Box::new(BuildResult::Failure {
|
|
||||||
target,
|
|
||||||
solc_mode,
|
|
||||||
error,
|
|
||||||
}));
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn add_successful_case(
|
|
||||||
&mut self,
|
|
||||||
target: Target,
|
|
||||||
solc_mode: SolcMode,
|
|
||||||
case_name: String,
|
|
||||||
case_idx: CaseIdx,
|
|
||||||
) {
|
|
||||||
self.successful_cases_count += 1;
|
|
||||||
self.results.push(Box::new(CaseResult::Success {
|
|
||||||
target,
|
|
||||||
solc_mode,
|
|
||||||
case_name,
|
|
||||||
case_idx,
|
|
||||||
}));
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn add_failed_case(
|
|
||||||
&mut self,
|
|
||||||
target: Target,
|
|
||||||
solc_mode: SolcMode,
|
|
||||||
case_name: String,
|
|
||||||
case_idx: CaseIdx,
|
|
||||||
input_idx: usize,
|
|
||||||
error: anyhow::Error,
|
|
||||||
) {
|
|
||||||
self.failed_cases_count += 1;
|
|
||||||
self.results.push(Box::new(CaseResult::Failure {
|
|
||||||
target,
|
|
||||||
solc_mode,
|
|
||||||
case_name,
|
|
||||||
case_idx,
|
|
||||||
error,
|
|
||||||
input_idx,
|
|
||||||
}));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A single recorded outcome (build or case) inside an [`ExecutionResult`].
pub trait ExecutionResultItem: Debug {
    /// Views this result item as a `Result`, borrowing the error on failure.
    fn as_result(&self) -> Result<(), &anyhow::Error>;

    /// Provides information on whether the provided result item is of a success or failure.
    fn is_success(&self) -> bool;

    /// Provides information of the target that this result is for.
    fn target(&self) -> &Target;

    /// Provides information on the [`SolcMode`] that was being used for this result item.
    fn solc_mode(&self) -> &SolcMode;

    /// Provides information on the case name and number that this result item pertains to. This is
    /// [`None`] if the error doesn't belong to any case (e.g., if it's a build error outside of any
    /// of the cases.).
    fn case_name_and_index(&self) -> Option<(&str, &CaseIdx)>;

    /// Provides information on the input number that this result item pertains to. This is [`None`]
    /// if the error doesn't belong to any input (e.g., if it's a build error outside of any of the
    /// inputs.).
    fn input_index(&self) -> Option<usize>;
}
|
|
||||||
|
|
||||||
/// Identifies which of the two execution targets a result item was produced by.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Target {
    /// Results produced by the leader.
    Leader,
    /// Results produced by the follower.
    Follower,
}
|
|
||||||
|
|
||||||
/// The outcome of building contracts for one target under one solc mode.
#[derive(Debug)]
pub enum BuildResult {
    /// Building completed successfully.
    Success {
        target: Target,
        solc_mode: SolcMode,
    },
    /// Building failed with the recorded error.
    Failure {
        target: Target,
        solc_mode: SolcMode,
        error: anyhow::Error,
    },
}
|
|
||||||
|
|
||||||
impl ExecutionResultItem for BuildResult {
|
|
||||||
fn as_result(&self) -> Result<(), &anyhow::Error> {
|
|
||||||
match self {
|
|
||||||
Self::Success { .. } => Ok(()),
|
|
||||||
Self::Failure { error, .. } => Err(error)?,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn is_success(&self) -> bool {
|
|
||||||
match self {
|
|
||||||
Self::Success { .. } => true,
|
|
||||||
Self::Failure { .. } => false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn target(&self) -> &Target {
|
|
||||||
match self {
|
|
||||||
Self::Success { target, .. } | Self::Failure { target, .. } => target,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn solc_mode(&self) -> &SolcMode {
|
|
||||||
match self {
|
|
||||||
Self::Success { solc_mode, .. } | Self::Failure { solc_mode, .. } => solc_mode,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn case_name_and_index(&self) -> Option<(&str, &CaseIdx)> {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
|
|
||||||
fn input_index(&self) -> Option<usize> {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The outcome of executing one case for one target under one solc mode.
#[derive(Debug)]
pub enum CaseResult {
    /// The case executed all of its inputs successfully.
    Success {
        target: Target,
        solc_mode: SolcMode,
        case_name: String,
        case_idx: CaseIdx,
    },
    /// The case failed while executing the input at `input_idx`.
    Failure {
        target: Target,
        solc_mode: SolcMode,
        case_name: String,
        case_idx: CaseIdx,
        // Index of the input whose execution failed.
        input_idx: usize,
        error: anyhow::Error,
    },
}
|
|
||||||
|
|
||||||
impl ExecutionResultItem for CaseResult {
|
|
||||||
fn as_result(&self) -> Result<(), &anyhow::Error> {
|
|
||||||
match self {
|
|
||||||
Self::Success { .. } => Ok(()),
|
|
||||||
Self::Failure { error, .. } => Err(error)?,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn is_success(&self) -> bool {
|
|
||||||
match self {
|
|
||||||
Self::Success { .. } => true,
|
|
||||||
Self::Failure { .. } => false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn target(&self) -> &Target {
|
|
||||||
match self {
|
|
||||||
Self::Success { target, .. } | Self::Failure { target, .. } => target,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn solc_mode(&self) -> &SolcMode {
|
|
||||||
match self {
|
|
||||||
Self::Success { solc_mode, .. } | Self::Failure { solc_mode, .. } => solc_mode,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn case_name_and_index(&self) -> Option<(&str, &CaseIdx)> {
|
|
||||||
match self {
|
|
||||||
Self::Success {
|
|
||||||
case_name,
|
|
||||||
case_idx,
|
|
||||||
..
|
|
||||||
}
|
|
||||||
| Self::Failure {
|
|
||||||
case_name,
|
|
||||||
case_idx,
|
|
||||||
..
|
|
||||||
} => Some((case_name, case_idx)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn input_index(&self) -> Option<usize> {
|
|
||||||
match self {
|
|
||||||
CaseResult::Success { .. } => None,
|
|
||||||
CaseResult::Failure { input_idx, .. } => Some(*input_idx),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -0,0 +1,355 @@
|
|||||||
|
//! A wrapper around the compiler which allows for caching of compilation artifacts so that they can
|
||||||
|
//! be reused between runs.
|
||||||
|
|
||||||
|
use std::{
|
||||||
|
borrow::Cow,
|
||||||
|
collections::HashMap,
|
||||||
|
path::{Path, PathBuf},
|
||||||
|
sync::{Arc, LazyLock},
|
||||||
|
};
|
||||||
|
|
||||||
|
use futures::FutureExt;
|
||||||
|
use revive_dt_common::{iterators::FilesWithExtensionIterator, types::CompilerIdentifier};
|
||||||
|
use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
|
||||||
|
use revive_dt_core::Platform;
|
||||||
|
use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};
|
||||||
|
|
||||||
|
use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
|
||||||
|
use anyhow::{Context as _, Error, Result};
|
||||||
|
use revive_dt_report::ExecutionSpecificReporter;
|
||||||
|
use semver::Version;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use tokio::sync::{Mutex, RwLock, Semaphore};
|
||||||
|
use tracing::{Instrument, debug, debug_span, instrument};
|
||||||
|
|
||||||
|
/// A wrapper around the compiler that caches compilation artifacts on disk so that they
/// can be reused between runs, and deduplicates concurrent requests for the same
/// compilation.
pub struct CachedCompiler<'a> {
    /// The cache that stores the compiled contracts.
    artifacts_cache: ArtifactsCache,

    /// This is a mechanism that the cached compiler uses so that if multiple compilation requests
    /// come in for the same contract we never compile all of them and only compile it once and all
    /// other tasks that request this same compilation concurrently get the cached version.
    cache_key_lock: RwLock<HashMap<CacheKey<'a>, Arc<Mutex<()>>>>,
}
|
||||||
|
|
||||||
|
impl<'a> CachedCompiler<'a> {
|
||||||
|
pub async fn new(path: impl AsRef<Path>, invalidate_cache: bool) -> Result<Self> {
|
||||||
|
let mut cache = ArtifactsCache::new(path);
|
||||||
|
if invalidate_cache {
|
||||||
|
cache = cache
|
||||||
|
.with_invalidated_cache()
|
||||||
|
.await
|
||||||
|
.context("Failed to invalidate compilation cache directory")?;
|
||||||
|
}
|
||||||
|
Ok(Self {
|
||||||
|
artifacts_cache: cache,
|
||||||
|
cache_key_lock: Default::default(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Compiles or gets the compilation artifacts from the cache.
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
|
#[instrument(
|
||||||
|
level = "debug",
|
||||||
|
skip_all,
|
||||||
|
fields(
|
||||||
|
metadata_file_path = %metadata_file_path.display(),
|
||||||
|
%mode,
|
||||||
|
platform = %platform.platform_identifier()
|
||||||
|
),
|
||||||
|
err
|
||||||
|
)]
|
||||||
|
pub async fn compile_contracts(
|
||||||
|
&self,
|
||||||
|
metadata: &'a Metadata,
|
||||||
|
metadata_file_path: &'a Path,
|
||||||
|
mode: Cow<'a, Mode>,
|
||||||
|
deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
|
||||||
|
compiler: &dyn SolidityCompiler,
|
||||||
|
platform: &dyn Platform,
|
||||||
|
reporter: &ExecutionSpecificReporter,
|
||||||
|
) -> Result<CompilerOutput> {
|
||||||
|
let cache_key = CacheKey {
|
||||||
|
compiler_identifier: platform.compiler_identifier(),
|
||||||
|
compiler_version: compiler.version().clone(),
|
||||||
|
metadata_file_path,
|
||||||
|
solc_mode: mode.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let compilation_callback = || {
|
||||||
|
async move {
|
||||||
|
compile_contracts(
|
||||||
|
metadata
|
||||||
|
.directory()
|
||||||
|
.context("Failed to get metadata directory while preparing compilation")?,
|
||||||
|
metadata
|
||||||
|
.files_to_compile()
|
||||||
|
.context("Failed to enumerate files to compile from metadata")?,
|
||||||
|
&mode,
|
||||||
|
deployed_libraries,
|
||||||
|
compiler,
|
||||||
|
reporter,
|
||||||
|
)
|
||||||
|
.map(|compilation_result| compilation_result.map(CacheValue::new))
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
.instrument(debug_span!(
|
||||||
|
"Running compilation for the cache key",
|
||||||
|
cache_key.compiler_identifier = %cache_key.compiler_identifier,
|
||||||
|
cache_key.compiler_version = %cache_key.compiler_version,
|
||||||
|
cache_key.metadata_file_path = %cache_key.metadata_file_path.display(),
|
||||||
|
cache_key.solc_mode = %cache_key.solc_mode,
|
||||||
|
))
|
||||||
|
};
|
||||||
|
|
||||||
|
let compiled_contracts = match deployed_libraries {
|
||||||
|
// If deployed libraries have been specified then we will re-compile the contract as it
|
||||||
|
// means that linking is required in this case.
|
||||||
|
Some(_) => {
|
||||||
|
debug!("Deployed libraries defined, recompilation must take place");
|
||||||
|
debug!("Cache miss");
|
||||||
|
compilation_callback()
|
||||||
|
.await
|
||||||
|
.context("Compilation callback for deployed libraries failed")?
|
||||||
|
.compiler_output
|
||||||
|
}
|
||||||
|
// If no deployed libraries are specified then we can follow the cached flow and attempt
|
||||||
|
// to lookup the compilation artifacts in the cache.
|
||||||
|
None => {
|
||||||
|
debug!("Deployed libraries undefined, attempting to make use of cache");
|
||||||
|
|
||||||
|
// Lock this specific cache key such that we do not get inconsistent state. We want
|
||||||
|
// that when multiple cases come in asking for the compilation artifacts then they
|
||||||
|
// don't all trigger a compilation if there's a cache miss. Hence, the lock here.
|
||||||
|
let read_guard = self.cache_key_lock.read().await;
|
||||||
|
let mutex = match read_guard.get(&cache_key).cloned() {
|
||||||
|
Some(value) => {
|
||||||
|
drop(read_guard);
|
||||||
|
value
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
drop(read_guard);
|
||||||
|
self.cache_key_lock
|
||||||
|
.write()
|
||||||
|
.await
|
||||||
|
.entry(cache_key.clone())
|
||||||
|
.or_default()
|
||||||
|
.clone()
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let _guard = mutex.lock().await;
|
||||||
|
|
||||||
|
match self.artifacts_cache.get(&cache_key).await {
|
||||||
|
Some(cache_value) => {
|
||||||
|
if deployed_libraries.is_some() {
|
||||||
|
reporter
|
||||||
|
.report_post_link_contracts_compilation_succeeded_event(
|
||||||
|
compiler.version().clone(),
|
||||||
|
compiler.path(),
|
||||||
|
true,
|
||||||
|
None,
|
||||||
|
cache_value.compiler_output.clone(),
|
||||||
|
)
|
||||||
|
.expect("Can't happen");
|
||||||
|
} else {
|
||||||
|
reporter
|
||||||
|
.report_pre_link_contracts_compilation_succeeded_event(
|
||||||
|
compiler.version().clone(),
|
||||||
|
compiler.path(),
|
||||||
|
true,
|
||||||
|
None,
|
||||||
|
cache_value.compiler_output.clone(),
|
||||||
|
)
|
||||||
|
.expect("Can't happen");
|
||||||
|
}
|
||||||
|
cache_value.compiler_output
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
let compiler_output = compilation_callback()
|
||||||
|
.await
|
||||||
|
.context("Compilation callback failed (cache miss path)")?
|
||||||
|
.compiler_output;
|
||||||
|
self.artifacts_cache
|
||||||
|
.insert(
|
||||||
|
&cache_key,
|
||||||
|
&CacheValue {
|
||||||
|
compiler_output: compiler_output.clone(),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.context(
|
||||||
|
"Failed to write the cached value of the compilation artifacts",
|
||||||
|
)?;
|
||||||
|
compiler_output
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(compiled_contracts)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Runs an actual (uncached) compilation of the given files and reports the outcome —
/// success or failure, pre-link or post-link depending on whether `deployed_libraries`
/// is provided — to the reporter before returning the compiler output.
async fn compile_contracts(
    metadata_directory: impl AsRef<Path>,
    mut files_to_compile: impl Iterator<Item = PathBuf>,
    mode: &Mode,
    deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
    compiler: &dyn SolidityCompiler,
    reporter: &ExecutionSpecificReporter,
) -> Result<CompilerOutput> {
    // Puts a limit on how many compilations we can perform at any given instance which helps us
    // with some of the errors we've been seeing with high concurrency on MacOS (we have not tried
    // it on Linux so we don't know if these issues also persist there or not.)
    static SPAWN_GATE: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(5));
    let _permit = SPAWN_GATE.acquire().await?;

    // All .sol files under the metadata directory; used below to attach every deployed
    // library to every source file.
    let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
        .with_allowed_extension("sol")
        .with_use_cached_fs(true)
        .collect::<Vec<_>>();

    let compilation = Compiler::new()
        .with_allow_path(metadata_directory)
        // Handling the modes
        .with_optimization(mode.optimize_setting)
        .with_pipeline(mode.pipeline)
        // Adding the contract sources to the compiler.
        .try_then(|compiler| {
            files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path))
        })?
        // Adding the deployed libraries to the compiler.
        .then(|compiler| {
            deployed_libraries
                .iter()
                .flat_map(|value| value.iter())
                .map(|(instance, (ident, address, abi))| (instance, ident, address, abi))
                // Cross product: register each library (ident, address) against every
                // source file discovered in the directory.
                .flat_map(|(_, ident, address, _)| {
                    all_sources_in_dir
                        .iter()
                        .map(move |path| (ident, address, path))
                })
                .fold(compiler, |compiler, (ident, address, path)| {
                    compiler.with_library(path, ident.as_str(), *address)
                })
        });

    // Keep a copy of the compiler input so it can be attached to the report events below.
    let input = compilation.input().clone();
    let output = compilation.try_build(compiler).await;

    // Report the outcome: the reporting event depends on (success, linked?); presence of
    // deployed libraries means this was a post-link compilation.
    match (output.as_ref(), deployed_libraries.is_some()) {
        (Ok(output), true) => {
            reporter
                .report_post_link_contracts_compilation_succeeded_event(
                    compiler.version().clone(),
                    compiler.path(),
                    false,
                    input,
                    output.clone(),
                )
                .expect("Can't happen");
        }
        (Ok(output), false) => {
            reporter
                .report_pre_link_contracts_compilation_succeeded_event(
                    compiler.version().clone(),
                    compiler.path(),
                    false,
                    input,
                    output.clone(),
                )
                .expect("Can't happen");
        }
        (Err(err), true) => {
            reporter
                .report_post_link_contracts_compilation_failed_event(
                    compiler.version().clone(),
                    compiler.path().to_path_buf(),
                    input,
                    format!("{err:#}"),
                )
                .expect("Can't happen");
        }
        (Err(err), false) => {
            reporter
                .report_pre_link_contracts_compilation_failed_event(
                    compiler.version().clone(),
                    compiler.path().to_path_buf(),
                    input,
                    format!("{err:#}"),
                )
                .expect("Can't happen");
        }
    }

    output
}
|
||||||
|
|
||||||
|
/// An on-disk store (backed by `cacache`) for compilation artifacts, addressed by
/// bson-serialized [`CacheKey`]s.
struct ArtifactsCache {
    /// Root directory of the on-disk cache.
    path: PathBuf,
}
|
||||||
|
|
||||||
|
impl ArtifactsCache {
|
||||||
|
pub fn new(path: impl AsRef<Path>) -> Self {
|
||||||
|
Self {
|
||||||
|
path: path.as_ref().to_path_buf(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "debug", skip_all, err)]
|
||||||
|
pub async fn with_invalidated_cache(self) -> Result<Self> {
|
||||||
|
cacache::clear(self.path.as_path())
|
||||||
|
.await
|
||||||
|
.map_err(Into::<Error>::into)
|
||||||
|
.with_context(|| format!("Failed to clear cache at {}", self.path.display()))?;
|
||||||
|
Ok(self)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "debug", skip_all, err)]
|
||||||
|
pub async fn insert(&self, key: &CacheKey<'_>, value: &CacheValue) -> Result<()> {
|
||||||
|
let key = bson::to_vec(key).context("Failed to serialize cache key (bson)")?;
|
||||||
|
let value = bson::to_vec(value).context("Failed to serialize cache value (bson)")?;
|
||||||
|
cacache::write(self.path.as_path(), key.encode_hex(), value)
|
||||||
|
.await
|
||||||
|
.with_context(|| {
|
||||||
|
format!("Failed to write cache entry under {}", self.path.display())
|
||||||
|
})?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn get(&self, key: &CacheKey<'_>) -> Option<CacheValue> {
|
||||||
|
let key = bson::to_vec(key).ok()?;
|
||||||
|
let value = cacache::read(self.path.as_path(), key.encode_hex())
|
||||||
|
.await
|
||||||
|
.ok()?;
|
||||||
|
let value = bson::from_slice::<CacheValue>(&value).ok()?;
|
||||||
|
Some(value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Uniquely identifies one compilation run for cache lookup purposes.
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
struct CacheKey<'a> {
    /// The identifier of the used compiler.
    compiler_identifier: CompilerIdentifier,

    /// The version of the compiler that was used to compile the artifacts.
    compiler_version: Version,

    /// The path of the metadata file that the compilation artifacts are for.
    metadata_file_path: &'a Path,

    /// The mode that the compilation artifacts were compiled with.
    solc_mode: Cow<'a, Mode>,
}
|
||||||
|
|
||||||
|
/// The payload stored in the artifacts cache for one [`CacheKey`].
#[derive(Clone, Debug, Serialize, Deserialize)]
struct CacheValue {
    /// The compiler output from the compilation run.
    compiler_output: CompilerOutput,
}
|
||||||
|
|
||||||
|
impl CacheValue {
    /// Wraps a compiler output so it can be stored in the artifacts cache.
    pub fn new(compiler_output: CompilerOutput) -> Self {
        Self { compiler_output }
    }
}
|
||||||
@@ -0,0 +1,7 @@
|
|||||||
|
// Submodules of this crate's helpers.
mod cached_compiler;
mod pool;
mod test;

// Re-export the contents of each submodule at this level so callers can use a flat
// namespace.
pub use cached_compiler::*;
pub use pool::*;
pub use test::*;
|
||||||
@@ -0,0 +1,59 @@
|
|||||||
|
//! This crate implements concurrent handling of testing node.
|
||||||
|
|
||||||
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||||
|
|
||||||
|
use anyhow::Context as _;
|
||||||
|
use revive_dt_config::*;
|
||||||
|
use revive_dt_core::Platform;
|
||||||
|
use revive_dt_node_interaction::EthereumNode;
|
||||||
|
|
||||||
|
/// The node pool starts one or more [Node] which then can be accessed
/// in a round-robin fashion.
pub struct NodePool {
    // Index of the next node to hand out; incremented atomically on every access.
    next: AtomicUsize,
    // The started nodes, in spawn order.
    nodes: Vec<Box<dyn EthereumNode + Send + Sync>>,
}
|
||||||
|
|
||||||
|
impl NodePool {
|
||||||
|
/// Create a new Pool. This will start as many nodes as there are workers in `config`.
|
||||||
|
pub async fn new(context: Context, platform: &dyn Platform) -> anyhow::Result<Self> {
|
||||||
|
let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
|
||||||
|
let nodes = concurrency_configuration.number_of_nodes;
|
||||||
|
|
||||||
|
let mut handles = Vec::with_capacity(nodes);
|
||||||
|
for _ in 0..nodes {
|
||||||
|
let context = context.clone();
|
||||||
|
handles.push(platform.new_node(context)?);
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut nodes = Vec::with_capacity(nodes);
|
||||||
|
for handle in handles {
|
||||||
|
nodes.push(
|
||||||
|
handle
|
||||||
|
.join()
|
||||||
|
.map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))
|
||||||
|
.context("Failed to join node spawn thread")?
|
||||||
|
.context("Node failed to spawn")?,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let pre_transactions_tasks = nodes
|
||||||
|
.iter_mut()
|
||||||
|
.map(|node| node.pre_transactions())
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
futures::future::try_join_all(pre_transactions_tasks)
|
||||||
|
.await
|
||||||
|
.context("Failed to run the pre-transactions task")?;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
nodes,
|
||||||
|
next: Default::default(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get a handle to the next node.
|
||||||
|
pub fn round_robbin(&self) -> &dyn EthereumNode {
|
||||||
|
let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len();
|
||||||
|
self.nodes.get(current).unwrap().as_ref()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,349 @@
|
|||||||
|
use std::collections::BTreeMap;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::{borrow::Cow, path::Path};
|
||||||
|
|
||||||
|
use futures::{Stream, StreamExt, stream};
|
||||||
|
use indexmap::{IndexMap, indexmap};
|
||||||
|
use revive_dt_common::types::PlatformIdentifier;
|
||||||
|
use revive_dt_config::Context;
|
||||||
|
use revive_dt_format::corpus::Corpus;
|
||||||
|
use serde_json::{Value, json};
|
||||||
|
|
||||||
|
use revive_dt_compiler::Mode;
|
||||||
|
use revive_dt_compiler::SolidityCompiler;
|
||||||
|
use revive_dt_format::{
|
||||||
|
case::{Case, CaseIdx},
|
||||||
|
metadata::MetadataFile,
|
||||||
|
};
|
||||||
|
use revive_dt_node_interaction::EthereumNode;
|
||||||
|
use revive_dt_report::{ExecutionSpecificReporter, Report, Reporter, TestCaseStatus};
|
||||||
|
use revive_dt_report::{TestSpecificReporter, TestSpecifier};
|
||||||
|
use tracing::{debug, error, info};
|
||||||
|
|
||||||
|
use crate::Platform;
|
||||||
|
use crate::helpers::NodePool;
|
||||||
|
|
||||||
|
pub async fn create_test_definitions_stream<'a>(
|
||||||
|
// This is only required for creating the compiler objects and is not used anywhere else in the
|
||||||
|
// function.
|
||||||
|
context: &Context,
|
||||||
|
corpus: &'a Corpus,
|
||||||
|
platforms_and_nodes: &'a BTreeMap<PlatformIdentifier, (&dyn Platform, NodePool)>,
|
||||||
|
only_execute_failed_tests: Option<&Report>,
|
||||||
|
reporter: Reporter,
|
||||||
|
) -> impl Stream<Item = TestDefinition<'a>> {
|
||||||
|
let cloned_reporter = reporter.clone();
|
||||||
|
stream::iter(
|
||||||
|
corpus
|
||||||
|
.cases_iterator()
|
||||||
|
.inspect(move |(metadata_file, ..)| {
|
||||||
|
cloned_reporter
|
||||||
|
.report_metadata_file_discovery_event(
|
||||||
|
metadata_file.metadata_file_path.clone(),
|
||||||
|
metadata_file.content.clone(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
})
|
||||||
|
.map(move |(metadata_file, case_idx, case, mode)| {
|
||||||
|
let reporter = reporter.clone();
|
||||||
|
|
||||||
|
(
|
||||||
|
metadata_file,
|
||||||
|
case_idx,
|
||||||
|
case,
|
||||||
|
mode.clone(),
|
||||||
|
reporter.test_specific_reporter(Arc::new(TestSpecifier {
|
||||||
|
solc_mode: mode.as_ref().clone(),
|
||||||
|
metadata_file_path: metadata_file.metadata_file_path.clone(),
|
||||||
|
case_idx: CaseIdx::new(case_idx),
|
||||||
|
})),
|
||||||
|
)
|
||||||
|
})
|
||||||
|
// Inform the reporter of each one of the test cases that were discovered which we expect to
|
||||||
|
// run.
|
||||||
|
.inspect(|(_, _, _, _, reporter)| {
|
||||||
|
reporter
|
||||||
|
.report_test_case_discovery_event()
|
||||||
|
.expect("Can't fail");
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
// Creating the Test Definition objects from all of the various objects we have and creating
|
||||||
|
// their required dependencies (e.g., compiler).
|
||||||
|
.filter_map(
|
||||||
|
move |(metadata_file, case_idx, case, mode, reporter)| async move {
|
||||||
|
let mut platforms = BTreeMap::new();
|
||||||
|
for (platform, node_pool) in platforms_and_nodes.values() {
|
||||||
|
let node = node_pool.round_robbin();
|
||||||
|
let compiler = platform
|
||||||
|
.new_compiler(context.clone(), mode.version.clone().map(Into::into))
|
||||||
|
.await
|
||||||
|
.inspect_err(|err| {
|
||||||
|
error!(
|
||||||
|
?err,
|
||||||
|
platform_identifier = %platform.platform_identifier(),
|
||||||
|
"Failed to instantiate the compiler"
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.ok()?;
|
||||||
|
|
||||||
|
reporter
|
||||||
|
.report_node_assigned_event(
|
||||||
|
node.id(),
|
||||||
|
platform.platform_identifier(),
|
||||||
|
node.connection_string(),
|
||||||
|
)
|
||||||
|
.expect("Can't fail");
|
||||||
|
|
||||||
|
let reporter =
|
||||||
|
reporter.execution_specific_reporter(node.id(), platform.platform_identifier());
|
||||||
|
|
||||||
|
platforms.insert(
|
||||||
|
platform.platform_identifier(),
|
||||||
|
TestPlatformInformation {
|
||||||
|
platform: *platform,
|
||||||
|
node,
|
||||||
|
compiler,
|
||||||
|
reporter,
|
||||||
|
},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(TestDefinition {
|
||||||
|
/* Metadata file information */
|
||||||
|
metadata: metadata_file,
|
||||||
|
metadata_file_path: metadata_file.metadata_file_path.as_path(),
|
||||||
|
|
||||||
|
/* Mode Information */
|
||||||
|
mode: mode.clone(),
|
||||||
|
|
||||||
|
/* Case Information */
|
||||||
|
case_idx: CaseIdx::new(case_idx),
|
||||||
|
case,
|
||||||
|
|
||||||
|
/* Platform and Node Assignment Information */
|
||||||
|
platforms,
|
||||||
|
|
||||||
|
/* Reporter */
|
||||||
|
reporter,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
)
|
||||||
|
// Filter out the test cases which are incompatible or that can't run in the current setup.
|
||||||
|
.filter_map(move |test| async move {
|
||||||
|
match test.check_compatibility(only_execute_failed_tests) {
|
||||||
|
Ok(()) => Some(test),
|
||||||
|
Err((reason, additional_information)) => {
|
||||||
|
debug!(
|
||||||
|
metadata_file_path = %test.metadata.metadata_file_path.display(),
|
||||||
|
case_idx = %test.case_idx,
|
||||||
|
mode = %test.mode,
|
||||||
|
reason,
|
||||||
|
additional_information =
|
||||||
|
serde_json::to_string(&additional_information).unwrap(),
|
||||||
|
"Ignoring Test Case"
|
||||||
|
);
|
||||||
|
test.reporter
|
||||||
|
.report_test_ignored_event(
|
||||||
|
reason.to_string(),
|
||||||
|
additional_information
|
||||||
|
.into_iter()
|
||||||
|
.map(|(k, v)| (k.into(), v))
|
||||||
|
.collect::<IndexMap<_, _>>(),
|
||||||
|
)
|
||||||
|
.expect("Can't fail");
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.inspect(|test| {
|
||||||
|
info!(
|
||||||
|
metadata_file_path = %test.metadata_file_path.display(),
|
||||||
|
case_idx = %test.case_idx,
|
||||||
|
mode = %test.mode,
|
||||||
|
"Created a test case definition"
|
||||||
|
);
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This is a full description of a differential test to run alongside the full metadata file, the
|
||||||
|
/// specific case to be tested, the platforms that the tests should run on, the specific nodes of
|
||||||
|
/// these platforms that they should run on, the compilers to use, and everything else needed making
|
||||||
|
/// it a complete description.
|
||||||
|
pub struct TestDefinition<'a> {
|
||||||
|
/* Metadata file information */
|
||||||
|
pub metadata: &'a MetadataFile,
|
||||||
|
pub metadata_file_path: &'a Path,
|
||||||
|
|
||||||
|
/* Mode Information */
|
||||||
|
pub mode: Cow<'a, Mode>,
|
||||||
|
|
||||||
|
/* Case Information */
|
||||||
|
pub case_idx: CaseIdx,
|
||||||
|
pub case: &'a Case,
|
||||||
|
|
||||||
|
/* Platform and Node Assignment Information */
|
||||||
|
pub platforms: BTreeMap<PlatformIdentifier, TestPlatformInformation<'a>>,
|
||||||
|
|
||||||
|
/* Reporter */
|
||||||
|
pub reporter: TestSpecificReporter,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> TestDefinition<'a> {
|
||||||
|
/// Checks if this test can be ran with the current configuration.
|
||||||
|
pub fn check_compatibility(
|
||||||
|
&self,
|
||||||
|
only_execute_failed_tests: Option<&Report>,
|
||||||
|
) -> TestCheckFunctionResult {
|
||||||
|
self.check_metadata_file_ignored()?;
|
||||||
|
self.check_case_file_ignored()?;
|
||||||
|
self.check_target_compatibility()?;
|
||||||
|
self.check_evm_version_compatibility()?;
|
||||||
|
self.check_compiler_compatibility()?;
|
||||||
|
self.check_ignore_succeeded(only_execute_failed_tests)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Checks if the metadata file is ignored or not.
|
||||||
|
fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult {
|
||||||
|
if self.metadata.ignore.is_some_and(|ignore| ignore) {
|
||||||
|
Err(("Metadata file is ignored.", indexmap! {}))
|
||||||
|
} else {
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Checks if the case file is ignored or not.
|
||||||
|
fn check_case_file_ignored(&self) -> TestCheckFunctionResult {
|
||||||
|
if self.case.ignore.is_some_and(|ignore| ignore) {
|
||||||
|
Err(("Case is ignored.", indexmap! {}))
|
||||||
|
} else {
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Checks if the platforms all support the desired targets in the metadata file.
|
||||||
|
fn check_target_compatibility(&self) -> TestCheckFunctionResult {
|
||||||
|
let mut error_map = indexmap! {
|
||||||
|
"test_desired_targets" => json!(self.metadata.targets.as_ref()),
|
||||||
|
};
|
||||||
|
let mut is_allowed = true;
|
||||||
|
for (_, platform_information) in self.platforms.iter() {
|
||||||
|
let is_allowed_for_platform = match self.metadata.targets.as_ref() {
|
||||||
|
None => true,
|
||||||
|
Some(required_vm_identifiers) => {
|
||||||
|
required_vm_identifiers.contains(&platform_information.platform.vm_identifier())
|
||||||
|
}
|
||||||
|
};
|
||||||
|
is_allowed &= is_allowed_for_platform;
|
||||||
|
error_map.insert(
|
||||||
|
platform_information.platform.platform_identifier().into(),
|
||||||
|
json!(is_allowed_for_platform),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if is_allowed {
|
||||||
|
Ok(())
|
||||||
|
} else {
|
||||||
|
Err((
|
||||||
|
"One of the platforms do do not support the targets allowed by the test.",
|
||||||
|
error_map,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Checks for the compatibility of the EVM version with the platforms specified.
|
||||||
|
fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult {
|
||||||
|
let Some(evm_version_requirement) = self.metadata.required_evm_version else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut error_map = indexmap! {
|
||||||
|
"test_desired_evm_version" => json!(self.metadata.required_evm_version),
|
||||||
|
};
|
||||||
|
let mut is_allowed = true;
|
||||||
|
for (_, platform_information) in self.platforms.iter() {
|
||||||
|
let is_allowed_for_platform =
|
||||||
|
evm_version_requirement.matches(&platform_information.node.evm_version());
|
||||||
|
is_allowed &= is_allowed_for_platform;
|
||||||
|
error_map.insert(
|
||||||
|
platform_information.platform.platform_identifier().into(),
|
||||||
|
json!(is_allowed_for_platform),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if is_allowed {
|
||||||
|
Ok(())
|
||||||
|
} else {
|
||||||
|
Err((
|
||||||
|
"EVM version is incompatible for the platforms specified",
|
||||||
|
error_map,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Checks if the platforms compilers support the mode that the test is for.
|
||||||
|
fn check_compiler_compatibility(&self) -> TestCheckFunctionResult {
|
||||||
|
let mut error_map = indexmap! {
|
||||||
|
"test_desired_evm_version" => json!(self.metadata.required_evm_version),
|
||||||
|
};
|
||||||
|
let mut is_allowed = true;
|
||||||
|
for (_, platform_information) in self.platforms.iter() {
|
||||||
|
let is_allowed_for_platform = platform_information
|
||||||
|
.compiler
|
||||||
|
.supports_mode(self.mode.optimize_setting, self.mode.pipeline);
|
||||||
|
is_allowed &= is_allowed_for_platform;
|
||||||
|
error_map.insert(
|
||||||
|
platform_information.platform.platform_identifier().into(),
|
||||||
|
json!(is_allowed_for_platform),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if is_allowed {
|
||||||
|
Ok(())
|
||||||
|
} else {
|
||||||
|
Err((
|
||||||
|
"Compilers do not support this mode either for the provided platforms.",
|
||||||
|
error_map,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Checks if the test case should be executed or not based on the passed report and whether the
|
||||||
|
/// user has instructed the tool to ignore the already succeeding test cases.
|
||||||
|
fn check_ignore_succeeded(
|
||||||
|
&self,
|
||||||
|
only_execute_failed_tests: Option<&Report>,
|
||||||
|
) -> TestCheckFunctionResult {
|
||||||
|
let Some(report) = only_execute_failed_tests else {
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
|
||||||
|
let test_case_status = report
|
||||||
|
.execution_information
|
||||||
|
.get(&(self.metadata_file_path.to_path_buf().into()))
|
||||||
|
.and_then(|obj| obj.case_reports.get(&self.case_idx))
|
||||||
|
.and_then(|obj| obj.mode_execution_reports.get(&self.mode))
|
||||||
|
.and_then(|obj| obj.status.as_ref());
|
||||||
|
|
||||||
|
match test_case_status {
|
||||||
|
Some(TestCaseStatus::Failed { .. }) => Ok(()),
|
||||||
|
Some(TestCaseStatus::Ignored { .. }) => Err((
|
||||||
|
"Ignored since it was ignored in a previous run",
|
||||||
|
indexmap! {},
|
||||||
|
)),
|
||||||
|
Some(TestCaseStatus::Succeeded { .. }) => {
|
||||||
|
Err(("Ignored since it succeeded in a prior run", indexmap! {}))
|
||||||
|
}
|
||||||
|
None => Ok(()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct TestPlatformInformation<'a> {
|
||||||
|
pub platform: &'a dyn Platform,
|
||||||
|
pub node: &'a dyn EthereumNode,
|
||||||
|
pub compiler: Box<dyn SolidityCompiler>,
|
||||||
|
pub reporter: ExecutionSpecificReporter,
|
||||||
|
}
|
||||||
|
|
||||||
|
type TestCheckFunctionResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>;
|
||||||
+609
-27
@@ -3,45 +3,627 @@
|
|||||||
//! This crate defines the testing configuration and
|
//! This crate defines the testing configuration and
|
||||||
//! provides a helper utility to execute tests.
|
//! provides a helper utility to execute tests.
|
||||||
|
|
||||||
use revive_dt_compiler::{SolidityCompiler, revive_resolc, solc};
|
use std::{
|
||||||
use revive_dt_config::TestingPlatform;
|
pin::Pin,
|
||||||
use revive_dt_node::{geth, kitchensink::KitchensinkNode};
|
thread::{self, JoinHandle},
|
||||||
|
};
|
||||||
|
|
||||||
|
use alloy::genesis::Genesis;
|
||||||
|
use anyhow::Context as _;
|
||||||
|
use revive_dt_common::types::*;
|
||||||
|
use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc};
|
||||||
|
use revive_dt_config::*;
|
||||||
|
use revive_dt_node::{
|
||||||
|
Node,
|
||||||
|
node_implementations::{
|
||||||
|
geth::GethNode, lighthouse_geth::LighthouseGethNode,
|
||||||
|
polkadot_omni_node::PolkadotOmnichainNode, substrate::SubstrateNode,
|
||||||
|
zombienet::ZombienetNode,
|
||||||
|
},
|
||||||
|
};
|
||||||
use revive_dt_node_interaction::EthereumNode;
|
use revive_dt_node_interaction::EthereumNode;
|
||||||
|
use tracing::info;
|
||||||
|
|
||||||
pub mod common;
|
/// A trait that describes the interface for the platforms that are supported by the tool.
|
||||||
pub mod driver;
|
#[allow(clippy::type_complexity)]
|
||||||
|
|
||||||
/// One platform can be tested differentially against another.
|
|
||||||
///
|
|
||||||
/// For this we need a blockchain node implementation and a compiler.
|
|
||||||
pub trait Platform {
|
pub trait Platform {
|
||||||
type Blockchain: EthereumNode;
|
/// Returns the identifier of this platform. This is a combination of the node and the compiler
|
||||||
type Compiler: SolidityCompiler;
|
/// used.
|
||||||
|
fn platform_identifier(&self) -> PlatformIdentifier;
|
||||||
|
|
||||||
/// Returns the matching [TestingPlatform] of the [revive_dt_config::Arguments].
|
/// Returns a full identifier for the platform.
|
||||||
fn config_id() -> TestingPlatform;
|
fn full_identifier(&self) -> (NodeIdentifier, VmIdentifier, CompilerIdentifier) {
|
||||||
|
(
|
||||||
|
self.node_identifier(),
|
||||||
|
self.vm_identifier(),
|
||||||
|
self.compiler_identifier(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the identifier of the node used.
|
||||||
|
fn node_identifier(&self) -> NodeIdentifier;
|
||||||
|
|
||||||
|
/// Returns the identifier of the vm used.
|
||||||
|
fn vm_identifier(&self) -> VmIdentifier;
|
||||||
|
|
||||||
|
/// Returns the identifier of the compiler used.
|
||||||
|
fn compiler_identifier(&self) -> CompilerIdentifier;
|
||||||
|
|
||||||
|
/// Creates a new node for the platform by spawning a new thread, creating the node object,
|
||||||
|
/// initializing it, spawning it, and waiting for it to start up.
|
||||||
|
fn new_node(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>>;
|
||||||
|
|
||||||
|
/// Creates a new compiler for the provided platform
|
||||||
|
fn new_compiler(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>>;
|
||||||
|
|
||||||
|
/// Exports the genesis/chainspec for the node.
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value>;
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Default)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
pub struct Geth;
|
pub struct GethEvmSolcPlatform;
|
||||||
|
|
||||||
impl Platform for Geth {
|
impl Platform for GethEvmSolcPlatform {
|
||||||
type Blockchain = geth::Instance;
|
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||||
type Compiler = solc::Solc;
|
PlatformIdentifier::GethEvmSolc
|
||||||
|
}
|
||||||
|
|
||||||
fn config_id() -> TestingPlatform {
|
fn node_identifier(&self) -> NodeIdentifier {
|
||||||
TestingPlatform::Geth
|
NodeIdentifier::Geth
|
||||||
|
}
|
||||||
|
|
||||||
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
|
VmIdentifier::Evm
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
|
CompilerIdentifier::Solc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_node(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
|
Ok(thread::spawn(move || {
|
||||||
|
let use_fallback_gas_filler = matches!(context, Context::Test(..));
|
||||||
|
let node = GethNode::new(context, use_fallback_gas_filler);
|
||||||
|
let node = spawn_node::<GethNode>(node, genesis)?;
|
||||||
|
Ok(Box::new(node) as Box<_>)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_compiler(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let compiler = Solc::new(context, version).await;
|
||||||
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
||||||
|
let genesis = AsRef::<GenesisConfiguration>::as_ref(&context).genesis()?;
|
||||||
|
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
||||||
|
|
||||||
|
let node_genesis = GethNode::node_genesis(genesis.clone(), &wallet);
|
||||||
|
serde_json::to_value(node_genesis)
|
||||||
|
.context("Failed to convert node genesis to a serde_value")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Default)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
pub struct Kitchensink;
|
pub struct LighthouseGethEvmSolcPlatform;
|
||||||
|
|
||||||
impl Platform for Kitchensink {
|
impl Platform for LighthouseGethEvmSolcPlatform {
|
||||||
type Blockchain = KitchensinkNode;
|
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||||
type Compiler = revive_resolc::Resolc;
|
PlatformIdentifier::LighthouseGethEvmSolc
|
||||||
|
}
|
||||||
|
|
||||||
fn config_id() -> TestingPlatform {
|
fn node_identifier(&self) -> NodeIdentifier {
|
||||||
TestingPlatform::Kitchensink
|
NodeIdentifier::LighthouseGeth
|
||||||
|
}
|
||||||
|
|
||||||
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
|
VmIdentifier::Evm
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
|
CompilerIdentifier::Solc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_node(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
|
Ok(thread::spawn(move || {
|
||||||
|
let use_fallback_gas_filler = matches!(context, Context::Test(..));
|
||||||
|
let node = LighthouseGethNode::new(context, use_fallback_gas_filler);
|
||||||
|
let node = spawn_node::<LighthouseGethNode>(node, genesis)?;
|
||||||
|
Ok(Box::new(node) as Box<_>)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_compiler(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let compiler = Solc::new(context, version).await;
|
||||||
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
||||||
|
let genesis = AsRef::<GenesisConfiguration>::as_ref(&context).genesis()?;
|
||||||
|
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
||||||
|
|
||||||
|
let node_genesis = LighthouseGethNode::node_genesis(genesis.clone(), &wallet);
|
||||||
|
serde_json::to_value(node_genesis)
|
||||||
|
.context("Failed to convert node genesis to a serde_value")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
|
pub struct ReviveDevNodePolkavmResolcPlatform;
|
||||||
|
|
||||||
|
impl Platform for ReviveDevNodePolkavmResolcPlatform {
|
||||||
|
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||||
|
PlatformIdentifier::ReviveDevNodePolkavmResolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn node_identifier(&self) -> NodeIdentifier {
|
||||||
|
NodeIdentifier::ReviveDevNode
|
||||||
|
}
|
||||||
|
|
||||||
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
|
VmIdentifier::PolkaVM
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
|
CompilerIdentifier::Resolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_node(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
|
let revive_dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
|
||||||
|
|
||||||
|
let revive_dev_node_path = revive_dev_node_configuration.path.clone();
|
||||||
|
let revive_dev_node_consensus = revive_dev_node_configuration.consensus.clone();
|
||||||
|
|
||||||
|
let eth_rpc_connection_strings = revive_dev_node_configuration.existing_rpc_url.clone();
|
||||||
|
|
||||||
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
|
Ok(thread::spawn(move || {
|
||||||
|
let use_fallback_gas_filler = matches!(context, Context::Test(..));
|
||||||
|
let node = SubstrateNode::new(
|
||||||
|
revive_dev_node_path,
|
||||||
|
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
||||||
|
Some(revive_dev_node_consensus),
|
||||||
|
context,
|
||||||
|
ð_rpc_connection_strings,
|
||||||
|
use_fallback_gas_filler,
|
||||||
|
);
|
||||||
|
let node = spawn_node(node, genesis)?;
|
||||||
|
Ok(Box::new(node) as Box<_>)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_compiler(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let compiler = Resolc::new(context, version).await;
|
||||||
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
||||||
|
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
|
||||||
|
.path
|
||||||
|
.as_path();
|
||||||
|
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
||||||
|
let export_chainspec_command = SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND;
|
||||||
|
|
||||||
|
SubstrateNode::node_genesis(revive_dev_node_path, export_chainspec_command, &wallet)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
|
pub struct ReviveDevNodeRevmSolcPlatform;
|
||||||
|
|
||||||
|
impl Platform for ReviveDevNodeRevmSolcPlatform {
|
||||||
|
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||||
|
PlatformIdentifier::ReviveDevNodeRevmSolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn node_identifier(&self) -> NodeIdentifier {
|
||||||
|
NodeIdentifier::ReviveDevNode
|
||||||
|
}
|
||||||
|
|
||||||
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
|
VmIdentifier::Evm
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
|
CompilerIdentifier::Solc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_node(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
|
let revive_dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
|
||||||
|
|
||||||
|
let revive_dev_node_path = revive_dev_node_configuration.path.clone();
|
||||||
|
let revive_dev_node_consensus = revive_dev_node_configuration.consensus.clone();
|
||||||
|
|
||||||
|
let eth_rpc_connection_strings = revive_dev_node_configuration.existing_rpc_url.clone();
|
||||||
|
|
||||||
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
|
Ok(thread::spawn(move || {
|
||||||
|
let use_fallback_gas_filler = matches!(context, Context::Test(..));
|
||||||
|
let node = SubstrateNode::new(
|
||||||
|
revive_dev_node_path,
|
||||||
|
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
||||||
|
Some(revive_dev_node_consensus),
|
||||||
|
context,
|
||||||
|
ð_rpc_connection_strings,
|
||||||
|
use_fallback_gas_filler,
|
||||||
|
);
|
||||||
|
let node = spawn_node(node, genesis)?;
|
||||||
|
Ok(Box::new(node) as Box<_>)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_compiler(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let compiler = Solc::new(context, version).await;
|
||||||
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
||||||
|
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
|
||||||
|
.path
|
||||||
|
.as_path();
|
||||||
|
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
||||||
|
let export_chainspec_command = SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND;
|
||||||
|
|
||||||
|
SubstrateNode::node_genesis(revive_dev_node_path, export_chainspec_command, &wallet)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
|
pub struct ZombienetPolkavmResolcPlatform;
|
||||||
|
|
||||||
|
impl Platform for ZombienetPolkavmResolcPlatform {
|
||||||
|
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||||
|
PlatformIdentifier::ZombienetPolkavmResolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn node_identifier(&self) -> NodeIdentifier {
|
||||||
|
NodeIdentifier::Zombienet
|
||||||
|
}
|
||||||
|
|
||||||
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
|
VmIdentifier::PolkaVM
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
|
CompilerIdentifier::Resolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_node(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
|
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
|
||||||
|
.path
|
||||||
|
.clone();
|
||||||
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
|
Ok(thread::spawn(move || {
|
||||||
|
let use_fallback_gas_filler = matches!(context, Context::Test(..));
|
||||||
|
let node =
|
||||||
|
ZombienetNode::new(polkadot_parachain_path, context, use_fallback_gas_filler);
|
||||||
|
let node = spawn_node(node, genesis)?;
|
||||||
|
Ok(Box::new(node) as Box<_>)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_compiler(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let compiler = Resolc::new(context, version).await;
|
||||||
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
||||||
|
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
|
||||||
|
.path
|
||||||
|
.as_path();
|
||||||
|
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
||||||
|
|
||||||
|
ZombienetNode::node_genesis(polkadot_parachain_path, &wallet)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
|
pub struct ZombienetRevmSolcPlatform;
|
||||||
|
|
||||||
|
impl Platform for ZombienetRevmSolcPlatform {
|
||||||
|
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||||
|
PlatformIdentifier::ZombienetRevmSolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn node_identifier(&self) -> NodeIdentifier {
|
||||||
|
NodeIdentifier::Zombienet
|
||||||
|
}
|
||||||
|
|
||||||
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
|
VmIdentifier::Evm
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
|
CompilerIdentifier::Solc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_node(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
|
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
|
||||||
|
.path
|
||||||
|
.clone();
|
||||||
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
|
Ok(thread::spawn(move || {
|
||||||
|
let use_fallback_gas_filler = matches!(context, Context::Test(..));
|
||||||
|
let node =
|
||||||
|
ZombienetNode::new(polkadot_parachain_path, context, use_fallback_gas_filler);
|
||||||
|
let node = spawn_node(node, genesis)?;
|
||||||
|
Ok(Box::new(node) as Box<_>)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_compiler(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let compiler = Solc::new(context, version).await;
|
||||||
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
||||||
|
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
|
||||||
|
.path
|
||||||
|
.as_path();
|
||||||
|
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
||||||
|
|
||||||
|
ZombienetNode::node_genesis(polkadot_parachain_path, &wallet)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
|
pub struct PolkadotOmniNodePolkavmResolcPlatform;
|
||||||
|
|
||||||
|
impl Platform for PolkadotOmniNodePolkavmResolcPlatform {
|
||||||
|
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||||
|
PlatformIdentifier::PolkadotOmniNodePolkavmResolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn node_identifier(&self) -> NodeIdentifier {
|
||||||
|
NodeIdentifier::PolkadotOmniNode
|
||||||
|
}
|
||||||
|
|
||||||
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
|
VmIdentifier::PolkaVM
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
|
CompilerIdentifier::Resolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_node(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
|
Ok(thread::spawn(move || {
|
||||||
|
let use_fallback_gas_filler = matches!(context, Context::Test(..));
|
||||||
|
let node = PolkadotOmnichainNode::new(context, use_fallback_gas_filler);
|
||||||
|
let node = spawn_node(node, genesis)?;
|
||||||
|
Ok(Box::new(node) as Box<_>)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_compiler(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let compiler = Resolc::new(context, version).await;
|
||||||
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
||||||
|
let polkadot_omnichain_node_configuration =
|
||||||
|
AsRef::<PolkadotOmnichainNodeConfiguration>::as_ref(&context);
|
||||||
|
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
||||||
|
|
||||||
|
PolkadotOmnichainNode::node_genesis(
|
||||||
|
&wallet,
|
||||||
|
polkadot_omnichain_node_configuration
|
||||||
|
.chain_spec_path
|
||||||
|
.as_ref()
|
||||||
|
.context("No WASM runtime path found in the polkadot-omni-node configuration")?,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
|
pub struct PolkadotOmniNodeRevmSolcPlatform;
|
||||||
|
|
||||||
|
impl Platform for PolkadotOmniNodeRevmSolcPlatform {
|
||||||
|
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||||
|
PlatformIdentifier::PolkadotOmniNodeRevmSolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn node_identifier(&self) -> NodeIdentifier {
|
||||||
|
NodeIdentifier::PolkadotOmniNode
|
||||||
|
}
|
||||||
|
|
||||||
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
|
VmIdentifier::Evm
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
|
CompilerIdentifier::Solc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_node(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
|
Ok(thread::spawn(move || {
|
||||||
|
let use_fallback_gas_filler = matches!(context, Context::Test(..));
|
||||||
|
let node = PolkadotOmnichainNode::new(context, use_fallback_gas_filler);
|
||||||
|
let node = spawn_node(node, genesis)?;
|
||||||
|
Ok(Box::new(node) as Box<_>)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_compiler(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let compiler = Solc::new(context, version).await;
|
||||||
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
||||||
|
let polkadot_omnichain_node_configuration =
|
||||||
|
AsRef::<PolkadotOmnichainNodeConfiguration>::as_ref(&context);
|
||||||
|
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
||||||
|
|
||||||
|
PolkadotOmnichainNode::node_genesis(
|
||||||
|
&wallet,
|
||||||
|
polkadot_omnichain_node_configuration
|
||||||
|
.chain_spec_path
|
||||||
|
.as_ref()
|
||||||
|
.context("No WASM runtime path found in the polkadot-omni-node configuration")?,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<PlatformIdentifier> for Box<dyn Platform> {
|
||||||
|
fn from(value: PlatformIdentifier) -> Self {
|
||||||
|
match value {
|
||||||
|
PlatformIdentifier::GethEvmSolc => Box::new(GethEvmSolcPlatform) as Box<_>,
|
||||||
|
PlatformIdentifier::LighthouseGethEvmSolc => {
|
||||||
|
Box::new(LighthouseGethEvmSolcPlatform) as Box<_>
|
||||||
|
}
|
||||||
|
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
|
||||||
|
Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_>
|
||||||
|
}
|
||||||
|
PlatformIdentifier::ReviveDevNodeRevmSolc => {
|
||||||
|
Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>
|
||||||
|
}
|
||||||
|
PlatformIdentifier::ZombienetPolkavmResolc => {
|
||||||
|
Box::new(ZombienetPolkavmResolcPlatform) as Box<_>
|
||||||
|
}
|
||||||
|
PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>,
|
||||||
|
PlatformIdentifier::PolkadotOmniNodePolkavmResolc => {
|
||||||
|
Box::new(PolkadotOmniNodePolkavmResolcPlatform) as Box<_>
|
||||||
|
}
|
||||||
|
PlatformIdentifier::PolkadotOmniNodeRevmSolc => {
|
||||||
|
Box::new(PolkadotOmniNodeRevmSolcPlatform) as Box<_>
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<PlatformIdentifier> for &dyn Platform {
|
||||||
|
fn from(value: PlatformIdentifier) -> Self {
|
||||||
|
match value {
|
||||||
|
PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform as &dyn Platform,
|
||||||
|
PlatformIdentifier::LighthouseGethEvmSolc => {
|
||||||
|
&LighthouseGethEvmSolcPlatform as &dyn Platform
|
||||||
|
}
|
||||||
|
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
|
||||||
|
&ReviveDevNodePolkavmResolcPlatform as &dyn Platform
|
||||||
|
}
|
||||||
|
PlatformIdentifier::ReviveDevNodeRevmSolc => {
|
||||||
|
&ReviveDevNodeRevmSolcPlatform as &dyn Platform
|
||||||
|
}
|
||||||
|
PlatformIdentifier::ZombienetPolkavmResolc => {
|
||||||
|
&ZombienetPolkavmResolcPlatform as &dyn Platform
|
||||||
|
}
|
||||||
|
PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform,
|
||||||
|
PlatformIdentifier::PolkadotOmniNodePolkavmResolc => {
|
||||||
|
&PolkadotOmniNodePolkavmResolcPlatform as &dyn Platform
|
||||||
|
}
|
||||||
|
PlatformIdentifier::PolkadotOmniNodeRevmSolc => {
|
||||||
|
&PolkadotOmniNodeRevmSolcPlatform as &dyn Platform
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn spawn_node<T: Node + EthereumNode + Send + Sync>(
|
||||||
|
mut node: T,
|
||||||
|
genesis: Genesis,
|
||||||
|
) -> anyhow::Result<T> {
|
||||||
|
info!(
|
||||||
|
id = node.id(),
|
||||||
|
connection_string = node.connection_string(),
|
||||||
|
"Spawning node"
|
||||||
|
);
|
||||||
|
node.spawn(genesis)
|
||||||
|
.context("Failed to spawn node process")?;
|
||||||
|
info!(
|
||||||
|
id = node.id(),
|
||||||
|
connection_string = node.connection_string(),
|
||||||
|
"Spawned node"
|
||||||
|
);
|
||||||
|
Ok(node)
|
||||||
|
}
|
||||||
|
|||||||
+101
-152
@@ -1,178 +1,127 @@
|
|||||||
use std::{collections::HashMap, sync::LazyLock};
|
mod differential_benchmarks;
|
||||||
|
mod differential_tests;
|
||||||
|
mod helpers;
|
||||||
|
|
||||||
|
use anyhow::{Context as _, bail};
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
use rayon::{ThreadPoolBuilder, prelude::*};
|
use revive_dt_report::{ReportAggregator, TestCaseStatus};
|
||||||
|
use schemars::schema_for;
|
||||||
use revive_dt_config::*;
|
use tracing::{info, level_filters::LevelFilter};
|
||||||
use revive_dt_core::{
|
|
||||||
Geth, Kitchensink, Platform,
|
|
||||||
driver::{Driver, State},
|
|
||||||
};
|
|
||||||
use revive_dt_format::{corpus::Corpus, metadata::MetadataFile};
|
|
||||||
use revive_dt_node::pool::NodePool;
|
|
||||||
use revive_dt_report::reporter::{Report, Span};
|
|
||||||
use temp_dir::TempDir;
|
|
||||||
use tracing::Level;
|
|
||||||
use tracing_subscriber::{EnvFilter, FmtSubscriber};
|
use tracing_subscriber::{EnvFilter, FmtSubscriber};
|
||||||
|
|
||||||
static TEMP_DIR: LazyLock<TempDir> = LazyLock::new(|| TempDir::new().unwrap());
|
use revive_dt_config::Context;
|
||||||
|
use revive_dt_core::Platform;
|
||||||
|
use revive_dt_format::metadata::Metadata;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
differential_benchmarks::handle_differential_benchmarks,
|
||||||
|
differential_tests::handle_differential_tests,
|
||||||
|
};
|
||||||
|
|
||||||
fn main() -> anyhow::Result<()> {
|
fn main() -> anyhow::Result<()> {
|
||||||
let args = init_cli()?;
|
let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()
|
||||||
|
.lossy(false)
|
||||||
|
// Assuming that each line contains 255 characters and that each character is one byte, then
|
||||||
|
// this means that our buffer is about 4GBs large.
|
||||||
|
.buffered_lines_limit(0x1000000)
|
||||||
|
.thread_name("buffered writer")
|
||||||
|
.finish(std::io::stdout());
|
||||||
|
|
||||||
for (corpus, tests) in collect_corpora(&args)? {
|
|
||||||
let span = Span::new(corpus, args.clone())?;
|
|
||||||
|
|
||||||
match &args.compile_only {
|
|
||||||
Some(platform) => compile_corpus(&args, &tests, platform, span),
|
|
||||||
None => execute_corpus(&args, &tests, span)?,
|
|
||||||
}
|
|
||||||
|
|
||||||
Report::save()?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn init_cli() -> anyhow::Result<Arguments> {
|
|
||||||
let subscriber = FmtSubscriber::builder()
|
let subscriber = FmtSubscriber::builder()
|
||||||
.with_thread_ids(true)
|
.with_writer(writer)
|
||||||
.with_thread_names(true)
|
.with_thread_ids(false)
|
||||||
.with_env_filter(EnvFilter::from_default_env())
|
.with_thread_names(false)
|
||||||
|
.with_env_filter(
|
||||||
|
EnvFilter::builder()
|
||||||
|
.with_default_directive(LevelFilter::OFF.into())
|
||||||
|
.from_env_lossy(),
|
||||||
|
)
|
||||||
.with_ansi(false)
|
.with_ansi(false)
|
||||||
.pretty()
|
.pretty()
|
||||||
.finish();
|
.finish();
|
||||||
tracing::subscriber::set_global_default(subscriber)?;
|
tracing::subscriber::set_global_default(subscriber)?;
|
||||||
|
info!("Differential testing tool is starting");
|
||||||
|
|
||||||
let mut args = Arguments::parse();
|
let mut context = Context::try_parse()?;
|
||||||
|
context.update_for_profile();
|
||||||
|
|
||||||
if args.corpus.is_empty() {
|
let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task();
|
||||||
anyhow::bail!("no test corpus specified");
|
|
||||||
}
|
|
||||||
|
|
||||||
match args.working_directory.as_ref() {
|
match context {
|
||||||
Some(dir) => {
|
Context::Test(context) => tokio::runtime::Builder::new_multi_thread()
|
||||||
if !dir.exists() {
|
.worker_threads(context.concurrency_configuration.number_of_threads)
|
||||||
anyhow::bail!("workdir {} does not exist", dir.display());
|
.enable_all()
|
||||||
}
|
.build()
|
||||||
}
|
.expect("Failed building the Runtime")
|
||||||
None => {
|
.block_on(async move {
|
||||||
args.temp_dir = Some(&TEMP_DIR);
|
let differential_tests_handling_task =
|
||||||
}
|
handle_differential_tests(*context, reporter);
|
||||||
}
|
|
||||||
tracing::info!("workdir: {}", args.directory().display());
|
|
||||||
|
|
||||||
ThreadPoolBuilder::new()
|
let (_, report) = futures::future::try_join(
|
||||||
.num_threads(args.workers)
|
differential_tests_handling_task,
|
||||||
.build_global()?;
|
report_aggregator_task,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
Ok(args)
|
let contains_failure = report
|
||||||
}
|
.execution_information
|
||||||
|
.values()
|
||||||
|
.flat_map(|values| values.case_reports.values())
|
||||||
|
.flat_map(|values| values.mode_execution_reports.values())
|
||||||
|
.any(|report| matches!(report.status, Some(TestCaseStatus::Failed { .. })));
|
||||||
|
|
||||||
fn collect_corpora(args: &Arguments) -> anyhow::Result<HashMap<Corpus, Vec<MetadataFile>>> {
|
if contains_failure {
|
||||||
let mut corpora = HashMap::new();
|
bail!("Some tests failed")
|
||||||
|
|
||||||
for path in &args.corpus {
|
|
||||||
let corpus = Corpus::try_from_path(path)?;
|
|
||||||
tracing::info!("found corpus: {}", path.display());
|
|
||||||
let tests = corpus.enumerate_tests();
|
|
||||||
tracing::info!("corpus '{}' contains {} tests", &corpus.name, tests.len());
|
|
||||||
corpora.insert(corpus, tests);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(corpora)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn run_driver<L, F>(args: &Arguments, tests: &[MetadataFile], span: Span) -> anyhow::Result<()>
|
|
||||||
where
|
|
||||||
L: Platform,
|
|
||||||
F: Platform,
|
|
||||||
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
|
|
||||||
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
|
|
||||||
{
|
|
||||||
let leader_nodes = NodePool::<L::Blockchain>::new(args)?;
|
|
||||||
let follower_nodes = NodePool::<F::Blockchain>::new(args)?;
|
|
||||||
|
|
||||||
tests.par_iter().for_each(
|
|
||||||
|MetadataFile {
|
|
||||||
content: metadata,
|
|
||||||
path: metadata_file_path,
|
|
||||||
}| {
|
|
||||||
// Starting a new tracing span for this metadata file. This allows our logs to be clear
|
|
||||||
// about which metadata file the logs belong to. We can add other information into this
|
|
||||||
// as well to be able to associate the logs with the correct metadata file and case
|
|
||||||
// that's being executed.
|
|
||||||
let tracing_span = tracing::span!(
|
|
||||||
Level::INFO,
|
|
||||||
"Running driver",
|
|
||||||
metadata_file_path = metadata_file_path.display().to_string(),
|
|
||||||
);
|
|
||||||
let _guard = tracing_span.enter();
|
|
||||||
|
|
||||||
let mut driver = Driver::<L, F>::new(
|
|
||||||
metadata,
|
|
||||||
args,
|
|
||||||
leader_nodes.round_robbin(),
|
|
||||||
follower_nodes.round_robbin(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let execution_result = driver.execute(span);
|
|
||||||
tracing::info!(
|
|
||||||
case_success_count = execution_result.successful_cases_count,
|
|
||||||
case_failure_count = execution_result.failed_cases_count,
|
|
||||||
"Execution completed"
|
|
||||||
);
|
|
||||||
|
|
||||||
let mut error_count = 0;
|
|
||||||
for result in execution_result.results.iter() {
|
|
||||||
if !result.is_success() {
|
|
||||||
tracing::error!(execution_error = ?result, "Encountered an error");
|
|
||||||
error_count += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if error_count == 0 {
|
|
||||||
tracing::info!("Execution succeeded");
|
|
||||||
} else {
|
|
||||||
tracing::info!("Execution failed");
|
|
||||||
}
|
|
||||||
},
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn execute_corpus(args: &Arguments, tests: &[MetadataFile], span: Span) -> anyhow::Result<()> {
|
|
||||||
match (&args.leader, &args.follower) {
|
|
||||||
(TestingPlatform::Geth, TestingPlatform::Kitchensink) => {
|
|
||||||
run_driver::<Geth, Kitchensink>(args, tests, span)?
|
|
||||||
}
|
|
||||||
(TestingPlatform::Geth, TestingPlatform::Geth) => {
|
|
||||||
run_driver::<Geth, Geth>(args, tests, span)?
|
|
||||||
}
|
|
||||||
_ => unimplemented!(),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}),
|
||||||
|
Context::Benchmark(context) => tokio::runtime::Builder::new_multi_thread()
|
||||||
|
.worker_threads(context.concurrency_configuration.number_of_threads)
|
||||||
|
.enable_all()
|
||||||
|
.build()
|
||||||
|
.expect("Failed building the Runtime")
|
||||||
|
.block_on(async move {
|
||||||
|
let differential_benchmarks_handling_task =
|
||||||
|
handle_differential_benchmarks(*context, reporter);
|
||||||
|
|
||||||
fn compile_corpus(
|
let (_, report) = futures::future::try_join(
|
||||||
config: &Arguments,
|
differential_benchmarks_handling_task,
|
||||||
tests: &[MetadataFile],
|
report_aggregator_task,
|
||||||
platform: &TestingPlatform,
|
)
|
||||||
span: Span,
|
.await?;
|
||||||
) {
|
|
||||||
tests.par_iter().for_each(|metadata| {
|
let contains_failure = report
|
||||||
for mode in &metadata.solc_modes() {
|
.execution_information
|
||||||
match platform {
|
.values()
|
||||||
TestingPlatform::Geth => {
|
.flat_map(|values| values.case_reports.values())
|
||||||
let mut state = State::<Geth>::new(config, span);
|
.flat_map(|values| values.mode_execution_reports.values())
|
||||||
let _ = state.build_contracts(mode, metadata);
|
.any(|report| matches!(report.status, Some(TestCaseStatus::Failed { .. })));
|
||||||
|
|
||||||
|
if contains_failure {
|
||||||
|
bail!("Some benchmarks failed")
|
||||||
}
|
}
|
||||||
TestingPlatform::Kitchensink => {
|
|
||||||
let mut state = State::<Kitchensink>::new(config, span);
|
Ok(())
|
||||||
let _ = state.build_contracts(mode, metadata);
|
}),
|
||||||
|
Context::ExportGenesis(ref export_genesis_context) => {
|
||||||
|
let platform = Into::<&dyn Platform>::into(export_genesis_context.platform);
|
||||||
|
let genesis = platform.export_genesis(context)?;
|
||||||
|
let genesis_json = serde_json::to_string_pretty(&genesis)
|
||||||
|
.context("Failed to serialize the genesis to JSON")?;
|
||||||
|
println!("{genesis_json}");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
Context::ExportJsonSchema => {
|
||||||
|
let schema = schema_for!(Metadata);
|
||||||
|
println!(
|
||||||
|
"{}",
|
||||||
|
serde_json::to_string_pretty(&schema)
|
||||||
|
.context("Failed to export the JSON schema")?
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
};
|
|
||||||
}
|
}
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,13 +9,22 @@ repository.workspace = true
|
|||||||
rust-version.workspace = true
|
rust-version.workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
revive-dt-node-interaction = { workspace = true }
|
revive-dt-common = { workspace = true }
|
||||||
|
|
||||||
|
revive-common = { workspace = true }
|
||||||
|
|
||||||
alloy = { workspace = true }
|
alloy = { workspace = true }
|
||||||
alloy-primitives = { workspace = true }
|
|
||||||
alloy-sol-types = { workspace = true }
|
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
|
futures = { workspace = true }
|
||||||
tracing = { workspace = true }
|
tracing = { workspace = true }
|
||||||
|
schemars = { workspace = true }
|
||||||
semver = { workspace = true }
|
semver = { workspace = true }
|
||||||
serde = { workspace = true, features = ["derive"] }
|
serde = { workspace = true, features = ["derive"] }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
|
itertools = { workspace = true }
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
tokio = { workspace = true }
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
|||||||
+121
-7
@@ -1,18 +1,132 @@
|
|||||||
use serde::Deserialize;
|
use alloy::primitives::Address;
|
||||||
|
use schemars::JsonSchema;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use crate::{define_wrapper_type, input::Input, mode::Mode};
|
use revive_dt_common::{
|
||||||
|
macros::define_wrapper_type,
|
||||||
|
types::{Mode, ParsedMode},
|
||||||
|
};
|
||||||
|
|
||||||
#[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)]
|
use crate::steps::*;
|
||||||
|
|
||||||
|
#[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)]
|
||||||
pub struct Case {
|
pub struct Case {
|
||||||
|
/// An optional name of the test case.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub name: Option<String>,
|
pub name: Option<String>,
|
||||||
|
|
||||||
|
/// An optional comment on the case which has no impact on the execution in any way.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub comment: Option<String>,
|
pub comment: Option<String>,
|
||||||
pub modes: Option<Vec<Mode>>,
|
|
||||||
pub inputs: Vec<Input>,
|
/// This represents a mode that has been parsed from test metadata.
|
||||||
|
///
|
||||||
|
/// Mode strings can take the following form (in pseudo-regex):
|
||||||
|
///
|
||||||
|
/// ```text
|
||||||
|
/// [YEILV][+-]? (M[0123sz])? <semver>?
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// If this is provided then it takes higher priority than the modes specified in the metadata
|
||||||
|
/// file.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub modes: Option<Vec<ParsedMode>>,
|
||||||
|
|
||||||
|
/// The set of steps to run as part of this test case.
|
||||||
|
#[serde(rename = "inputs")]
|
||||||
|
pub steps: Vec<Step>,
|
||||||
|
|
||||||
|
/// An optional name of the group of tests that this test belongs to.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub group: Option<String>,
|
pub group: Option<String>,
|
||||||
|
|
||||||
|
/// An optional set of expectations and assertions to make about the transaction after it ran.
|
||||||
|
///
|
||||||
|
/// If this is not specified then the only assertion that will be ran is that the transaction
|
||||||
|
/// was successful.
|
||||||
|
///
|
||||||
|
/// This expectation that's on the case itself will be attached to the final step of the case.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub expected: Option<Expected>,
|
||||||
|
|
||||||
|
/// An optional boolean which defines if the case as a whole should be ignored. If null then the
|
||||||
|
/// case will not be ignored.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub ignore: Option<bool>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Case {
|
||||||
|
pub fn steps_iterator(&self) -> impl Iterator<Item = Step> {
|
||||||
|
let steps_len = self.steps.len();
|
||||||
|
self.steps
|
||||||
|
.clone()
|
||||||
|
.into_iter()
|
||||||
|
.enumerate()
|
||||||
|
.map(move |(idx, mut step)| {
|
||||||
|
let Step::FunctionCall(ref mut input) = step else {
|
||||||
|
return step;
|
||||||
|
};
|
||||||
|
|
||||||
|
if idx + 1 == steps_len {
|
||||||
|
if input.expected.is_none() {
|
||||||
|
input.expected = self.expected.clone();
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: What does it mean for us to have an `expected` field on the case itself
|
||||||
|
// but the final input also has an expected field that doesn't match the one on
|
||||||
|
// the case? What are we supposed to do with that final expected field on the
|
||||||
|
// case?
|
||||||
|
|
||||||
|
step
|
||||||
|
} else {
|
||||||
|
step
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn steps_iterator_for_benchmarks(
|
||||||
|
&self,
|
||||||
|
default_repeat_count: usize,
|
||||||
|
) -> Box<dyn Iterator<Item = Step> + '_> {
|
||||||
|
let contains_repeat = self
|
||||||
|
.steps_iterator()
|
||||||
|
.any(|step| matches!(&step, Step::Repeat(..)));
|
||||||
|
if contains_repeat {
|
||||||
|
Box::new(self.steps_iterator()) as Box<_>
|
||||||
|
} else {
|
||||||
|
Box::new(std::iter::once(Step::Repeat(Box::new(RepeatStep {
|
||||||
|
comment: None,
|
||||||
|
repeat: default_repeat_count,
|
||||||
|
steps: self.steps_iterator().collect(),
|
||||||
|
})))) as Box<_>
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn solc_modes(&self) -> Vec<Mode> {
|
||||||
|
match &self.modes {
|
||||||
|
Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
|
||||||
|
None => Mode::all().cloned().collect(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn deployer_address(&self) -> Address {
|
||||||
|
self.steps
|
||||||
|
.iter()
|
||||||
|
.filter_map(|step| match step {
|
||||||
|
Step::FunctionCall(input) => input.caller.as_address().copied(),
|
||||||
|
Step::BalanceAssertion(..) => None,
|
||||||
|
Step::StorageEmptyAssertion(..) => None,
|
||||||
|
Step::Repeat(..) => None,
|
||||||
|
Step::AllocateAccount(..) => None,
|
||||||
|
})
|
||||||
|
.next()
|
||||||
|
.unwrap_or(FunctionCallStep::default_caller_address())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
define_wrapper_type!(
|
define_wrapper_type!(
|
||||||
/// A wrapper type for the index of test cases found in metadata file.
|
/// A wrapper type for the index of test cases found in metadata file.
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||||
CaseIdx(usize);
|
#[serde(transparent)]
|
||||||
|
pub struct CaseIdx(usize) impl Display, FromStr;
|
||||||
);
|
);
|
||||||
|
|||||||
+186
-53
@@ -1,67 +1,200 @@
|
|||||||
use std::{
|
use std::{
|
||||||
fs::File,
|
borrow::Cow,
|
||||||
|
collections::HashMap,
|
||||||
path::{Path, PathBuf},
|
path::{Path, PathBuf},
|
||||||
};
|
};
|
||||||
|
|
||||||
use serde::{Deserialize, Serialize};
|
use itertools::Itertools;
|
||||||
|
use revive_dt_common::{
|
||||||
|
iterators::{EitherIter, FilesWithExtensionIterator},
|
||||||
|
types::{Mode, ParsedMode, ParsedTestSpecifier},
|
||||||
|
};
|
||||||
|
use tracing::{debug, warn};
|
||||||
|
|
||||||
use crate::metadata::MetadataFile;
|
use crate::{
|
||||||
|
case::{Case, CaseIdx},
|
||||||
|
metadata::{Metadata, MetadataFile},
|
||||||
|
};
|
||||||
|
|
||||||
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, Hash)]
|
#[derive(Default)]
|
||||||
pub struct Corpus {
|
pub struct Corpus {
|
||||||
pub name: String,
|
test_specifiers: HashMap<ParsedTestSpecifier, Vec<PathBuf>>,
|
||||||
pub path: PathBuf,
|
metadata_files: HashMap<PathBuf, MetadataFile>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Corpus {
|
impl Corpus {
|
||||||
/// Try to read and parse the corpus definition file at given `path`.
|
pub fn new() -> Self {
|
||||||
pub fn try_from_path(path: &Path) -> anyhow::Result<Self> {
|
Default::default()
|
||||||
let file = File::open(path)?;
|
|
||||||
Ok(serde_json::from_reader(file)?)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Scan the corpus base directory and return all tests found.
|
pub fn with_test_specifier(
|
||||||
pub fn enumerate_tests(&self) -> Vec<MetadataFile> {
|
mut self,
|
||||||
let mut tests = Vec::new();
|
test_specifier: ParsedTestSpecifier,
|
||||||
collect_metadata(&self.path, &mut tests);
|
) -> anyhow::Result<Self> {
|
||||||
|
match &test_specifier {
|
||||||
|
ParsedTestSpecifier::FileOrDirectory {
|
||||||
|
metadata_or_directory_file_path: metadata_file_path,
|
||||||
|
}
|
||||||
|
| ParsedTestSpecifier::Case {
|
||||||
|
metadata_file_path, ..
|
||||||
|
}
|
||||||
|
| ParsedTestSpecifier::CaseWithMode {
|
||||||
|
metadata_file_path, ..
|
||||||
|
} => {
|
||||||
|
let metadata_files = enumerate_metadata_files(metadata_file_path);
|
||||||
|
self.test_specifiers.insert(
|
||||||
|
test_specifier,
|
||||||
|
metadata_files
|
||||||
|
.iter()
|
||||||
|
.map(|metadata_file| metadata_file.metadata_file_path.clone())
|
||||||
|
.collect(),
|
||||||
|
);
|
||||||
|
for metadata_file in metadata_files.into_iter() {
|
||||||
|
self.metadata_files
|
||||||
|
.insert(metadata_file.metadata_file_path.clone(), metadata_file);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(self)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cases_iterator(
|
||||||
|
&self,
|
||||||
|
) -> impl Iterator<Item = (&'_ MetadataFile, CaseIdx, &'_ Case, Cow<'_, Mode>)> + '_ {
|
||||||
|
let mut iterator = Box::new(std::iter::empty())
|
||||||
|
as Box<dyn Iterator<Item = (&'_ MetadataFile, CaseIdx, &'_ Case, Cow<'_, Mode>)> + '_>;
|
||||||
|
|
||||||
|
for (test_specifier, metadata_file_paths) in self.test_specifiers.iter() {
|
||||||
|
for metadata_file_path in metadata_file_paths {
|
||||||
|
let metadata_file = self
|
||||||
|
.metadata_files
|
||||||
|
.get(metadata_file_path)
|
||||||
|
.expect("Must succeed");
|
||||||
|
|
||||||
|
match test_specifier {
|
||||||
|
ParsedTestSpecifier::FileOrDirectory { .. } => {
|
||||||
|
for (case_idx, case) in metadata_file.cases.iter().enumerate() {
|
||||||
|
let case_idx = CaseIdx::new(case_idx);
|
||||||
|
|
||||||
|
let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
|
||||||
|
let modes = match modes {
|
||||||
|
Some(modes) => EitherIter::A(
|
||||||
|
ParsedMode::many_to_modes(modes.iter())
|
||||||
|
.map(Cow::<'static, _>::Owned),
|
||||||
|
),
|
||||||
|
None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
|
||||||
|
};
|
||||||
|
|
||||||
|
iterator = Box::new(
|
||||||
|
iterator.chain(
|
||||||
|
modes
|
||||||
|
.into_iter()
|
||||||
|
.map(move |mode| (metadata_file, case_idx, case, mode)),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ParsedTestSpecifier::Case { case_idx, .. } => {
|
||||||
|
let Some(case) = metadata_file.cases.get(*case_idx) else {
|
||||||
|
warn!(
|
||||||
|
test_specifier = %test_specifier,
|
||||||
|
metadata_file_path = %metadata_file_path.display(),
|
||||||
|
case_idx = case_idx,
|
||||||
|
case_count = metadata_file.cases.len(),
|
||||||
|
"Specified case not found in metadata file"
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
let case_idx = CaseIdx::new(*case_idx);
|
||||||
|
|
||||||
|
let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
|
||||||
|
let modes = match modes {
|
||||||
|
Some(modes) => EitherIter::A(
|
||||||
|
ParsedMode::many_to_modes(modes.iter())
|
||||||
|
.map(Cow::<'static, Mode>::Owned),
|
||||||
|
),
|
||||||
|
None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
|
||||||
|
};
|
||||||
|
|
||||||
|
iterator = Box::new(
|
||||||
|
iterator.chain(
|
||||||
|
modes
|
||||||
|
.into_iter()
|
||||||
|
.map(move |mode| (metadata_file, case_idx, case, mode)),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
ParsedTestSpecifier::CaseWithMode { case_idx, mode, .. } => {
|
||||||
|
let Some(case) = metadata_file.cases.get(*case_idx) else {
|
||||||
|
warn!(
|
||||||
|
test_specifier = %test_specifier,
|
||||||
|
metadata_file_path = %metadata_file_path.display(),
|
||||||
|
case_idx = case_idx,
|
||||||
|
case_count = metadata_file.cases.len(),
|
||||||
|
"Specified case not found in metadata file"
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
let case_idx = CaseIdx::new(*case_idx);
|
||||||
|
|
||||||
|
let mode = Cow::Borrowed(mode);
|
||||||
|
iterator = Box::new(iterator.chain(std::iter::once((
|
||||||
|
metadata_file,
|
||||||
|
case_idx,
|
||||||
|
case,
|
||||||
|
mode,
|
||||||
|
))))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
iterator.unique_by(|item| (&item.0.metadata_file_path, item.1, item.3.clone()))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn metadata_file_count(&self) -> usize {
|
||||||
|
self.metadata_files.len()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn enumerate_metadata_files(path: impl AsRef<Path>) -> Vec<MetadataFile> {
|
||||||
|
let root_path = path.as_ref();
|
||||||
|
let mut tests = if !root_path.is_dir() {
|
||||||
|
Box::new(std::iter::once(root_path.to_path_buf())) as Box<dyn Iterator<Item = _>>
|
||||||
|
} else {
|
||||||
|
Box::new(
|
||||||
|
FilesWithExtensionIterator::new(root_path)
|
||||||
|
.with_use_cached_fs(true)
|
||||||
|
.with_allowed_extension("sol")
|
||||||
|
.with_allowed_extension("json"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
.map(move |metadata_file_path| (root_path, metadata_file_path))
|
||||||
|
.filter_map(|(root_path, metadata_file_path)| {
|
||||||
|
Metadata::try_from_file(&metadata_file_path)
|
||||||
|
.or_else(|| {
|
||||||
|
debug!(
|
||||||
|
discovered_from = %root_path.display(),
|
||||||
|
metadata_file_path = %metadata_file_path.display(),
|
||||||
|
"Skipping file since it doesn't contain valid metadata"
|
||||||
|
);
|
||||||
|
None
|
||||||
|
})
|
||||||
|
.map(|metadata| MetadataFile {
|
||||||
|
metadata_file_path,
|
||||||
|
corpus_file_path: root_path.to_path_buf(),
|
||||||
|
content: metadata,
|
||||||
|
})
|
||||||
|
.inspect(|metadata_file| {
|
||||||
|
debug!(
|
||||||
|
metadata_file_path = %metadata_file.relative_path().display(),
|
||||||
|
"Loaded metadata file"
|
||||||
|
)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
|
||||||
|
tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
|
||||||
tests
|
tests
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Recursively walks `path` and parses any JSON or Solidity file into a test
|
|
||||||
/// definition [Metadata].
|
|
||||||
///
|
|
||||||
/// Found tests are inserted into `tests`.
|
|
||||||
///
|
|
||||||
/// `path` is expected to be a directory.
|
|
||||||
pub fn collect_metadata(path: &Path, tests: &mut Vec<MetadataFile>) {
|
|
||||||
let dir_entry = match std::fs::read_dir(path) {
|
|
||||||
Ok(dir_entry) => dir_entry,
|
|
||||||
Err(error) => {
|
|
||||||
tracing::error!("failed to read dir '{}': {error}", path.display());
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
for entry in dir_entry {
|
|
||||||
let entry = match entry {
|
|
||||||
Ok(entry) => entry,
|
|
||||||
Err(error) => {
|
|
||||||
tracing::error!("error reading dir entry: {error}");
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let path = entry.path();
|
|
||||||
if path.is_dir() {
|
|
||||||
collect_metadata(&path, tests);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if path.is_file() {
|
|
||||||
if let Some(metadata) = MetadataFile::try_from_file(&path) {
|
|
||||||
tests.push(metadata)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,607 +0,0 @@
|
|||||||
use std::collections::HashMap;
|
|
||||||
|
|
||||||
use alloy::{
|
|
||||||
eips::BlockNumberOrTag,
|
|
||||||
json_abi::JsonAbi,
|
|
||||||
network::TransactionBuilder,
|
|
||||||
primitives::{Address, Bytes, U256},
|
|
||||||
rpc::types::TransactionRequest,
|
|
||||||
};
|
|
||||||
use semver::VersionReq;
|
|
||||||
use serde::Deserialize;
|
|
||||||
use serde_json::Value;
|
|
||||||
|
|
||||||
use revive_dt_node_interaction::EthereumNode;
|
|
||||||
|
|
||||||
use crate::metadata::ContractInstance;
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq)]
|
|
||||||
pub struct Input {
|
|
||||||
#[serde(default = "default_caller")]
|
|
||||||
pub caller: Address,
|
|
||||||
pub comment: Option<String>,
|
|
||||||
#[serde(default = "default_instance")]
|
|
||||||
pub instance: ContractInstance,
|
|
||||||
pub method: Method,
|
|
||||||
pub calldata: Option<Calldata>,
|
|
||||||
pub expected: Option<Expected>,
|
|
||||||
pub value: Option<String>,
|
|
||||||
pub storage: Option<HashMap<String, Calldata>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Deserialize, Eq, PartialEq)]
|
|
||||||
#[serde(untagged)]
|
|
||||||
pub enum Expected {
|
|
||||||
Calldata(Calldata),
|
|
||||||
Expected(ExpectedOutput),
|
|
||||||
ExpectedMany(Vec<ExpectedOutput>),
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq)]
|
|
||||||
pub struct ExpectedOutput {
|
|
||||||
compiler_version: Option<VersionReq>,
|
|
||||||
return_data: Option<Calldata>,
|
|
||||||
events: Option<Value>,
|
|
||||||
exception: Option<bool>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Deserialize, Eq, PartialEq)]
|
|
||||||
#[serde(untagged)]
|
|
||||||
pub enum Calldata {
|
|
||||||
Single(String),
|
|
||||||
Compound(Vec<String>),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Specify how the contract is called.
|
|
||||||
#[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)]
|
|
||||||
pub enum Method {
|
|
||||||
/// Initiate a deploy transaction, calling contracts constructor.
|
|
||||||
///
|
|
||||||
/// Indicated by `#deployer`.
|
|
||||||
#[serde(rename = "#deployer")]
|
|
||||||
Deployer,
|
|
||||||
|
|
||||||
/// Does not calculate and insert a function selector.
|
|
||||||
///
|
|
||||||
/// Indicated by `#fallback`.
|
|
||||||
#[default]
|
|
||||||
#[serde(rename = "#fallback")]
|
|
||||||
Fallback,
|
|
||||||
|
|
||||||
/// Call the public function with the given name.
|
|
||||||
#[serde(untagged)]
|
|
||||||
FunctionName(String),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Calldata {
|
|
||||||
pub fn find_all_contract_instances(&self, vec: &mut Vec<ContractInstance>) {
|
|
||||||
if let Calldata::Compound(compound) = self {
|
|
||||||
for item in compound {
|
|
||||||
if let Some(instance) = item.strip_suffix(".address") {
|
|
||||||
vec.push(ContractInstance::new_from(instance))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ExpectedOutput {
|
|
||||||
pub fn find_all_contract_instances(&self, vec: &mut Vec<ContractInstance>) {
|
|
||||||
if let Some(ref cd) = self.return_data {
|
|
||||||
cd.find_all_contract_instances(vec);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Input {
|
|
||||||
fn instance_to_address(
|
|
||||||
&self,
|
|
||||||
instance: &ContractInstance,
|
|
||||||
deployed_contracts: &HashMap<ContractInstance, (Address, JsonAbi)>,
|
|
||||||
) -> anyhow::Result<Address> {
|
|
||||||
deployed_contracts
|
|
||||||
.get(instance)
|
|
||||||
.map(|(a, _)| *a)
|
|
||||||
.ok_or_else(|| anyhow::anyhow!("instance {instance:?} not deployed"))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn encoded_input(
|
|
||||||
&self,
|
|
||||||
deployed_contracts: &HashMap<ContractInstance, (Address, JsonAbi)>,
|
|
||||||
chain_state_provider: &impl EthereumNode,
|
|
||||||
) -> anyhow::Result<Bytes> {
|
|
||||||
match self.method {
|
|
||||||
Method::Deployer | Method::Fallback => {
|
|
||||||
let calldata_args = match &self.calldata {
|
|
||||||
Some(Calldata::Compound(args)) => args,
|
|
||||||
_ => anyhow::bail!("Expected compound calldata for function call"),
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut calldata = Vec::<u8>::with_capacity(calldata_args.len() * 32);
|
|
||||||
for (arg_idx, arg) in calldata_args.iter().enumerate() {
|
|
||||||
match resolve_argument(arg, deployed_contracts, chain_state_provider) {
|
|
||||||
Ok(resolved) => {
|
|
||||||
calldata.extend(resolved.to_be_bytes::<32>());
|
|
||||||
}
|
|
||||||
Err(error) => {
|
|
||||||
tracing::error!(arg, arg_idx, ?error, "Failed to resolve argument");
|
|
||||||
return Err(error);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(calldata.into())
|
|
||||||
}
|
|
||||||
Method::FunctionName(ref function_name) => {
|
|
||||||
let Some(abi) = deployed_contracts.get(&self.instance).map(|(_, a)| a) else {
|
|
||||||
tracing::error!(
|
|
||||||
contract_name = self.instance.as_ref(),
|
|
||||||
available_abis = ?deployed_contracts.keys().collect::<Vec<_>>(),
|
|
||||||
"Attempted to lookup ABI of contract but it wasn't found"
|
|
||||||
);
|
|
||||||
anyhow::bail!("ABI for instance '{}' not found", self.instance.as_ref());
|
|
||||||
};
|
|
||||||
|
|
||||||
tracing::trace!("ABI found for instance: {}", &self.instance.as_ref());
|
|
||||||
|
|
||||||
// We follow the same logic that's implemented in the matter-labs-tester where they resolve
|
|
||||||
// the function name into a function selector and they assume that he function doesn't have
|
|
||||||
// any existing overloads.
|
|
||||||
// https://github.com/matter-labs/era-compiler-tester/blob/1dfa7d07cba0734ca97e24704f12dd57f6990c2c/compiler_tester/src/test/case/input/mod.rs#L158-L190
|
|
||||||
let function = abi
|
|
||||||
.functions()
|
|
||||||
.find(|function| function.name.starts_with(function_name))
|
|
||||||
.ok_or_else(|| {
|
|
||||||
anyhow::anyhow!(
|
|
||||||
"Function with name {:?} not found in ABI for the instance {:?}",
|
|
||||||
function_name,
|
|
||||||
&self.instance
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
tracing::trace!("Functions found for instance: {}", self.instance.as_ref());
|
|
||||||
|
|
||||||
let calldata_args = match &self.calldata {
|
|
||||||
Some(Calldata::Compound(args)) => args,
|
|
||||||
_ => anyhow::bail!("Expected compound calldata for function call"),
|
|
||||||
};
|
|
||||||
|
|
||||||
tracing::trace!(
|
|
||||||
"Starting encoding ABI's parameters for instance: {}",
|
|
||||||
self.instance.as_ref()
|
|
||||||
);
|
|
||||||
|
|
||||||
// Allocating a vector that we will be using for the calldata. The vector size will be:
|
|
||||||
// 4 bytes for the function selector.
|
|
||||||
// function.inputs.len() * 32 bytes for the arguments (each argument is a U256).
|
|
||||||
//
|
|
||||||
// We're using indices in the following code in order to avoid the need for us to allocate
|
|
||||||
// a new buffer for each one of the resolved arguments.
|
|
||||||
let mut calldata = Vec::<u8>::with_capacity(4 + calldata_args.len() * 32);
|
|
||||||
calldata.extend(function.selector().0);
|
|
||||||
|
|
||||||
for (arg_idx, arg) in calldata_args.iter().enumerate() {
|
|
||||||
match resolve_argument(arg, deployed_contracts, chain_state_provider) {
|
|
||||||
Ok(resolved) => {
|
|
||||||
calldata.extend(resolved.to_be_bytes::<32>());
|
|
||||||
}
|
|
||||||
Err(error) => {
|
|
||||||
tracing::error!(arg, arg_idx, ?error, "Failed to resolve argument");
|
|
||||||
return Err(error);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(calldata.into())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parse this input into a legacy transaction.
|
|
||||||
pub fn legacy_transaction(
|
|
||||||
&self,
|
|
||||||
deployed_contracts: &HashMap<ContractInstance, (Address, JsonAbi)>,
|
|
||||||
chain_state_provider: &impl EthereumNode,
|
|
||||||
) -> anyhow::Result<TransactionRequest> {
|
|
||||||
let input_data = self.encoded_input(deployed_contracts, chain_state_provider)?;
|
|
||||||
let transaction_request = TransactionRequest::default();
|
|
||||||
match self.method {
|
|
||||||
Method::Deployer => Ok(transaction_request.with_deploy_code(input_data)),
|
|
||||||
_ => Ok(transaction_request
|
|
||||||
.to(self.instance_to_address(&self.instance, deployed_contracts)?)
|
|
||||||
.input(input_data.into())),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn find_all_contract_instances(&self) -> Vec<ContractInstance> {
|
|
||||||
let mut vec = Vec::new();
|
|
||||||
vec.push(self.instance.clone());
|
|
||||||
|
|
||||||
if let Some(ref cd) = self.calldata {
|
|
||||||
cd.find_all_contract_instances(&mut vec);
|
|
||||||
}
|
|
||||||
match &self.expected {
|
|
||||||
Some(Expected::Calldata(cd)) => {
|
|
||||||
cd.find_all_contract_instances(&mut vec);
|
|
||||||
}
|
|
||||||
Some(Expected::Expected(expected)) => {
|
|
||||||
expected.find_all_contract_instances(&mut vec);
|
|
||||||
}
|
|
||||||
Some(Expected::ExpectedMany(expected)) => {
|
|
||||||
for expected in expected {
|
|
||||||
expected.find_all_contract_instances(&mut vec);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
None => {}
|
|
||||||
}
|
|
||||||
|
|
||||||
vec
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_instance() -> ContractInstance {
|
|
||||||
ContractInstance::new_from("Test")
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_caller() -> Address {
|
|
||||||
"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1".parse().unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// This function takes in the string calldata argument provided in the JSON input and resolves it
|
|
||||||
/// into a [`U256`] which is later used to construct the calldata.
|
|
||||||
///
|
|
||||||
/// # Note
|
|
||||||
///
|
|
||||||
/// This piece of code is taken from the matter-labs-tester repository which is licensed under MIT
|
|
||||||
/// or Apache. The original source code can be found here:
|
|
||||||
/// https://github.com/matter-labs/era-compiler-tester/blob/0ed598a27f6eceee7008deab3ff2311075a2ec69/compiler_tester/src/test/case/input/value.rs#L43-L146
|
|
||||||
fn resolve_argument(
|
|
||||||
value: &str,
|
|
||||||
deployed_contracts: &HashMap<ContractInstance, (Address, JsonAbi)>,
|
|
||||||
chain_state_provider: &impl EthereumNode,
|
|
||||||
) -> anyhow::Result<U256> {
|
|
||||||
if let Some(instance) = value.strip_suffix(".address") {
|
|
||||||
Ok(U256::from_be_slice(
|
|
||||||
deployed_contracts
|
|
||||||
.get(&ContractInstance::new_from(instance))
|
|
||||||
.map(|(a, _)| *a)
|
|
||||||
.ok_or_else(|| anyhow::anyhow!("Instance `{}` not found", instance))?
|
|
||||||
.as_ref(),
|
|
||||||
))
|
|
||||||
} else if let Some(value) = value.strip_prefix('-') {
|
|
||||||
let value = U256::from_str_radix(value, 10)
|
|
||||||
.map_err(|error| anyhow::anyhow!("Invalid decimal literal after `-`: {}", error))?;
|
|
||||||
if value > U256::ONE << 255u8 {
|
|
||||||
anyhow::bail!("Decimal literal after `-` is too big");
|
|
||||||
}
|
|
||||||
let value = value
|
|
||||||
.checked_sub(U256::ONE)
|
|
||||||
.ok_or_else(|| anyhow::anyhow!("`-0` is invalid literal"))?;
|
|
||||||
Ok(U256::MAX.checked_sub(value).expect("Always valid"))
|
|
||||||
} else if let Some(value) = value.strip_prefix("0x") {
|
|
||||||
Ok(U256::from_str_radix(value, 16)
|
|
||||||
.map_err(|error| anyhow::anyhow!("Invalid hexadecimal literal: {}", error))?)
|
|
||||||
} else if value == "$CHAIN_ID" {
|
|
||||||
let chain_id = chain_state_provider.chain_id()?;
|
|
||||||
Ok(U256::from(chain_id))
|
|
||||||
} else if value == "$GAS_LIMIT" {
|
|
||||||
let gas_limit = chain_state_provider.block_gas_limit(BlockNumberOrTag::Latest)?;
|
|
||||||
Ok(U256::from(gas_limit))
|
|
||||||
} else if value == "$COINBASE" {
|
|
||||||
let coinbase = chain_state_provider.block_coinbase(BlockNumberOrTag::Latest)?;
|
|
||||||
Ok(U256::from_be_slice(coinbase.as_ref()))
|
|
||||||
} else if value == "$DIFFICULTY" {
|
|
||||||
let block_difficulty = chain_state_provider.block_difficulty(BlockNumberOrTag::Latest)?;
|
|
||||||
Ok(block_difficulty)
|
|
||||||
} else if value.starts_with("$BLOCK_HASH") {
|
|
||||||
let offset: u64 = value
|
|
||||||
.split(':')
|
|
||||||
.next_back()
|
|
||||||
.and_then(|value| value.parse().ok())
|
|
||||||
.unwrap_or_default();
|
|
||||||
|
|
||||||
let current_block_number = chain_state_provider.last_block_number()?;
|
|
||||||
let desired_block_number = current_block_number - offset;
|
|
||||||
|
|
||||||
let block_hash = chain_state_provider.block_hash(desired_block_number.into())?;
|
|
||||||
|
|
||||||
Ok(U256::from_be_bytes(block_hash.0))
|
|
||||||
} else if value == "$BLOCK_NUMBER" {
|
|
||||||
let current_block_number = chain_state_provider.last_block_number()?;
|
|
||||||
Ok(U256::from(current_block_number))
|
|
||||||
} else if value == "$BLOCK_TIMESTAMP" {
|
|
||||||
let timestamp = chain_state_provider.block_timestamp(BlockNumberOrTag::Latest)?;
|
|
||||||
Ok(U256::from(timestamp))
|
|
||||||
} else {
|
|
||||||
Ok(U256::from_str_radix(value, 10)
|
|
||||||
.map_err(|error| anyhow::anyhow!("Invalid decimal literal: {}", error))?)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
|
|
||||||
use super::*;
|
|
||||||
use alloy::json_abi::JsonAbi;
|
|
||||||
use alloy_primitives::address;
|
|
||||||
use alloy_sol_types::SolValue;
|
|
||||||
use std::collections::HashMap;
|
|
||||||
|
|
||||||
struct DummyEthereumNode;
|
|
||||||
|
|
||||||
impl EthereumNode for DummyEthereumNode {
|
|
||||||
fn execute_transaction(
|
|
||||||
&self,
|
|
||||||
_: TransactionRequest,
|
|
||||||
) -> anyhow::Result<alloy::rpc::types::TransactionReceipt> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn trace_transaction(
|
|
||||||
&self,
|
|
||||||
_: alloy::rpc::types::TransactionReceipt,
|
|
||||||
) -> anyhow::Result<alloy::rpc::types::trace::geth::GethTrace> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn state_diff(
|
|
||||||
&self,
|
|
||||||
_: alloy::rpc::types::TransactionReceipt,
|
|
||||||
) -> anyhow::Result<alloy::rpc::types::trace::geth::DiffMode> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn fetch_add_nonce(&self, _: Address) -> anyhow::Result<u64> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn chain_id(&self) -> anyhow::Result<alloy_primitives::ChainId> {
|
|
||||||
Ok(0x123)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn block_gas_limit(&self, _: alloy::eips::BlockNumberOrTag) -> anyhow::Result<u128> {
|
|
||||||
Ok(0x1234)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn block_coinbase(&self, _: alloy::eips::BlockNumberOrTag) -> anyhow::Result<Address> {
|
|
||||||
Ok(Address::ZERO)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn block_difficulty(&self, _: alloy::eips::BlockNumberOrTag) -> anyhow::Result<U256> {
|
|
||||||
Ok(U256::from(0x12345u128))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn block_hash(
|
|
||||||
&self,
|
|
||||||
_: alloy::eips::BlockNumberOrTag,
|
|
||||||
) -> anyhow::Result<alloy_primitives::BlockHash> {
|
|
||||||
Ok([0xEE; 32].into())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn block_timestamp(
|
|
||||||
&self,
|
|
||||||
_: alloy::eips::BlockNumberOrTag,
|
|
||||||
) -> anyhow::Result<alloy_primitives::BlockTimestamp> {
|
|
||||||
Ok(0x123456)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn last_block_number(&self) -> anyhow::Result<alloy_primitives::BlockNumber> {
|
|
||||||
Ok(0x1234567)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_encoded_input_uint256() {
|
|
||||||
let raw_metadata = r#"
|
|
||||||
[
|
|
||||||
{
|
|
||||||
"inputs": [{"name": "value", "type": "uint256"}],
|
|
||||||
"name": "store",
|
|
||||||
"outputs": [],
|
|
||||||
"stateMutability": "nonpayable",
|
|
||||||
"type": "function"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
"#;
|
|
||||||
|
|
||||||
let parsed_abi: JsonAbi = serde_json::from_str(raw_metadata).unwrap();
|
|
||||||
let selector = parsed_abi
|
|
||||||
.function("store")
|
|
||||||
.unwrap()
|
|
||||||
.first()
|
|
||||||
.unwrap()
|
|
||||||
.selector()
|
|
||||||
.0;
|
|
||||||
|
|
||||||
let input = Input {
|
|
||||||
instance: ContractInstance::new_from("Contract"),
|
|
||||||
method: Method::FunctionName("store".to_owned()),
|
|
||||||
calldata: Some(Calldata::Compound(vec!["42".into()])),
|
|
||||||
..Default::default()
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut contracts = HashMap::new();
|
|
||||||
contracts.insert(
|
|
||||||
ContractInstance::new_from("Contract"),
|
|
||||||
(Address::ZERO, parsed_abi),
|
|
||||||
);
|
|
||||||
|
|
||||||
let encoded = input.encoded_input(&contracts, &DummyEthereumNode).unwrap();
|
|
||||||
assert!(encoded.0.starts_with(&selector));
|
|
||||||
|
|
||||||
type T = (u64,);
|
|
||||||
let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap();
|
|
||||||
assert_eq!(decoded.0, 42);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_encoded_input_address() {
|
|
||||||
let raw_abi = r#"[
|
|
||||||
{
|
|
||||||
"inputs": [{"name": "recipient", "type": "address"}],
|
|
||||||
"name": "send",
|
|
||||||
"outputs": [],
|
|
||||||
"stateMutability": "nonpayable",
|
|
||||||
"type": "function"
|
|
||||||
}
|
|
||||||
]"#;
|
|
||||||
|
|
||||||
let parsed_abi: JsonAbi = serde_json::from_str(raw_abi).unwrap();
|
|
||||||
let selector = parsed_abi
|
|
||||||
.function("send")
|
|
||||||
.unwrap()
|
|
||||||
.first()
|
|
||||||
.unwrap()
|
|
||||||
.selector()
|
|
||||||
.0;
|
|
||||||
|
|
||||||
let input: Input = Input {
|
|
||||||
instance: ContractInstance::new_from("Contract"),
|
|
||||||
method: Method::FunctionName("send".to_owned()),
|
|
||||||
calldata: Some(Calldata::Compound(vec![
|
|
||||||
"0x1000000000000000000000000000000000000001".to_string(),
|
|
||||||
])),
|
|
||||||
..Default::default()
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut contracts = HashMap::new();
|
|
||||||
contracts.insert(
|
|
||||||
ContractInstance::new_from("Contract"),
|
|
||||||
(Address::ZERO, parsed_abi),
|
|
||||||
);
|
|
||||||
|
|
||||||
let encoded = input.encoded_input(&contracts, &DummyEthereumNode).unwrap();
|
|
||||||
assert!(encoded.0.starts_with(&selector));
|
|
||||||
|
|
||||||
type T = (alloy_primitives::Address,);
|
|
||||||
let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap();
|
|
||||||
assert_eq!(
|
|
||||||
decoded.0,
|
|
||||||
address!("0x1000000000000000000000000000000000000001")
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resolver_can_resolve_chain_id_variable() {
|
|
||||||
// Arrange
|
|
||||||
let input = "$CHAIN_ID";
|
|
||||||
|
|
||||||
// Act
|
|
||||||
let resolved = resolve_argument(input, &Default::default(), &DummyEthereumNode);
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let resolved = resolved.expect("Failed to resolve argument");
|
|
||||||
assert_eq!(resolved, U256::from(DummyEthereumNode.chain_id().unwrap()))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resolver_can_resolve_gas_limit_variable() {
|
|
||||||
// Arrange
|
|
||||||
let input = "$GAS_LIMIT";
|
|
||||||
|
|
||||||
// Act
|
|
||||||
let resolved = resolve_argument(input, &Default::default(), &DummyEthereumNode);
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let resolved = resolved.expect("Failed to resolve argument");
|
|
||||||
assert_eq!(
|
|
||||||
resolved,
|
|
||||||
U256::from(
|
|
||||||
DummyEthereumNode
|
|
||||||
.block_gas_limit(Default::default())
|
|
||||||
.unwrap()
|
|
||||||
)
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resolver_can_resolve_coinbase_variable() {
|
|
||||||
// Arrange
|
|
||||||
let input = "$COINBASE";
|
|
||||||
|
|
||||||
// Act
|
|
||||||
let resolved = resolve_argument(input, &Default::default(), &DummyEthereumNode);
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let resolved = resolved.expect("Failed to resolve argument");
|
|
||||||
assert_eq!(
|
|
||||||
resolved,
|
|
||||||
U256::from_be_slice(
|
|
||||||
DummyEthereumNode
|
|
||||||
.block_coinbase(Default::default())
|
|
||||||
.unwrap()
|
|
||||||
.as_ref()
|
|
||||||
)
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resolver_can_resolve_block_difficulty_variable() {
|
|
||||||
// Arrange
|
|
||||||
let input = "$DIFFICULTY";
|
|
||||||
|
|
||||||
// Act
|
|
||||||
let resolved = resolve_argument(input, &Default::default(), &DummyEthereumNode);
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let resolved = resolved.expect("Failed to resolve argument");
|
|
||||||
assert_eq!(
|
|
||||||
resolved,
|
|
||||||
DummyEthereumNode
|
|
||||||
.block_difficulty(Default::default())
|
|
||||||
.unwrap()
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resolver_can_resolve_block_hash_variable() {
|
|
||||||
// Arrange
|
|
||||||
let input = "$BLOCK_HASH";
|
|
||||||
|
|
||||||
// Act
|
|
||||||
let resolved = resolve_argument(input, &Default::default(), &DummyEthereumNode);
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let resolved = resolved.expect("Failed to resolve argument");
|
|
||||||
assert_eq!(
|
|
||||||
resolved,
|
|
||||||
U256::from_be_bytes(DummyEthereumNode.block_hash(Default::default()).unwrap().0)
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resolver_can_resolve_block_number_variable() {
|
|
||||||
// Arrange
|
|
||||||
let input = "$BLOCK_NUMBER";
|
|
||||||
|
|
||||||
// Act
|
|
||||||
let resolved = resolve_argument(input, &Default::default(), &DummyEthereumNode);
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let resolved = resolved.expect("Failed to resolve argument");
|
|
||||||
assert_eq!(
|
|
||||||
resolved,
|
|
||||||
U256::from(DummyEthereumNode.last_block_number().unwrap())
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn resolver_can_resolve_block_timestamp_variable() {
|
|
||||||
// Arrange
|
|
||||||
let input = "$BLOCK_TIMESTAMP";
|
|
||||||
|
|
||||||
// Act
|
|
||||||
let resolved = resolve_argument(input, &Default::default(), &DummyEthereumNode);
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let resolved = resolved.expect("Failed to resolve argument");
|
|
||||||
assert_eq!(
|
|
||||||
resolved,
|
|
||||||
U256::from(
|
|
||||||
DummyEthereumNode
|
|
||||||
.block_timestamp(Default::default())
|
|
||||||
.unwrap()
|
|
||||||
)
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -2,7 +2,6 @@
|
|||||||
|
|
||||||
pub mod case;
|
pub mod case;
|
||||||
pub mod corpus;
|
pub mod corpus;
|
||||||
pub mod input;
|
|
||||||
pub mod macros;
|
|
||||||
pub mod metadata;
|
pub mod metadata;
|
||||||
pub mod mode;
|
pub mod steps;
|
||||||
|
pub mod traits;
|
||||||
|
|||||||
+356
-82
@@ -1,19 +1,26 @@
|
|||||||
use std::{
|
use std::{
|
||||||
|
cmp::Ordering,
|
||||||
collections::BTreeMap,
|
collections::BTreeMap,
|
||||||
fmt::Display,
|
fmt::Display,
|
||||||
fs::{File, read_to_string},
|
fs::File,
|
||||||
ops::Deref,
|
ops::Deref,
|
||||||
path::{Path, PathBuf},
|
path::{Path, PathBuf},
|
||||||
str::FromStr,
|
str::FromStr,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
use schemars::JsonSchema;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
use crate::{
|
use revive_common::EVMVersion;
|
||||||
case::Case,
|
use revive_dt_common::{
|
||||||
define_wrapper_type,
|
cached_fs::read_to_string,
|
||||||
mode::{Mode, SolcMode},
|
iterators::FilesWithExtensionIterator,
|
||||||
|
macros::define_wrapper_type,
|
||||||
|
types::{Mode, ParsedMode, VmIdentifier},
|
||||||
};
|
};
|
||||||
|
use tracing::error;
|
||||||
|
|
||||||
|
use crate::case::Case;
|
||||||
|
|
||||||
pub const METADATA_FILE_EXTENSION: &str = "json";
|
pub const METADATA_FILE_EXTENSION: &str = "json";
|
||||||
pub const SOLIDITY_CASE_FILE_EXTENSION: &str = "sol";
|
pub const SOLIDITY_CASE_FILE_EXTENSION: &str = "sol";
|
||||||
@@ -21,16 +28,26 @@ pub const SOLIDITY_CASE_COMMENT_MARKER: &str = "//!";
|
|||||||
|
|
||||||
#[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)]
|
#[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)]
|
||||||
pub struct MetadataFile {
|
pub struct MetadataFile {
|
||||||
pub path: PathBuf,
|
/// The path of the metadata file. This will either be a JSON or solidity file.
|
||||||
|
pub metadata_file_path: PathBuf,
|
||||||
|
|
||||||
|
/// This is the path contained within the corpus file. This could either be the path of some dir
|
||||||
|
/// or could be the actual metadata file path.
|
||||||
|
pub corpus_file_path: PathBuf,
|
||||||
|
|
||||||
|
/// The metadata contained within the file.
|
||||||
pub content: Metadata,
|
pub content: Metadata,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl MetadataFile {
|
impl MetadataFile {
|
||||||
pub fn try_from_file(path: &Path) -> Option<Self> {
|
pub fn relative_path(&self) -> &Path {
|
||||||
Metadata::try_from_file(path).map(|metadata| Self {
|
if self.corpus_file_path.is_file() {
|
||||||
path: path.to_owned(),
|
&self.corpus_file_path
|
||||||
content: metadata,
|
} else {
|
||||||
})
|
self.metadata_file_path
|
||||||
|
.strip_prefix(&self.corpus_file_path)
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -42,33 +59,83 @@ impl Deref for MetadataFile {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)]
|
/// A MatterLabs metadata file.
|
||||||
|
///
|
||||||
|
/// This defines the structure that the MatterLabs metadata files follow for defining the tests or
|
||||||
|
/// the workloads.
|
||||||
|
///
|
||||||
|
/// Each metadata file is composed of multiple test cases where each test case is isolated from the
|
||||||
|
/// others and runs in a completely different address space. Each test case is composed of a number
|
||||||
|
/// of steps and assertions that should be performed as part of the test case.
|
||||||
|
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema, Clone, Eq, PartialEq)]
|
||||||
pub struct Metadata {
|
pub struct Metadata {
|
||||||
pub cases: Vec<Case>,
|
/// This is an optional comment on the metadata file which has no impact on the execution in any
|
||||||
pub contracts: Option<BTreeMap<ContractInstance, ContractPathAndIdentifier>>,
|
/// way.
|
||||||
// TODO: Convert into wrapper types for clarity.
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub libraries: Option<BTreeMap<String, BTreeMap<String, String>>>,
|
pub comment: Option<String>,
|
||||||
|
|
||||||
|
/// An optional boolean which defines if the metadata file as a whole should be ignored. If null
|
||||||
|
/// then the metadata file will not be ignored.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub ignore: Option<bool>,
|
pub ignore: Option<bool>,
|
||||||
pub modes: Option<Vec<Mode>>,
|
|
||||||
|
/// An optional vector of targets that this Metadata file's cases can be executed on. As an
|
||||||
|
/// example, if we wish for the metadata file's cases to only be run on PolkaVM then we'd
|
||||||
|
/// specify a target of "PolkaVM" in here.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub targets: Option<Vec<VmIdentifier>>,
|
||||||
|
|
||||||
|
/// A vector of the test cases and workloads contained within the metadata file. This is their
|
||||||
|
/// primary description.
|
||||||
|
pub cases: Vec<Case>,
|
||||||
|
|
||||||
|
/// A map of all of the contracts that the test requires to run.
|
||||||
|
///
|
||||||
|
/// This is a map where the key is the name of the contract instance and the value is the
|
||||||
|
/// contract's path and ident in the file.
|
||||||
|
///
|
||||||
|
/// If any contract is to be used by the test then it must be included in here first so that the
|
||||||
|
/// framework is aware of its path, compiles it, and prepares it.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub contracts: Option<BTreeMap<ContractInstance, ContractPathAndIdent>>,
|
||||||
|
|
||||||
|
/// The set of libraries that this metadata file requires.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub libraries: Option<BTreeMap<PathBuf, BTreeMap<ContractIdent, ContractInstance>>>,
|
||||||
|
|
||||||
|
/// This represents a mode that has been parsed from test metadata.
|
||||||
|
///
|
||||||
|
/// Mode strings can take the following form (in pseudo-regex):
|
||||||
|
///
|
||||||
|
/// ```text
|
||||||
|
/// [YEILV][+-]? (M[0123sz])? <semver>?
|
||||||
|
/// ```
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub modes: Option<Vec<ParsedMode>>,
|
||||||
|
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
#[schemars(skip)]
|
||||||
pub file_path: Option<PathBuf>,
|
pub file_path: Option<PathBuf>,
|
||||||
|
|
||||||
|
/// This field specifies an EVM version requirement that the test case has where the test might
|
||||||
|
/// be run of the evm version of the nodes match the evm version specified here.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub required_evm_version: Option<EvmVersionRequirement>,
|
||||||
|
|
||||||
|
/// A set of compilation directives that will be passed to the compiler whenever the contracts
|
||||||
|
/// for the test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`]
|
||||||
|
/// is just a filter for when a test can run whereas this is an instruction to the compiler.
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub compiler_directives: Option<CompilationDirectives>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Metadata {
|
impl Metadata {
|
||||||
/// Returns the solc modes of this metadata, inserting a default mode if not present.
|
/// Returns the modes that we should test from this metadata.
|
||||||
pub fn solc_modes(&self) -> Vec<SolcMode> {
|
pub fn solc_modes(&self) -> Vec<Mode> {
|
||||||
self.modes
|
match &self.modes {
|
||||||
.to_owned()
|
Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
|
||||||
.unwrap_or_else(|| vec![Mode::Solidity(Default::default())])
|
None => Mode::all().cloned().collect(),
|
||||||
.iter()
|
|
||||||
.filter_map(|mode| match mode {
|
|
||||||
Mode::Solidity(solc_mode) => Some(solc_mode),
|
|
||||||
Mode::Unknown(mode) => {
|
|
||||||
tracing::debug!("compiler: ignoring unknown mode '{mode}'");
|
|
||||||
None
|
|
||||||
}
|
}
|
||||||
})
|
|
||||||
.cloned()
|
|
||||||
.collect()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the base directory of this metadata.
|
/// Returns the base directory of this metadata.
|
||||||
@@ -84,7 +151,7 @@ impl Metadata {
|
|||||||
/// Returns the contract sources with canonicalized paths for the files
|
/// Returns the contract sources with canonicalized paths for the files
|
||||||
pub fn contract_sources(
|
pub fn contract_sources(
|
||||||
&self,
|
&self,
|
||||||
) -> anyhow::Result<BTreeMap<ContractInstance, ContractPathAndIdentifier>> {
|
) -> anyhow::Result<BTreeMap<ContractInstance, ContractPathAndIdent>> {
|
||||||
let directory = self.directory()?;
|
let directory = self.directory()?;
|
||||||
let mut sources = BTreeMap::new();
|
let mut sources = BTreeMap::new();
|
||||||
let Some(contracts) = &self.contracts else {
|
let Some(contracts) = &self.contracts else {
|
||||||
@@ -93,19 +160,27 @@ impl Metadata {
|
|||||||
|
|
||||||
for (
|
for (
|
||||||
alias,
|
alias,
|
||||||
ContractPathAndIdentifier {
|
ContractPathAndIdent {
|
||||||
contract_source_path,
|
contract_source_path,
|
||||||
contract_ident,
|
contract_ident,
|
||||||
},
|
},
|
||||||
) in contracts
|
) in contracts
|
||||||
{
|
{
|
||||||
let alias = alias.clone();
|
let alias = alias.clone();
|
||||||
let absolute_path = directory.join(contract_source_path).canonicalize()?;
|
let absolute_path = directory
|
||||||
|
.join(contract_source_path)
|
||||||
|
.canonicalize()
|
||||||
|
.map_err(|error| {
|
||||||
|
anyhow::anyhow!(
|
||||||
|
"Failed to canonicalize contract source path '{}': {error}",
|
||||||
|
directory.join(contract_source_path).display()
|
||||||
|
)
|
||||||
|
})?;
|
||||||
let contract_ident = contract_ident.clone();
|
let contract_ident = contract_ident.clone();
|
||||||
|
|
||||||
sources.insert(
|
sources.insert(
|
||||||
alias,
|
alias,
|
||||||
ContractPathAndIdentifier {
|
ContractPathAndIdent {
|
||||||
contract_source_path: absolute_path,
|
contract_source_path: absolute_path,
|
||||||
contract_ident,
|
contract_ident,
|
||||||
},
|
},
|
||||||
@@ -124,10 +199,7 @@ impl Metadata {
|
|||||||
pub fn try_from_file(path: &Path) -> Option<Self> {
|
pub fn try_from_file(path: &Path) -> Option<Self> {
|
||||||
assert!(path.is_file(), "not a file: {}", path.display());
|
assert!(path.is_file(), "not a file: {}", path.display());
|
||||||
|
|
||||||
let Some(file_extension) = path.extension() else {
|
let file_extension = path.extension()?;
|
||||||
tracing::debug!("skipping corpus file: {}", path.display());
|
|
||||||
return None;
|
|
||||||
};
|
|
||||||
|
|
||||||
if file_extension == METADATA_FILE_EXTENSION {
|
if file_extension == METADATA_FILE_EXTENSION {
|
||||||
return Self::try_from_json(path);
|
return Self::try_from_json(path);
|
||||||
@@ -137,18 +209,12 @@ impl Metadata {
|
|||||||
return Self::try_from_solidity(path);
|
return Self::try_from_solidity(path);
|
||||||
}
|
}
|
||||||
|
|
||||||
tracing::debug!("ignoring invalid corpus file: {}", path.display());
|
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
|
||||||
fn try_from_json(path: &Path) -> Option<Self> {
|
fn try_from_json(path: &Path) -> Option<Self> {
|
||||||
let file = File::open(path)
|
let file = File::open(path)
|
||||||
.inspect_err(|error| {
|
.inspect_err(|err| error!(path = %path.display(), %err, "Failed to open file"))
|
||||||
tracing::error!(
|
|
||||||
"opening JSON test metadata file '{}' error: {error}",
|
|
||||||
path.display()
|
|
||||||
);
|
|
||||||
})
|
|
||||||
.ok()?;
|
.ok()?;
|
||||||
|
|
||||||
match serde_json::from_reader::<_, Metadata>(file) {
|
match serde_json::from_reader::<_, Metadata>(file) {
|
||||||
@@ -156,11 +222,8 @@ impl Metadata {
|
|||||||
metadata.file_path = Some(path.to_path_buf());
|
metadata.file_path = Some(path.to_path_buf());
|
||||||
Some(metadata)
|
Some(metadata)
|
||||||
}
|
}
|
||||||
Err(error) => {
|
Err(err) => {
|
||||||
tracing::error!(
|
error!(path = %path.display(), %err, "Deserialization of metadata failed");
|
||||||
"parsing JSON test metadata file '{}' error: {error}",
|
|
||||||
path.display()
|
|
||||||
);
|
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -168,12 +231,7 @@ impl Metadata {
|
|||||||
|
|
||||||
fn try_from_solidity(path: &Path) -> Option<Self> {
|
fn try_from_solidity(path: &Path) -> Option<Self> {
|
||||||
let spec = read_to_string(path)
|
let spec = read_to_string(path)
|
||||||
.inspect_err(|error| {
|
.inspect_err(|err| error!(path = %path.display(), %err, "Failed to read file content"))
|
||||||
tracing::error!(
|
|
||||||
"opening JSON test metadata file '{}' error: {error}",
|
|
||||||
path.display()
|
|
||||||
);
|
|
||||||
})
|
|
||||||
.ok()?
|
.ok()?
|
||||||
.lines()
|
.lines()
|
||||||
.filter_map(|line| line.strip_prefix(SOLIDITY_CASE_COMMENT_MARKER))
|
.filter_map(|line| line.strip_prefix(SOLIDITY_CASE_COMMENT_MARKER))
|
||||||
@@ -191,43 +249,69 @@ impl Metadata {
|
|||||||
metadata.file_path = Some(path.to_path_buf());
|
metadata.file_path = Some(path.to_path_buf());
|
||||||
metadata.contracts = Some(
|
metadata.contracts = Some(
|
||||||
[(
|
[(
|
||||||
ContractInstance::new_from("test"),
|
ContractInstance::new("Test"),
|
||||||
ContractPathAndIdentifier {
|
ContractPathAndIdent {
|
||||||
contract_source_path: path.to_path_buf(),
|
contract_source_path: path.to_path_buf(),
|
||||||
contract_ident: ContractIdent::new_from("Test"),
|
contract_ident: ContractIdent::new("Test"),
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
.into(),
|
.into(),
|
||||||
);
|
);
|
||||||
Some(metadata)
|
Some(metadata)
|
||||||
}
|
}
|
||||||
Err(error) => {
|
Err(err) => {
|
||||||
tracing::error!(
|
error!(path = %path.display(), %err, "Failed to deserialize metadata");
|
||||||
"parsing Solidity test metadata file '{}' error: '{error}' from data: {spec}",
|
|
||||||
path.display()
|
|
||||||
);
|
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Returns an iterator over all of the solidity files that needs to be compiled for this
|
||||||
|
/// [`Metadata`] object
|
||||||
|
///
|
||||||
|
/// Note: if the metadata is contained within a solidity file then this is the only file that
|
||||||
|
/// we wish to compile since this is a self-contained test. Otherwise, if it's a JSON file
|
||||||
|
/// then we need to compile all of the contracts that are in the directory since imports are
|
||||||
|
/// allowed in there.
|
||||||
|
pub fn files_to_compile(&self) -> anyhow::Result<Box<dyn Iterator<Item = PathBuf>>> {
|
||||||
|
let Some(ref metadata_file_path) = self.file_path else {
|
||||||
|
anyhow::bail!("The metadata file path is not defined");
|
||||||
|
};
|
||||||
|
if metadata_file_path
|
||||||
|
.extension()
|
||||||
|
.is_some_and(|extension| extension.eq_ignore_ascii_case("sol"))
|
||||||
|
{
|
||||||
|
Ok(Box::new(std::iter::once(metadata_file_path.clone())))
|
||||||
|
} else {
|
||||||
|
Ok(Box::new(
|
||||||
|
FilesWithExtensionIterator::new(self.directory()?)
|
||||||
|
.with_allowed_extension("sol")
|
||||||
|
.with_use_cached_fs(true),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
define_wrapper_type!(
|
define_wrapper_type!(
|
||||||
/// Represents a contract instance found a metadata file.
|
/// Represents a contract instance found a metadata file.
|
||||||
///
|
///
|
||||||
/// Typically, this is used as the key to the "contracts" field of metadata files.
|
/// Typically, this is used as the key to the "contracts" field of metadata files.
|
||||||
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
#[derive(
|
||||||
|
Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema
|
||||||
|
)]
|
||||||
#[serde(transparent)]
|
#[serde(transparent)]
|
||||||
ContractInstance(String);
|
pub struct ContractInstance(String) impl Display;
|
||||||
);
|
);
|
||||||
|
|
||||||
define_wrapper_type!(
|
define_wrapper_type!(
|
||||||
/// Represents a contract identifier found a metadata file.
|
/// Represents a contract identifier found a metadata file.
|
||||||
///
|
///
|
||||||
/// A contract identifier is the name of the contract in the source code.
|
/// A contract identifier is the name of the contract in the source code.
|
||||||
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
#[derive(
|
||||||
|
Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema
|
||||||
|
)]
|
||||||
#[serde(transparent)]
|
#[serde(transparent)]
|
||||||
ContractIdent(String);
|
pub struct ContractIdent(String) impl Display;
|
||||||
);
|
);
|
||||||
|
|
||||||
/// Represents an identifier used for contracts.
|
/// Represents an identifier used for contracts.
|
||||||
@@ -237,9 +321,11 @@ define_wrapper_type!(
|
|||||||
/// ```text
|
/// ```text
|
||||||
/// ${path}:${contract_ident}
|
/// ${path}:${contract_ident}
|
||||||
/// ```
|
/// ```
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
#[derive(
|
||||||
|
Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema,
|
||||||
|
)]
|
||||||
#[serde(try_from = "String", into = "String")]
|
#[serde(try_from = "String", into = "String")]
|
||||||
pub struct ContractPathAndIdentifier {
|
pub struct ContractPathAndIdent {
|
||||||
/// The path of the contract source code relative to the directory containing the metadata file.
|
/// The path of the contract source code relative to the directory containing the metadata file.
|
||||||
pub contract_source_path: PathBuf,
|
pub contract_source_path: PathBuf,
|
||||||
|
|
||||||
@@ -247,7 +333,7 @@ pub struct ContractPathAndIdentifier {
|
|||||||
pub contract_ident: ContractIdent,
|
pub contract_ident: ContractIdent,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Display for ContractPathAndIdentifier {
|
impl Display for ContractPathAndIdent {
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
write!(
|
write!(
|
||||||
f,
|
f,
|
||||||
@@ -258,7 +344,7 @@ impl Display for ContractPathAndIdentifier {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl FromStr for ContractPathAndIdentifier {
|
impl FromStr for ContractPathAndIdent {
|
||||||
type Err = anyhow::Error;
|
type Err = anyhow::Error;
|
||||||
|
|
||||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
@@ -281,20 +367,26 @@ impl FromStr for ContractPathAndIdentifier {
|
|||||||
identifier = Some(next_item.to_owned())
|
identifier = Some(next_item.to_owned())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
let Some(path) = path else {
|
match (path, identifier) {
|
||||||
anyhow::bail!("Path is not defined");
|
(Some(path), Some(identifier)) => Ok(Self {
|
||||||
};
|
contract_source_path: PathBuf::from(path),
|
||||||
let Some(identifier) = identifier else {
|
contract_ident: ContractIdent::new(identifier),
|
||||||
anyhow::bail!("Contract identifier is not defined")
|
}),
|
||||||
|
(None, Some(path)) | (Some(path), None) => {
|
||||||
|
let Some(identifier) = path.split(".").next().map(ToOwned::to_owned) else {
|
||||||
|
anyhow::bail!("Failed to find identifier");
|
||||||
};
|
};
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
contract_source_path: PathBuf::from(path),
|
contract_source_path: PathBuf::from(path),
|
||||||
contract_ident: ContractIdent::new(identifier),
|
contract_ident: ContractIdent::new(identifier),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
(None, None) => anyhow::bail!("Failed to find the path and identifier"),
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl TryFrom<String> for ContractPathAndIdentifier {
|
impl TryFrom<String> for ContractPathAndIdent {
|
||||||
type Error = anyhow::Error;
|
type Error = anyhow::Error;
|
||||||
|
|
||||||
fn try_from(value: String) -> Result<Self, Self::Error> {
|
fn try_from(value: String) -> Result<Self, Self::Error> {
|
||||||
@@ -302,12 +394,194 @@ impl TryFrom<String> for ContractPathAndIdentifier {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<ContractPathAndIdentifier> for String {
|
impl From<ContractPathAndIdent> for String {
|
||||||
fn from(value: ContractPathAndIdentifier) -> Self {
|
fn from(value: ContractPathAndIdent) -> Self {
|
||||||
value.to_string()
|
value.to_string()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// An EVM version requirement that the test case has. This gets serialized and deserialized from
|
||||||
|
/// and into [`String`]. This follows a simple format of (>=|<=|=|>|<) followed by a string of the
|
||||||
|
/// EVM version.
|
||||||
|
///
|
||||||
|
/// When specified, the framework will only run the test if the node's EVM version matches that
|
||||||
|
/// required by the metadata file.
|
||||||
|
#[derive(
|
||||||
|
Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema,
|
||||||
|
)]
|
||||||
|
#[serde(try_from = "String", into = "String")]
|
||||||
|
pub struct EvmVersionRequirement {
|
||||||
|
ordering: Ordering,
|
||||||
|
or_equal: bool,
|
||||||
|
evm_version: EVMVersion,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl EvmVersionRequirement {
|
||||||
|
pub fn new_greater_than_or_equals(version: EVMVersion) -> Self {
|
||||||
|
Self {
|
||||||
|
ordering: Ordering::Greater,
|
||||||
|
or_equal: true,
|
||||||
|
evm_version: version,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn new_greater_than(version: EVMVersion) -> Self {
|
||||||
|
Self {
|
||||||
|
ordering: Ordering::Greater,
|
||||||
|
or_equal: false,
|
||||||
|
evm_version: version,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn new_equals(version: EVMVersion) -> Self {
|
||||||
|
Self {
|
||||||
|
ordering: Ordering::Equal,
|
||||||
|
or_equal: false,
|
||||||
|
evm_version: version,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn new_less_than(version: EVMVersion) -> Self {
|
||||||
|
Self {
|
||||||
|
ordering: Ordering::Less,
|
||||||
|
or_equal: false,
|
||||||
|
evm_version: version,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn new_less_than_or_equals(version: EVMVersion) -> Self {
|
||||||
|
Self {
|
||||||
|
ordering: Ordering::Less,
|
||||||
|
or_equal: true,
|
||||||
|
evm_version: version,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn matches(&self, other: &EVMVersion) -> bool {
|
||||||
|
let ordering = other.cmp(&self.evm_version);
|
||||||
|
ordering == self.ordering || (self.or_equal && matches!(ordering, Ordering::Equal))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Display for EvmVersionRequirement {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
let Self {
|
||||||
|
ordering,
|
||||||
|
or_equal,
|
||||||
|
evm_version,
|
||||||
|
} = self;
|
||||||
|
match ordering {
|
||||||
|
Ordering::Less => write!(f, "<")?,
|
||||||
|
Ordering::Equal => write!(f, "=")?,
|
||||||
|
Ordering::Greater => write!(f, ">")?,
|
||||||
|
}
|
||||||
|
if *or_equal && !matches!(ordering, Ordering::Equal) {
|
||||||
|
write!(f, "=")?;
|
||||||
|
}
|
||||||
|
write!(f, "{evm_version}")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromStr for EvmVersionRequirement {
|
||||||
|
type Err = anyhow::Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
match s.as_bytes() {
|
||||||
|
[b'>', b'=', remaining @ ..] => Ok(Self {
|
||||||
|
ordering: Ordering::Greater,
|
||||||
|
or_equal: true,
|
||||||
|
evm_version: str::from_utf8(remaining)?.try_into()?,
|
||||||
|
}),
|
||||||
|
[b'>', remaining @ ..] => Ok(Self {
|
||||||
|
ordering: Ordering::Greater,
|
||||||
|
or_equal: false,
|
||||||
|
evm_version: str::from_utf8(remaining)?.try_into()?,
|
||||||
|
}),
|
||||||
|
[b'<', b'=', remaining @ ..] => Ok(Self {
|
||||||
|
ordering: Ordering::Less,
|
||||||
|
or_equal: true,
|
||||||
|
evm_version: str::from_utf8(remaining)?.try_into()?,
|
||||||
|
}),
|
||||||
|
[b'<', remaining @ ..] => Ok(Self {
|
||||||
|
ordering: Ordering::Less,
|
||||||
|
or_equal: false,
|
||||||
|
evm_version: str::from_utf8(remaining)?.try_into()?,
|
||||||
|
}),
|
||||||
|
[b'=', remaining @ ..] => Ok(Self {
|
||||||
|
ordering: Ordering::Equal,
|
||||||
|
or_equal: false,
|
||||||
|
evm_version: str::from_utf8(remaining)?.try_into()?,
|
||||||
|
}),
|
||||||
|
_ => anyhow::bail!("Invalid EVM version requirement {s}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TryFrom<String> for EvmVersionRequirement {
|
||||||
|
type Error = anyhow::Error;
|
||||||
|
|
||||||
|
fn try_from(value: String) -> Result<Self, Self::Error> {
|
||||||
|
value.parse()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<EvmVersionRequirement> for String {
|
||||||
|
fn from(value: EvmVersionRequirement) -> Self {
|
||||||
|
value.to_string()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A set of compilation directives that will be passed to the compiler whenever the contracts for
|
||||||
|
/// the test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`] is
|
||||||
|
/// just a filter for when a test can run whereas this is an instruction to the compiler.
|
||||||
|
/// Defines how the compiler should handle revert strings.
|
||||||
|
#[derive(
|
||||||
|
Clone,
|
||||||
|
Debug,
|
||||||
|
Copy,
|
||||||
|
PartialEq,
|
||||||
|
Eq,
|
||||||
|
PartialOrd,
|
||||||
|
Ord,
|
||||||
|
Hash,
|
||||||
|
Default,
|
||||||
|
Serialize,
|
||||||
|
Deserialize,
|
||||||
|
JsonSchema,
|
||||||
|
)]
|
||||||
|
pub struct CompilationDirectives {
|
||||||
|
/// Defines how the revert strings should be handled.
|
||||||
|
pub revert_string_handling: Option<RevertString>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Defines how the compiler should handle revert strings.
|
||||||
|
#[derive(
|
||||||
|
Clone,
|
||||||
|
Debug,
|
||||||
|
Copy,
|
||||||
|
PartialEq,
|
||||||
|
Eq,
|
||||||
|
PartialOrd,
|
||||||
|
Ord,
|
||||||
|
Hash,
|
||||||
|
Default,
|
||||||
|
Serialize,
|
||||||
|
Deserialize,
|
||||||
|
JsonSchema,
|
||||||
|
)]
|
||||||
|
#[serde(rename_all = "camelCase")]
|
||||||
|
pub enum RevertString {
|
||||||
|
/// The default handling of the revert strings.
|
||||||
|
#[default]
|
||||||
|
Default,
|
||||||
|
/// The debug handling of the revert strings.
|
||||||
|
Debug,
|
||||||
|
/// Strip the revert strings.
|
||||||
|
Strip,
|
||||||
|
/// Provide verbose debug strings for the revert string.
|
||||||
|
VerboseDebug,
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod test {
|
mod test {
|
||||||
use super::*;
|
use super::*;
|
||||||
@@ -318,7 +592,7 @@ mod test {
|
|||||||
let string = "ERC20/ERC20.sol:ERC20";
|
let string = "ERC20/ERC20.sol:ERC20";
|
||||||
|
|
||||||
// Act
|
// Act
|
||||||
let identifier = ContractPathAndIdentifier::from_str(string);
|
let identifier = ContractPathAndIdent::from_str(string);
|
||||||
|
|
||||||
// Assert
|
// Assert
|
||||||
let identifier = identifier.expect("Failed to parse");
|
let identifier = identifier.expect("Failed to parse");
|
||||||
|
|||||||
@@ -1,96 +0,0 @@
|
|||||||
use semver::Version;
|
|
||||||
use serde::de::Deserializer;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
/// Specifies the compilation mode of the test artifact.
|
|
||||||
#[derive(Hash, Debug, Clone, Eq, PartialEq)]
|
|
||||||
pub enum Mode {
|
|
||||||
Solidity(SolcMode),
|
|
||||||
Unknown(String),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Specify Solidity specific compiler options.
|
|
||||||
#[derive(Hash, Debug, Default, Clone, Eq, PartialEq, Serialize, Deserialize)]
|
|
||||||
pub struct SolcMode {
|
|
||||||
pub solc_version: Option<semver::VersionReq>,
|
|
||||||
solc_optimize: Option<bool>,
|
|
||||||
pub llvm_optimizer_settings: Vec<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SolcMode {
|
|
||||||
/// Try to parse a mode string into a solc mode.
|
|
||||||
/// Returns `None` if the string wasn't a solc YUL mode string.
|
|
||||||
///
|
|
||||||
/// The mode string is expected to start with the `Y` ID (YUL ID),
|
|
||||||
/// optionally followed by `+` or `-` for the solc optimizer settings.
|
|
||||||
///
|
|
||||||
/// Options can be separated by a whitespace contain the following
|
|
||||||
/// - A solc `SemVer version requirement` string
|
|
||||||
/// - One or more `-OX` where X is a supposed to be an LLVM opt mode
|
|
||||||
pub fn parse_from_mode_string(mode_string: &str) -> Option<Self> {
|
|
||||||
let mut result = Self::default();
|
|
||||||
|
|
||||||
let mut parts = mode_string.trim().split(" ");
|
|
||||||
|
|
||||||
match parts.next()? {
|
|
||||||
"Y" => {}
|
|
||||||
"Y+" => result.solc_optimize = Some(true),
|
|
||||||
"Y-" => result.solc_optimize = Some(false),
|
|
||||||
_ => return None,
|
|
||||||
}
|
|
||||||
|
|
||||||
for part in parts {
|
|
||||||
if let Ok(solc_version) = semver::VersionReq::parse(part) {
|
|
||||||
result.solc_version = Some(solc_version);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if let Some(level) = part.strip_prefix("-O") {
|
|
||||||
result.llvm_optimizer_settings.push(level.to_string());
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
panic!("the YUL mode string {mode_string} failed to parse, invalid part: {part}")
|
|
||||||
}
|
|
||||||
|
|
||||||
Some(result)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns whether to enable the solc optimizer.
|
|
||||||
pub fn solc_optimize(&self) -> bool {
|
|
||||||
self.solc_optimize.unwrap_or(true)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Calculate the latest matching solc patch version. Returns:
|
|
||||||
/// - `latest_supported` if no version request was specified.
|
|
||||||
/// - A matching version with the same minor version as `latest_supported`, if any.
|
|
||||||
/// - `None` if no minor version of the `latest_supported` version matches.
|
|
||||||
pub fn last_patch_version(&self, latest_supported: &Version) -> Option<Version> {
|
|
||||||
let Some(version_req) = self.solc_version.as_ref() else {
|
|
||||||
return Some(latest_supported.to_owned());
|
|
||||||
};
|
|
||||||
|
|
||||||
// lgtm
|
|
||||||
for patch in (0..latest_supported.patch + 1).rev() {
|
|
||||||
let version = Version::new(0, latest_supported.minor, patch);
|
|
||||||
if version_req.matches(&version) {
|
|
||||||
return Some(version);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'de> Deserialize<'de> for Mode {
|
|
||||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
|
||||||
where
|
|
||||||
D: Deserializer<'de>,
|
|
||||||
{
|
|
||||||
let mode_string = String::deserialize(deserializer)?;
|
|
||||||
|
|
||||||
if let Some(solc_mode) = SolcMode::parse_from_mode_string(&mode_string) {
|
|
||||||
return Ok(Self::Solidity(solc_mode));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Self::Unknown(mode_string))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,176 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
use std::pin::Pin;
|
||||||
|
|
||||||
|
use alloy::eips::BlockNumberOrTag;
|
||||||
|
use alloy::json_abi::JsonAbi;
|
||||||
|
use alloy::primitives::TxHash;
|
||||||
|
use alloy::primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, U256};
|
||||||
|
use anyhow::Result;
|
||||||
|
|
||||||
|
use crate::metadata::{ContractIdent, ContractInstance};
|
||||||
|
|
||||||
|
/// A trait of the interface are required to implement to be used by the resolution logic that this
|
||||||
|
/// crate implements to go from string calldata and into the bytes calldata.
|
||||||
|
pub trait ResolverApi {
|
||||||
|
/// Returns the ID of the chain that the node is on.
|
||||||
|
fn chain_id(&self) -> Pin<Box<dyn Future<Output = Result<ChainId>> + '_>>;
|
||||||
|
|
||||||
|
/// Returns the gas price for the specified transaction.
|
||||||
|
fn transaction_gas_price(
|
||||||
|
&self,
|
||||||
|
tx_hash: TxHash,
|
||||||
|
) -> Pin<Box<dyn Future<Output = Result<u128>> + '_>>;
|
||||||
|
|
||||||
|
// TODO: This is currently a u128 due to substrate needing more than 64 bits for its gas limit
|
||||||
|
// when we implement the changes to the gas we need to adjust this to be a u64.
|
||||||
|
/// Returns the gas limit of the specified block.
|
||||||
|
fn block_gas_limit(
|
||||||
|
&self,
|
||||||
|
number: BlockNumberOrTag,
|
||||||
|
) -> Pin<Box<dyn Future<Output = Result<u128>> + '_>>;
|
||||||
|
|
||||||
|
/// Returns the coinbase of the specified block.
|
||||||
|
fn block_coinbase(
|
||||||
|
&self,
|
||||||
|
number: BlockNumberOrTag,
|
||||||
|
) -> Pin<Box<dyn Future<Output = Result<Address>> + '_>>;
|
||||||
|
|
||||||
|
/// Returns the difficulty of the specified block.
|
||||||
|
fn block_difficulty(
|
||||||
|
&self,
|
||||||
|
number: BlockNumberOrTag,
|
||||||
|
) -> Pin<Box<dyn Future<Output = Result<U256>> + '_>>;
|
||||||
|
|
||||||
|
/// Returns the base fee of the specified block.
|
||||||
|
fn block_base_fee(
|
||||||
|
&self,
|
||||||
|
number: BlockNumberOrTag,
|
||||||
|
) -> Pin<Box<dyn Future<Output = Result<u64>> + '_>>;
|
||||||
|
|
||||||
|
/// Returns the hash of the specified block.
|
||||||
|
fn block_hash(
|
||||||
|
&self,
|
||||||
|
number: BlockNumberOrTag,
|
||||||
|
) -> Pin<Box<dyn Future<Output = Result<BlockHash>> + '_>>;
|
||||||
|
|
||||||
|
/// Returns the timestamp of the specified block,
|
||||||
|
fn block_timestamp(
|
||||||
|
&self,
|
||||||
|
number: BlockNumberOrTag,
|
||||||
|
) -> Pin<Box<dyn Future<Output = Result<BlockTimestamp>> + '_>>;
|
||||||
|
|
||||||
|
/// Returns the number of the last block.
|
||||||
|
fn last_block_number(&self) -> Pin<Box<dyn Future<Output = Result<BlockNumber>> + '_>>;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, Default)]
|
||||||
|
/// Contextual information required by the code that's performing the resolution.
|
||||||
|
pub struct ResolutionContext<'a> {
|
||||||
|
/// When provided the contracts provided here will be used for resolutions.
|
||||||
|
deployed_contracts: Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
|
||||||
|
|
||||||
|
/// When provided the variables in here will be used for performing resolutions.
|
||||||
|
variables: Option<&'a HashMap<String, U256>>,
|
||||||
|
|
||||||
|
/// When provided this block number will be treated as the tip of the chain.
|
||||||
|
block_number: Option<&'a BlockNumber>,
|
||||||
|
|
||||||
|
/// When provided the resolver will use this transaction hash for all of its resolutions.
|
||||||
|
transaction_hash: Option<&'a TxHash>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> ResolutionContext<'a> {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Default::default()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn new_from_parts(
|
||||||
|
deployed_contracts: impl Into<
|
||||||
|
Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
|
||||||
|
>,
|
||||||
|
variables: impl Into<Option<&'a HashMap<String, U256>>>,
|
||||||
|
block_number: impl Into<Option<&'a BlockNumber>>,
|
||||||
|
transaction_hash: impl Into<Option<&'a TxHash>>,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
deployed_contracts: deployed_contracts.into(),
|
||||||
|
variables: variables.into(),
|
||||||
|
block_number: block_number.into(),
|
||||||
|
transaction_hash: transaction_hash.into(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_deployed_contracts(
|
||||||
|
mut self,
|
||||||
|
deployed_contracts: impl Into<
|
||||||
|
Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
|
||||||
|
>,
|
||||||
|
) -> Self {
|
||||||
|
self.deployed_contracts = deployed_contracts.into();
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_variables(
|
||||||
|
mut self,
|
||||||
|
variables: impl Into<Option<&'a HashMap<String, U256>>>,
|
||||||
|
) -> Self {
|
||||||
|
self.variables = variables.into();
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_block_number(mut self, block_number: impl Into<Option<&'a BlockNumber>>) -> Self {
|
||||||
|
self.block_number = block_number.into();
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_transaction_hash(
|
||||||
|
mut self,
|
||||||
|
transaction_hash: impl Into<Option<&'a TxHash>>,
|
||||||
|
) -> Self {
|
||||||
|
self.transaction_hash = transaction_hash.into();
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn resolve_block_number(&self, number: BlockNumberOrTag) -> BlockNumberOrTag {
|
||||||
|
match self.block_number {
|
||||||
|
Some(block_number) => match number {
|
||||||
|
BlockNumberOrTag::Latest => BlockNumberOrTag::Number(*block_number),
|
||||||
|
n @ (BlockNumberOrTag::Finalized
|
||||||
|
| BlockNumberOrTag::Safe
|
||||||
|
| BlockNumberOrTag::Earliest
|
||||||
|
| BlockNumberOrTag::Pending
|
||||||
|
| BlockNumberOrTag::Number(_)) => n,
|
||||||
|
},
|
||||||
|
None => number,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn deployed_contract(
|
||||||
|
&self,
|
||||||
|
instance: &ContractInstance,
|
||||||
|
) -> Option<&(ContractIdent, Address, JsonAbi)> {
|
||||||
|
self.deployed_contracts
|
||||||
|
.and_then(|deployed_contracts| deployed_contracts.get(instance))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn deployed_contract_address(&self, instance: &ContractInstance) -> Option<&Address> {
|
||||||
|
self.deployed_contract(instance).map(|(_, a, _)| a)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn deployed_contract_abi(&self, instance: &ContractInstance) -> Option<&JsonAbi> {
|
||||||
|
self.deployed_contract(instance).map(|(_, _, a)| a)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn variable(&self, name: impl AsRef<str>) -> Option<&U256> {
|
||||||
|
self.variables
|
||||||
|
.and_then(|variables| variables.get(name.as_ref()))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn tip_block_number(&self) -> Option<&'a BlockNumber> {
|
||||||
|
self.block_number
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn transaction_hash(&self) -> Option<&'a TxHash> {
|
||||||
|
self.transaction_hash
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -9,9 +9,14 @@ repository.workspace = true
|
|||||||
rust-version.workspace = true
|
rust-version.workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
revive-common = { workspace = true }
|
||||||
|
|
||||||
|
revive-dt-format = { workspace = true }
|
||||||
|
revive-dt-report = { workspace = true }
|
||||||
|
|
||||||
alloy = { workspace = true }
|
alloy = { workspace = true }
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
futures = { workspace = true }
|
futures = { workspace = true }
|
||||||
tracing = { workspace = true }
|
|
||||||
once_cell = { workspace = true }
|
[lints]
|
||||||
tokio = { workspace = true }
|
workspace = true
|
||||||
|
|||||||
@@ -1,220 +0,0 @@
|
|||||||
//! The alloy crate __requires__ a tokio runtime.
|
|
||||||
//! We contain any async rust right here.
|
|
||||||
|
|
||||||
use std::{any::Any, panic::AssertUnwindSafe, pin::Pin, thread};
|
|
||||||
|
|
||||||
use futures::FutureExt;
|
|
||||||
use once_cell::sync::Lazy;
|
|
||||||
use tokio::{
|
|
||||||
runtime::Builder,
|
|
||||||
sync::{mpsc::UnboundedSender, oneshot},
|
|
||||||
};
|
|
||||||
|
|
||||||
/// A blocking async executor.
|
|
||||||
///
|
|
||||||
/// This struct exposes the abstraction of a blocking async executor. It is a global and static
|
|
||||||
/// executor which means that it doesn't require for new instances of it to be created, it's a
|
|
||||||
/// singleton and can be accessed by any thread that wants to perform some async computation on the
|
|
||||||
/// blocking executor thread.
|
|
||||||
///
|
|
||||||
/// The API of the blocking executor is created in a way so that it's very natural, simple to use,
|
|
||||||
/// and unbounded to specific tasks or return types. The following is an example of using this
|
|
||||||
/// executor to drive an async computation:
|
|
||||||
///
|
|
||||||
/// ```rust
|
|
||||||
/// use revive_dt_node_interaction::*;
|
|
||||||
///
|
|
||||||
/// fn blocking_function() {
|
|
||||||
/// let result = BlockingExecutor::execute(async move {
|
|
||||||
/// tokio::time::sleep(std::time::Duration::from_secs(1)).await;
|
|
||||||
/// 0xFFu8
|
|
||||||
/// })
|
|
||||||
/// .expect("Computation failed");
|
|
||||||
///
|
|
||||||
/// assert_eq!(result, 0xFF);
|
|
||||||
/// }
|
|
||||||
/// ```
|
|
||||||
///
|
|
||||||
/// Users get to pass in their async tasks without needing to worry about putting them in a [`Box`],
|
|
||||||
/// [`Pin`], needing to perform down-casting, or the internal channel mechanism used by the runtime.
|
|
||||||
/// To the user, it just looks like a function that converts some async code into sync code.
|
|
||||||
///
|
|
||||||
/// This struct also handled panics that occur in the passed futures and converts them into errors
|
|
||||||
/// that can be handled by the user. This is done to allow the executor to be robust.
|
|
||||||
///
|
|
||||||
/// Internally, the executor communicates with the tokio runtime thread through channels which carry
|
|
||||||
/// the [`TaskMessage`] and the results of the execution.
|
|
||||||
pub struct BlockingExecutor;
|
|
||||||
|
|
||||||
impl BlockingExecutor {
|
|
||||||
pub fn execute<R>(future: impl Future<Output = R> + Send + 'static) -> Result<R, anyhow::Error>
|
|
||||||
where
|
|
||||||
R: Send + 'static,
|
|
||||||
{
|
|
||||||
// Note: The blocking executor is a singleton and therefore we store its state in a static
|
|
||||||
// so that it's assigned only once. Additionally, when we set the state of the executor we
|
|
||||||
// spawn the thread where the async runtime runs.
|
|
||||||
static STATE: Lazy<ExecutorState> = Lazy::new(|| {
|
|
||||||
tracing::trace!("Initializing the BlockingExecutor state");
|
|
||||||
|
|
||||||
// All communication with the tokio runtime thread happens over mspc channels where the
|
|
||||||
// producers here are the threads that want to run async tasks and the consumer here is
|
|
||||||
// the tokio runtime thread.
|
|
||||||
let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel::<TaskMessage>();
|
|
||||||
|
|
||||||
thread::spawn(move || {
|
|
||||||
let runtime = Builder::new_current_thread()
|
|
||||||
.enable_all()
|
|
||||||
.build()
|
|
||||||
.expect("Failed to create the async runtime");
|
|
||||||
|
|
||||||
runtime.block_on(async move {
|
|
||||||
while let Some(TaskMessage {
|
|
||||||
future: task,
|
|
||||||
response_tx: response_channel,
|
|
||||||
}) = rx.recv().await
|
|
||||||
{
|
|
||||||
tracing::trace!("Received a new future to execute");
|
|
||||||
tokio::spawn(async move {
|
|
||||||
// One of the things that the blocking executor does is that it allows
|
|
||||||
// us to catch panics if they occur. By wrapping the given future in an
|
|
||||||
// AssertUnwindSafe::catch_unwind we are able to catch all panic unwinds
|
|
||||||
// in the given future and convert them into errors.
|
|
||||||
let task = AssertUnwindSafe(task).catch_unwind();
|
|
||||||
|
|
||||||
let result = task.await;
|
|
||||||
let _ = response_channel.send(result);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
})
|
|
||||||
});
|
|
||||||
|
|
||||||
ExecutorState { tx }
|
|
||||||
});
|
|
||||||
|
|
||||||
// We need to perform blocking synchronous communication between the current thread and the
|
|
||||||
// tokio runtime thread with the result of the async computation and the oneshot channels
|
|
||||||
// from tokio allows us to do that. The sender side of the channel will be given to the
|
|
||||||
// tokio runtime thread to send the result when the computation is completed and the receive
|
|
||||||
// side of the channel will be kept with this thread to await for the response of the async
|
|
||||||
// task to come back.
|
|
||||||
let (response_tx, response_rx) =
|
|
||||||
oneshot::channel::<Result<Box<dyn Any + Send>, Box<dyn Any + Send>>>();
|
|
||||||
|
|
||||||
// The tokio runtime thread expects a Future<Output = Box<dyn Any + Send>> + Send to be
|
|
||||||
// sent to it to execute. However, this function has a typed Future<Output = R> + Send and
|
|
||||||
// therefore we need to change the type of the future to fit what the runtime thread expects
|
|
||||||
// in the task message. In doing this conversion, we lose some of the type information since
|
|
||||||
// we're converting R => dyn Any. However, we will perform down-casting on the result to
|
|
||||||
// convert it back into R.
|
|
||||||
let future = Box::pin(async move { Box::new(future.await) as Box<dyn Any + Send> });
|
|
||||||
|
|
||||||
let task = TaskMessage::new(future, response_tx);
|
|
||||||
if let Err(error) = STATE.tx.send(task) {
|
|
||||||
tracing::error!(?error, "Failed to send the task to the blocking executor");
|
|
||||||
anyhow::bail!("Failed to send the task to the blocking executor: {error:?}")
|
|
||||||
}
|
|
||||||
|
|
||||||
let result = match response_rx.blocking_recv() {
|
|
||||||
Ok(result) => result,
|
|
||||||
Err(error) => {
|
|
||||||
tracing::error!(
|
|
||||||
?error,
|
|
||||||
"Failed to get the response from the blocking executor"
|
|
||||||
);
|
|
||||||
anyhow::bail!("Failed to get the response from the blocking executor: {error:?}")
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
match result.map(|result| {
|
|
||||||
*result
|
|
||||||
.downcast::<R>()
|
|
||||||
.expect("Type mismatch in the downcast")
|
|
||||||
}) {
|
|
||||||
Ok(result) => Ok(result),
|
|
||||||
Err(error) => {
|
|
||||||
tracing::error!(
|
|
||||||
?error,
|
|
||||||
"Failed to downcast the returned result into the expected type"
|
|
||||||
);
|
|
||||||
anyhow::bail!(
|
|
||||||
"Failed to downcast the returned result into the expected type: {error:?}"
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
/// Represents the state of the async runtime. This runtime is designed to be a singleton runtime
/// which means that in the current running program there's just a single thread that has an async
/// runtime.
struct ExecutorState {
    /// The sending side of the task messages channel. This is used by all of the other threads to
    /// communicate with the async runtime thread. The channel is unbounded, so enqueuing a task
    /// never blocks the sending thread.
    tx: UnboundedSender<TaskMessage>,
}
|
|
||||||
|
|
||||||
/// Represents a message that contains an asynchronous task that's to be executed by the runtime
/// as well as a way for the runtime to report back on the result of the execution.
struct TaskMessage {
    /// The task that's being requested to run. This is a future that returns an object that does
    /// implement [`Any`] and [`Send`] to allow it to be sent between the requesting thread and the
    /// async thread. The output is type-erased here and down-cast back to its concrete type by the
    /// requesting side.
    future: Pin<Box<dyn Future<Output = Box<dyn Any + Send>> + Send>>,

    /// A one shot sender channel where the sender of the task is expecting to hear back on the
    /// result of the task. `Ok` carries the future's output while `Err` carries the payload of a
    /// panic that was caught while the future was executing.
    response_tx: oneshot::Sender<Result<Box<dyn Any + Send>, Box<dyn Any + Send>>>,
}

impl TaskMessage {
    /// Creates a new [`TaskMessage`] from a type-erased future and the oneshot sender over which
    /// the result of executing that future is to be reported.
    pub fn new(
        future: Pin<Box<dyn Future<Output = Box<dyn Any + Send>> + Send>>,
        response_tx: oneshot::Sender<Result<Box<dyn Any + Send>, Box<dyn Any + Send>>>,
    ) -> Self {
        Self {
            future,
            response_tx,
        }
    }
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod test {
    use super::*;

    /// Sanity check: a future that sleeps and then returns a value completes and yields that
    /// value back to the synchronous caller.
    #[test]
    fn simple_future_works() {
        // Act
        let result = BlockingExecutor::execute(async move {
            tokio::time::sleep(std::time::Duration::from_secs(1)).await;
            0xFFu8
        })
        .unwrap();

        // Assert
        assert_eq!(result, 0xFFu8);
    }

    /// A panic inside a submitted future must surface as an `Err` and must not poison the
    /// executor: a task submitted afterwards is still expected to run to completion.
    #[test]
    #[allow(unreachable_code, clippy::unreachable)]
    fn panics_in_futures_are_caught() {
        // Act
        let result = BlockingExecutor::execute(async move {
            panic!("This is a panic!");
            0xFFu8
        });

        // Assert
        assert!(result.is_err());

        // Act: the executor should still be usable after the panic above.
        let result = BlockingExecutor::execute(async move {
            tokio::time::sleep(std::time::Duration::from_secs(1)).await;
            0xFFu8
        })
        .unwrap();

        // Assert
        assert_eq!(result, 0xFFu8)
    }
}
|
|
||||||
@@ -1,48 +1,83 @@
|
|||||||
//! This crate implements all node interactions.
|
//! This crate implements all node interactions.
|
||||||
|
|
||||||
use alloy::eips::BlockNumberOrTag;
|
use std::pin::Pin;
|
||||||
use alloy::primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, U256};
|
use std::sync::Arc;
|
||||||
use alloy::rpc::types::trace::geth::{DiffMode, GethTrace};
|
|
||||||
use alloy::rpc::types::{TransactionReceipt, TransactionRequest};
|
use alloy::network::Ethereum;
|
||||||
|
use alloy::primitives::{Address, StorageKey, TxHash, U256};
|
||||||
|
use alloy::providers::DynProvider;
|
||||||
|
use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
|
||||||
|
use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
|
|
||||||
mod blocking_executor;
|
use futures::Stream;
|
||||||
pub use blocking_executor::*;
|
use revive_common::EVMVersion;
|
||||||
|
use revive_dt_format::traits::ResolverApi;
|
||||||
|
use revive_dt_report::MinedBlockInformation;
|
||||||
|
|
||||||
/// An interface for all interactions with Ethereum compatible nodes.
|
/// An interface for all interactions with Ethereum compatible nodes.
|
||||||
|
#[allow(clippy::type_complexity)]
|
||||||
pub trait EthereumNode {
|
pub trait EthereumNode {
|
||||||
|
/// A function to run post spawning the nodes and before any transactions are run on the node.
|
||||||
|
fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>>;
|
||||||
|
|
||||||
|
fn id(&self) -> usize;
|
||||||
|
|
||||||
|
/// Returns the nodes connection string.
|
||||||
|
fn connection_string(&self) -> &str;
|
||||||
|
|
||||||
|
fn submit_transaction(
|
||||||
|
&self,
|
||||||
|
transaction: TransactionRequest,
|
||||||
|
) -> Pin<Box<dyn Future<Output = Result<TxHash>> + '_>>;
|
||||||
|
|
||||||
|
fn get_receipt(
|
||||||
|
&self,
|
||||||
|
tx_hash: TxHash,
|
||||||
|
) -> Pin<Box<dyn Future<Output = Result<TransactionReceipt>> + '_>>;
|
||||||
|
|
||||||
/// Execute the [TransactionRequest] and return a [TransactionReceipt].
|
/// Execute the [TransactionRequest] and return a [TransactionReceipt].
|
||||||
fn execute_transaction(&self, transaction: TransactionRequest) -> Result<TransactionReceipt>;
|
fn execute_transaction(
|
||||||
|
&self,
|
||||||
|
transaction: TransactionRequest,
|
||||||
|
) -> Pin<Box<dyn Future<Output = Result<TransactionReceipt>> + '_>>;
|
||||||
|
|
||||||
/// Trace the transaction in the [TransactionReceipt] and return a [GethTrace].
|
/// Trace the transaction in the [TransactionReceipt] and return a [GethTrace].
|
||||||
fn trace_transaction(&self, transaction: TransactionReceipt) -> Result<GethTrace>;
|
fn trace_transaction(
|
||||||
|
&self,
|
||||||
|
tx_hash: TxHash,
|
||||||
|
trace_options: GethDebugTracingOptions,
|
||||||
|
) -> Pin<Box<dyn Future<Output = Result<GethTrace>> + '_>>;
|
||||||
|
|
||||||
/// Returns the state diff of the transaction hash in the [TransactionReceipt].
|
/// Returns the state diff of the transaction hash in the [TransactionReceipt].
|
||||||
fn state_diff(&self, transaction: TransactionReceipt) -> Result<DiffMode>;
|
fn state_diff(&self, tx_hash: TxHash) -> Pin<Box<dyn Future<Output = Result<DiffMode>> + '_>>;
|
||||||
|
|
||||||
/// Returns the next available nonce for the given [Address].
|
/// Returns the balance of the provided [`Address`] back.
|
||||||
fn fetch_add_nonce(&self, address: Address) -> Result<u64>;
|
fn balance_of(&self, address: Address) -> Pin<Box<dyn Future<Output = Result<U256>> + '_>>;
|
||||||
|
|
||||||
/// Returns the ID of the chain that the node is on.
|
/// Returns the latest storage proof of the provided [`Address`]
|
||||||
fn chain_id(&self) -> Result<ChainId>;
|
fn latest_state_proof(
|
||||||
|
&self,
|
||||||
|
address: Address,
|
||||||
|
keys: Vec<StorageKey>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = Result<EIP1186AccountProofResponse>> + '_>>;
|
||||||
|
|
||||||
// TODO: This is currently a u128 due to Kitchensink needing more than 64 bits for its gas limit
|
/// Returns the resolver that is to use with this ethereum node.
|
||||||
// when we implement the changes to the gas we need to adjust this to be a u64.
|
fn resolver(&self) -> Pin<Box<dyn Future<Output = Result<Arc<dyn ResolverApi + '_>>> + '_>>;
|
||||||
/// Returns the gas limit of the specified block.
|
|
||||||
fn block_gas_limit(&self, number: BlockNumberOrTag) -> Result<u128>;
|
|
||||||
|
|
||||||
/// Returns the coinbase of the specified block.
|
/// Returns the EVM version of the node.
|
||||||
fn block_coinbase(&self, number: BlockNumberOrTag) -> Result<Address>;
|
fn evm_version(&self) -> EVMVersion;
|
||||||
|
|
||||||
/// Returns the difficulty of the specified block.
|
/// Returns a stream of the blocks that were mined by the node.
|
||||||
fn block_difficulty(&self, number: BlockNumberOrTag) -> Result<U256>;
|
fn subscribe_to_full_blocks_information(
|
||||||
|
&self,
|
||||||
|
) -> Pin<
|
||||||
|
Box<
|
||||||
|
dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
|
||||||
|
+ '_,
|
||||||
|
>,
|
||||||
|
>;
|
||||||
|
|
||||||
/// Returns the hash of the specified block.
|
fn provider(&self)
|
||||||
fn block_hash(&self, number: BlockNumberOrTag) -> Result<BlockHash>;
|
-> Pin<Box<dyn Future<Output = anyhow::Result<DynProvider<Ethereum>>> + '_>>;
|
||||||
|
|
||||||
/// Returns the timestamp of the specified block,
|
|
||||||
fn block_timestamp(&self, number: BlockNumberOrTag) -> Result<BlockTimestamp>;
|
|
||||||
|
|
||||||
/// Returns the number of the last block.
|
|
||||||
fn last_block_number(&self) -> Result<BlockNumber>;
|
|
||||||
}
|
}
|
||||||
|
|||||||
+14
-1
@@ -11,18 +11,31 @@ rust-version.workspace = true
|
|||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
alloy = { workspace = true }
|
alloy = { workspace = true }
|
||||||
|
futures = { workspace = true }
|
||||||
tracing = { workspace = true }
|
tracing = { workspace = true }
|
||||||
|
tower = { workspace = true }
|
||||||
tokio = { workspace = true }
|
tokio = { workspace = true }
|
||||||
|
|
||||||
revive-dt-node-interaction = { workspace = true }
|
revive-common = { workspace = true }
|
||||||
|
revive-dt-common = { workspace = true }
|
||||||
revive-dt-config = { workspace = true }
|
revive-dt-config = { workspace = true }
|
||||||
|
revive-dt-format = { workspace = true }
|
||||||
|
revive-dt-node-interaction = { workspace = true }
|
||||||
|
revive-dt-report = { workspace = true }
|
||||||
|
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
|
serde_with = { workspace = true }
|
||||||
|
serde_yaml_ng = { workspace = true }
|
||||||
|
|
||||||
sp-core = { workspace = true }
|
sp-core = { workspace = true }
|
||||||
sp-runtime = { workspace = true }
|
sp-runtime = { workspace = true }
|
||||||
|
subxt = { workspace = true }
|
||||||
|
zombienet-sdk = { workspace = true }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
temp-dir = { workspace = true }
|
temp-dir = { workspace = true }
|
||||||
tokio = { workspace = true }
|
tokio = { workspace = true }
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
|||||||
@@ -0,0 +1,10 @@
|
|||||||
|
use alloy::primitives::ChainId;
|
||||||
|
|
||||||
|
/// This constant defines how much Wei accounts are pre-seeded with in genesis.
|
||||||
|
///
|
||||||
|
/// Note: After changing this number, check that the tests for substrate work as we encountered
|
||||||
|
/// some issues with different values of the initial balance on substrate.
|
||||||
|
pub const INITIAL_BALANCE: u128 = 10u128.pow(37);
|
||||||
|
|
||||||
|
/// The chain id used for all of the chains spawned by the framework.
|
||||||
|
pub const CHAIN_ID: ChainId = 420420420;
|
||||||
@@ -1,655 +0,0 @@
|
|||||||
//! The go-ethereum node implementation.
|
|
||||||
|
|
||||||
use std::{
|
|
||||||
collections::HashMap,
|
|
||||||
fs::{File, OpenOptions, create_dir_all, remove_dir_all},
|
|
||||||
io::{BufRead, BufReader, Read, Write},
|
|
||||||
path::PathBuf,
|
|
||||||
process::{Child, Command, Stdio},
|
|
||||||
sync::{
|
|
||||||
Mutex,
|
|
||||||
atomic::{AtomicU32, Ordering},
|
|
||||||
},
|
|
||||||
time::{Duration, Instant},
|
|
||||||
};
|
|
||||||
|
|
||||||
use alloy::{
|
|
||||||
eips::BlockNumberOrTag,
|
|
||||||
network::{Ethereum, EthereumWallet},
|
|
||||||
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, U256},
|
|
||||||
providers::{
|
|
||||||
Provider, ProviderBuilder,
|
|
||||||
ext::DebugApi,
|
|
||||||
fillers::{FillProvider, TxFiller},
|
|
||||||
},
|
|
||||||
rpc::types::{
|
|
||||||
TransactionReceipt, TransactionRequest,
|
|
||||||
trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
use revive_dt_config::Arguments;
|
|
||||||
use revive_dt_node_interaction::{BlockingExecutor, EthereumNode};
|
|
||||||
use tracing::Level;
|
|
||||||
|
|
||||||
use crate::Node;
|
|
||||||
|
|
||||||
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
|
|
||||||
|
|
||||||
/// The go-ethereum node instance implementation.
|
|
||||||
///
|
|
||||||
/// Implements helpers to initialize, spawn and wait the node.
|
|
||||||
///
|
|
||||||
/// Assumes dev mode and IPC only (`P2P`, `http`` etc. are kept disabled).
|
|
||||||
///
|
|
||||||
/// Prunes the child process and the base directory on drop.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct Instance {
|
|
||||||
connection_string: String,
|
|
||||||
base_directory: PathBuf,
|
|
||||||
data_directory: PathBuf,
|
|
||||||
logs_directory: PathBuf,
|
|
||||||
geth: PathBuf,
|
|
||||||
id: u32,
|
|
||||||
handle: Option<Child>,
|
|
||||||
network_id: u64,
|
|
||||||
start_timeout: u64,
|
|
||||||
wallet: EthereumWallet,
|
|
||||||
nonces: Mutex<HashMap<Address, u64>>,
|
|
||||||
/// This vector stores [`File`] objects that we use for logging which we want to flush when the
|
|
||||||
/// node object is dropped. We do not store them in a structured fashion at the moment (in
|
|
||||||
/// separate fields) as the logic that we need to apply to them is all the same regardless of
|
|
||||||
/// what it belongs to, we just want to flush them on [`Drop`] of the node.
|
|
||||||
logs_file_to_flush: Vec<File>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Instance {
|
|
||||||
const BASE_DIRECTORY: &str = "geth";
|
|
||||||
const DATA_DIRECTORY: &str = "data";
|
|
||||||
const LOGS_DIRECTORY: &str = "logs";
|
|
||||||
|
|
||||||
const IPC_FILE: &str = "geth.ipc";
|
|
||||||
const GENESIS_JSON_FILE: &str = "genesis.json";
|
|
||||||
|
|
||||||
const READY_MARKER: &str = "IPC endpoint opened";
|
|
||||||
const ERROR_MARKER: &str = "Fatal:";
|
|
||||||
|
|
||||||
const GETH_STDOUT_LOG_FILE_NAME: &str = "node_stdout.log";
|
|
||||||
const GETH_STDERR_LOG_FILE_NAME: &str = "node_stderr.log";
|
|
||||||
|
|
||||||
/// Create the node directory and call `geth init` to configure the genesis.
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn init(&mut self, genesis: String) -> anyhow::Result<&mut Self> {
|
|
||||||
create_dir_all(&self.base_directory)?;
|
|
||||||
create_dir_all(&self.logs_directory)?;
|
|
||||||
|
|
||||||
let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE);
|
|
||||||
File::create(&genesis_path)?.write_all(genesis.as_bytes())?;
|
|
||||||
|
|
||||||
let mut child = Command::new(&self.geth)
|
|
||||||
.arg("init")
|
|
||||||
.arg("--datadir")
|
|
||||||
.arg(&self.data_directory)
|
|
||||||
.arg(genesis_path)
|
|
||||||
.stderr(Stdio::piped())
|
|
||||||
.stdout(Stdio::null())
|
|
||||||
.spawn()?;
|
|
||||||
|
|
||||||
let mut stderr = String::new();
|
|
||||||
child
|
|
||||||
.stderr
|
|
||||||
.take()
|
|
||||||
.expect("should be piped")
|
|
||||||
.read_to_string(&mut stderr)?;
|
|
||||||
|
|
||||||
if !child.wait()?.success() {
|
|
||||||
anyhow::bail!("failed to initialize geth node #{:?}: {stderr}", &self.id);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(self)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Spawn the go-ethereum node child process.
|
|
||||||
///
|
|
||||||
/// [Instance::init] must be called prior.
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn spawn_process(&mut self) -> anyhow::Result<&mut Self> {
|
|
||||||
// This is the `OpenOptions` that we wish to use for all of the log files that we will be
|
|
||||||
// opening in this method. We need to construct it in this way to:
|
|
||||||
// 1. Be consistent
|
|
||||||
// 2. Less verbose and more dry
|
|
||||||
// 3. Because the builder pattern uses mutable references so we need to get around that.
|
|
||||||
let open_options = {
|
|
||||||
let mut options = OpenOptions::new();
|
|
||||||
options.create(true).truncate(true).write(true);
|
|
||||||
options
|
|
||||||
};
|
|
||||||
|
|
||||||
let stdout_logs_file = open_options
|
|
||||||
.clone()
|
|
||||||
.open(self.geth_stdout_log_file_path())?;
|
|
||||||
let stderr_logs_file = open_options.open(self.geth_stderr_log_file_path())?;
|
|
||||||
self.handle = Command::new(&self.geth)
|
|
||||||
.arg("--dev")
|
|
||||||
.arg("--datadir")
|
|
||||||
.arg(&self.data_directory)
|
|
||||||
.arg("--ipcpath")
|
|
||||||
.arg(&self.connection_string)
|
|
||||||
.arg("--networkid")
|
|
||||||
.arg(self.network_id.to_string())
|
|
||||||
.arg("--nodiscover")
|
|
||||||
.arg("--maxpeers")
|
|
||||||
.arg("0")
|
|
||||||
.stderr(stderr_logs_file.try_clone()?)
|
|
||||||
.stdout(stdout_logs_file.try_clone()?)
|
|
||||||
.spawn()?
|
|
||||||
.into();
|
|
||||||
|
|
||||||
if let Err(error) = self.wait_ready() {
|
|
||||||
tracing::error!(?error, "Failed to start geth, shutting down gracefully");
|
|
||||||
self.shutdown()?;
|
|
||||||
return Err(error);
|
|
||||||
}
|
|
||||||
|
|
||||||
self.logs_file_to_flush
|
|
||||||
.extend([stderr_logs_file, stdout_logs_file]);
|
|
||||||
|
|
||||||
Ok(self)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Wait for the go-ethereum node child process to become ready.
|
|
||||||
///
|
|
||||||
/// [Instance::spawn_process] must be called priorly.
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn wait_ready(&mut self) -> anyhow::Result<&mut Self> {
|
|
||||||
let start_time = Instant::now();
|
|
||||||
|
|
||||||
let logs_file = OpenOptions::new()
|
|
||||||
.read(true)
|
|
||||||
.write(false)
|
|
||||||
.append(false)
|
|
||||||
.truncate(false)
|
|
||||||
.open(self.geth_stderr_log_file_path())?;
|
|
||||||
|
|
||||||
let maximum_wait_time = Duration::from_millis(self.start_timeout);
|
|
||||||
let mut stderr = BufReader::new(logs_file).lines();
|
|
||||||
loop {
|
|
||||||
if let Some(Ok(line)) = stderr.next() {
|
|
||||||
if line.contains(Self::ERROR_MARKER) {
|
|
||||||
anyhow::bail!("Failed to start geth {line}");
|
|
||||||
}
|
|
||||||
if line.contains(Self::READY_MARKER) {
|
|
||||||
return Ok(self);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if Instant::now().duration_since(start_time) > maximum_wait_time {
|
|
||||||
anyhow::bail!("Timeout in starting geth");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id), level = Level::TRACE)]
|
|
||||||
fn geth_stdout_log_file_path(&self) -> PathBuf {
|
|
||||||
self.logs_directory.join(Self::GETH_STDOUT_LOG_FILE_NAME)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id), level = Level::TRACE)]
|
|
||||||
fn geth_stderr_log_file_path(&self) -> PathBuf {
|
|
||||||
self.logs_directory.join(Self::GETH_STDERR_LOG_FILE_NAME)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn provider(
|
|
||||||
&self,
|
|
||||||
) -> impl Future<
|
|
||||||
Output = anyhow::Result<
|
|
||||||
FillProvider<impl TxFiller<Ethereum>, impl Provider<Ethereum>, Ethereum>,
|
|
||||||
>,
|
|
||||||
> + 'static {
|
|
||||||
let connection_string = self.connection_string();
|
|
||||||
let wallet = self.wallet.clone();
|
|
||||||
Box::pin(async move {
|
|
||||||
ProviderBuilder::new()
|
|
||||||
.wallet(wallet)
|
|
||||||
.connect(&connection_string)
|
|
||||||
.await
|
|
||||||
.map_err(Into::into)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl EthereumNode for Instance {
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn execute_transaction(
|
|
||||||
&self,
|
|
||||||
transaction: TransactionRequest,
|
|
||||||
) -> anyhow::Result<alloy::rpc::types::TransactionReceipt> {
|
|
||||||
let provider = self.provider();
|
|
||||||
BlockingExecutor::execute(async move {
|
|
||||||
let outer_span = tracing::debug_span!("Submitting transaction", ?transaction,);
|
|
||||||
let _outer_guard = outer_span.enter();
|
|
||||||
|
|
||||||
let provider = provider.await?;
|
|
||||||
|
|
||||||
let pending_transaction = provider.send_transaction(transaction).await?;
|
|
||||||
let transaction_hash = pending_transaction.tx_hash();
|
|
||||||
|
|
||||||
let span = tracing::info_span!("Awaiting transaction receipt", ?transaction_hash);
|
|
||||||
let _guard = span.enter();
|
|
||||||
|
|
||||||
// The following is a fix for the "transaction indexing is in progress" error that we
|
|
||||||
// used to get. You can find more information on this in the following GH issue in geth
|
|
||||||
// https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
|
|
||||||
// before we can get the receipt of the transaction it needs to have been indexed by the
|
|
||||||
// node's indexer. Just because the transaction has been confirmed it doesn't mean that
|
|
||||||
// it has been indexed. When we call alloy's `get_receipt` it checks if the transaction
|
|
||||||
// was confirmed. If it has been, then it will call `eth_getTransactionReceipt` method
|
|
||||||
// which _might_ return the above error if the tx has not yet been indexed yet. So, we
|
|
||||||
// need to implement a retry mechanism for the receipt to keep retrying to get it until
|
|
||||||
// it eventually works, but we only do that if the error we get back is the "transaction
|
|
||||||
// indexing is in progress" error or if the receipt is None.
|
|
||||||
//
|
|
||||||
// At the moment we do not allow for the 60 seconds to be modified and we take it as
|
|
||||||
// being an implementation detail that's invisible to anything outside of this module.
|
|
||||||
//
|
|
||||||
// We allow a total of 60 retries for getting the receipt with one second between each
|
|
||||||
// retry and the next which means that we allow for a total of 60 seconds of waiting
|
|
||||||
// before we consider that we're unable to get the transaction receipt.
|
|
||||||
let mut retries = 0;
|
|
||||||
loop {
|
|
||||||
match provider.get_transaction_receipt(*transaction_hash).await {
|
|
||||||
Ok(Some(receipt)) => {
|
|
||||||
tracing::info!("Obtained the transaction receipt");
|
|
||||||
break Ok(receipt);
|
|
||||||
}
|
|
||||||
Ok(None) => {
|
|
||||||
if retries == 60 {
|
|
||||||
tracing::error!(
|
|
||||||
"Polled for transaction receipt for 60 seconds but failed to get it"
|
|
||||||
);
|
|
||||||
break Err(anyhow::anyhow!("Failed to get the transaction receipt"));
|
|
||||||
} else {
|
|
||||||
tracing::trace!(
|
|
||||||
retries,
|
|
||||||
"Sleeping for 1 second and trying to get the receipt again"
|
|
||||||
);
|
|
||||||
retries += 1;
|
|
||||||
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(error) => {
|
|
||||||
let error_string = error.to_string();
|
|
||||||
if error_string.contains("transaction indexing is in progress") {
|
|
||||||
if retries == 60 {
|
|
||||||
tracing::error!(
|
|
||||||
"Polled for transaction receipt for 60 seconds but failed to get it"
|
|
||||||
);
|
|
||||||
break Err(error.into());
|
|
||||||
} else {
|
|
||||||
tracing::trace!(
|
|
||||||
retries,
|
|
||||||
"Sleeping for 1 second and trying to get the receipt again"
|
|
||||||
);
|
|
||||||
retries += 1;
|
|
||||||
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
break Err(error.into());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})?
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn trace_transaction(
|
|
||||||
&self,
|
|
||||||
transaction: TransactionReceipt,
|
|
||||||
) -> anyhow::Result<alloy::rpc::types::trace::geth::GethTrace> {
|
|
||||||
let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
|
|
||||||
diff_mode: Some(true),
|
|
||||||
disable_code: None,
|
|
||||||
disable_storage: None,
|
|
||||||
});
|
|
||||||
let provider = self.provider();
|
|
||||||
|
|
||||||
BlockingExecutor::execute(async move {
|
|
||||||
Ok(provider
|
|
||||||
.await?
|
|
||||||
.debug_trace_transaction(transaction.transaction_hash, trace_options)
|
|
||||||
.await?)
|
|
||||||
})?
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn state_diff(
|
|
||||||
&self,
|
|
||||||
transaction: alloy::rpc::types::TransactionReceipt,
|
|
||||||
) -> anyhow::Result<DiffMode> {
|
|
||||||
match self
|
|
||||||
.trace_transaction(transaction)?
|
|
||||||
.try_into_pre_state_frame()?
|
|
||||||
{
|
|
||||||
PreStateFrame::Diff(diff) => Ok(diff),
|
|
||||||
_ => anyhow::bail!("expected a diff mode trace"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn fetch_add_nonce(&self, address: Address) -> anyhow::Result<u64> {
|
|
||||||
let provider = self.provider();
|
|
||||||
let onchain_nonce = BlockingExecutor::execute::<anyhow::Result<_>>(async move {
|
|
||||||
provider
|
|
||||||
.await?
|
|
||||||
.get_transaction_count(address)
|
|
||||||
.await
|
|
||||||
.map_err(Into::into)
|
|
||||||
})??;
|
|
||||||
|
|
||||||
let mut nonces = self.nonces.lock().unwrap();
|
|
||||||
let current = nonces.entry(address).or_insert(onchain_nonce);
|
|
||||||
let value = *current;
|
|
||||||
*current += 1;
|
|
||||||
Ok(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn chain_id(&self) -> anyhow::Result<alloy::primitives::ChainId> {
|
|
||||||
let provider = self.provider();
|
|
||||||
BlockingExecutor::execute(async move {
|
|
||||||
provider.await?.get_chain_id().await.map_err(Into::into)
|
|
||||||
})?
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result<u128> {
|
|
||||||
let provider = self.provider();
|
|
||||||
BlockingExecutor::execute(async move {
|
|
||||||
provider
|
|
||||||
.await?
|
|
||||||
.get_block_by_number(number)
|
|
||||||
.await?
|
|
||||||
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
|
|
||||||
.map(|block| block.header.gas_limit as _)
|
|
||||||
})?
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result<Address> {
|
|
||||||
let provider = self.provider();
|
|
||||||
BlockingExecutor::execute(async move {
|
|
||||||
provider
|
|
||||||
.await?
|
|
||||||
.get_block_by_number(number)
|
|
||||||
.await?
|
|
||||||
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
|
|
||||||
.map(|block| block.header.beneficiary)
|
|
||||||
})?
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result<U256> {
|
|
||||||
let provider = self.provider();
|
|
||||||
BlockingExecutor::execute(async move {
|
|
||||||
provider
|
|
||||||
.await?
|
|
||||||
.get_block_by_number(number)
|
|
||||||
.await?
|
|
||||||
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
|
|
||||||
.map(|block| block.header.difficulty)
|
|
||||||
})?
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockHash> {
|
|
||||||
let provider = self.provider();
|
|
||||||
BlockingExecutor::execute(async move {
|
|
||||||
provider
|
|
||||||
.await?
|
|
||||||
.get_block_by_number(number)
|
|
||||||
.await?
|
|
||||||
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
|
|
||||||
.map(|block| block.header.hash)
|
|
||||||
})?
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result<BlockTimestamp> {
|
|
||||||
let provider = self.provider();
|
|
||||||
BlockingExecutor::execute(async move {
|
|
||||||
provider
|
|
||||||
.await?
|
|
||||||
.get_block_by_number(number)
|
|
||||||
.await?
|
|
||||||
.ok_or(anyhow::Error::msg("Blockchain has no blocks"))
|
|
||||||
.map(|block| block.header.timestamp)
|
|
||||||
})?
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn last_block_number(&self) -> anyhow::Result<BlockNumber> {
|
|
||||||
let provider = self.provider();
|
|
||||||
BlockingExecutor::execute(async move {
|
|
||||||
provider.await?.get_block_number().await.map_err(Into::into)
|
|
||||||
})?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Node for Instance {
|
|
||||||
fn new(config: &Arguments) -> Self {
|
|
||||||
let geth_directory = config.directory().join(Self::BASE_DIRECTORY);
|
|
||||||
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
|
|
||||||
let base_directory = geth_directory.join(id.to_string());
|
|
||||||
|
|
||||||
Self {
|
|
||||||
connection_string: base_directory.join(Self::IPC_FILE).display().to_string(),
|
|
||||||
data_directory: base_directory.join(Self::DATA_DIRECTORY),
|
|
||||||
logs_directory: base_directory.join(Self::LOGS_DIRECTORY),
|
|
||||||
base_directory,
|
|
||||||
geth: config.geth.clone(),
|
|
||||||
id,
|
|
||||||
handle: None,
|
|
||||||
network_id: config.network_id,
|
|
||||||
start_timeout: config.geth_start_timeout,
|
|
||||||
wallet: config.wallet(),
|
|
||||||
nonces: Mutex::new(HashMap::new()),
|
|
||||||
// We know that we only need to be storing 2 files so we can specify that when creating
|
|
||||||
// the vector. It's the stdout and stderr of the geth node.
|
|
||||||
logs_file_to_flush: Vec::with_capacity(2),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn connection_string(&self) -> String {
|
|
||||||
self.connection_string.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn shutdown(&mut self) -> anyhow::Result<()> {
|
|
||||||
// Terminate the processes in a graceful manner to allow for the output to be flushed.
|
|
||||||
if let Some(mut child) = self.handle.take() {
|
|
||||||
child
|
|
||||||
.kill()
|
|
||||||
.map_err(|error| anyhow::anyhow!("Failed to kill the geth process: {error:?}"))?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flushing the files that we're using for keeping the logs before shutdown.
|
|
||||||
for file in self.logs_file_to_flush.iter_mut() {
|
|
||||||
file.flush()?
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove the node's database so that subsequent runs do not run on the same database. We
|
|
||||||
// ignore the error just in case the directory didn't exist in the first place and therefore
|
|
||||||
// there's nothing to be deleted.
|
|
||||||
let _ = remove_dir_all(self.base_directory.join(Self::DATA_DIRECTORY));
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn spawn(&mut self, genesis: String) -> anyhow::Result<()> {
|
|
||||||
self.init(genesis)?.spawn_process()?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn version(&self) -> anyhow::Result<String> {
|
|
||||||
let output = Command::new(&self.geth)
|
|
||||||
.arg("--version")
|
|
||||||
.stdin(Stdio::null())
|
|
||||||
.stdout(Stdio::piped())
|
|
||||||
.stderr(Stdio::null())
|
|
||||||
.spawn()?
|
|
||||||
.wait_with_output()?
|
|
||||||
.stdout;
|
|
||||||
Ok(String::from_utf8_lossy(&output).into())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for Instance {
|
|
||||||
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))]
|
|
||||||
fn drop(&mut self) {
|
|
||||||
self.shutdown().expect("Failed to shutdown")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use revive_dt_config::Arguments;
|
|
||||||
use temp_dir::TempDir;
|
|
||||||
|
|
||||||
use crate::{GENESIS_JSON, Node};
|
|
||||||
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
fn test_config() -> (Arguments, TempDir) {
|
|
||||||
let mut config = Arguments::default();
|
|
||||||
let temp_dir = TempDir::new().unwrap();
|
|
||||||
config.working_directory = temp_dir.path().to_path_buf().into();
|
|
||||||
|
|
||||||
(config, temp_dir)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn new_node() -> (Instance, TempDir) {
|
|
||||||
let (args, temp_dir) = test_config();
|
|
||||||
let mut node = Instance::new(&args);
|
|
||||||
node.init(GENESIS_JSON.to_owned())
|
|
||||||
.expect("Failed to initialize the node")
|
|
||||||
.spawn_process()
|
|
||||||
.expect("Failed to spawn the node process");
|
|
||||||
(node, temp_dir)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn init_works() {
|
|
||||||
Instance::new(&test_config().0)
|
|
||||||
.init(GENESIS_JSON.to_string())
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn spawn_works() {
|
|
||||||
Instance::new(&test_config().0)
|
|
||||||
.spawn(GENESIS_JSON.to_string())
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn version_works() {
|
|
||||||
let version = Instance::new(&test_config().0).version().unwrap();
|
|
||||||
assert!(
|
|
||||||
version.starts_with("geth version"),
|
|
||||||
"expected version string, got: '{version}'"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn can_get_chain_id_from_node() {
|
|
||||||
// Arrange
|
|
||||||
let (node, _temp_dir) = new_node();
|
|
||||||
|
|
||||||
// Act
|
|
||||||
let chain_id = node.chain_id();
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let chain_id = chain_id.expect("Failed to get the chain id");
|
|
||||||
assert_eq!(chain_id, 420_420_420);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn can_get_gas_limit_from_node() {
|
|
||||||
// Arrange
|
|
||||||
let (node, _temp_dir) = new_node();
|
|
||||||
|
|
||||||
// Act
|
|
||||||
let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest);
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let gas_limit = gas_limit.expect("Failed to get the gas limit");
|
|
||||||
assert_eq!(gas_limit, u32::MAX as u128)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn can_get_coinbase_from_node() {
|
|
||||||
// Arrange
|
|
||||||
let (node, _temp_dir) = new_node();
|
|
||||||
|
|
||||||
// Act
|
|
||||||
let coinbase = node.block_coinbase(BlockNumberOrTag::Latest);
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let coinbase = coinbase.expect("Failed to get the coinbase");
|
|
||||||
assert_eq!(coinbase, Address::new([0xFF; 20]))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn can_get_block_difficulty_from_node() {
|
|
||||||
// Arrange
|
|
||||||
let (node, _temp_dir) = new_node();
|
|
||||||
|
|
||||||
// Act
|
|
||||||
let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest);
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let block_difficulty = block_difficulty.expect("Failed to get the block difficulty");
|
|
||||||
assert_eq!(block_difficulty, U256::ZERO)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn can_get_block_hash_from_node() {
|
|
||||||
// Arrange
|
|
||||||
let (node, _temp_dir) = new_node();
|
|
||||||
|
|
||||||
// Act
|
|
||||||
let block_hash = node.block_hash(BlockNumberOrTag::Latest);
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let _ = block_hash.expect("Failed to get the block hash");
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn can_get_block_timestamp_from_node() {
|
|
||||||
// Arrange
|
|
||||||
let (node, _temp_dir) = new_node();
|
|
||||||
|
|
||||||
// Act
|
|
||||||
let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest);
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let _ = block_timestamp.expect("Failed to get the block timestamp");
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn can_get_block_number_from_node() {
|
|
||||||
// Arrange
|
|
||||||
let (node, _temp_dir) = new_node();
|
|
||||||
|
|
||||||
// Act
|
|
||||||
let block_number = node.last_block_number();
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let block_number = block_number.expect("Failed to get the block number");
|
|
||||||
assert_eq!(block_number, 0)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -0,0 +1,3 @@
|
|||||||
|
mod process;
|
||||||
|
|
||||||
|
pub use process::*;
|
||||||
@@ -0,0 +1,191 @@
|
|||||||
|
use std::{
|
||||||
|
fs::{File, OpenOptions},
|
||||||
|
io::{BufRead, BufReader, Write},
|
||||||
|
path::Path,
|
||||||
|
process::{Child, Command},
|
||||||
|
time::{Duration, Instant},
|
||||||
|
};
|
||||||
|
|
||||||
|
use anyhow::{Context, Result, bail};
|
||||||
|
|
||||||
|
/// A wrapper around processes which allows for their stdout and stderr to be logged and flushed
|
||||||
|
/// when the process is dropped.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct Process {
|
||||||
|
/// The handle of the child process.
|
||||||
|
child: Child,
|
||||||
|
|
||||||
|
/// The file that stdout is being logged to.
|
||||||
|
stdout_logs_file: File,
|
||||||
|
|
||||||
|
/// The file that stderr is being logged to.
|
||||||
|
stderr_logs_file: File,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Process {
|
||||||
|
pub fn new(
|
||||||
|
log_file_prefix: impl Into<Option<&'static str>>,
|
||||||
|
logs_directory: impl AsRef<Path>,
|
||||||
|
binary_path: impl AsRef<Path>,
|
||||||
|
command_building_callback: impl FnOnce(&mut Command, File, File),
|
||||||
|
process_readiness_wait_behavior: ProcessReadinessWaitBehavior,
|
||||||
|
) -> Result<Self> {
|
||||||
|
let log_file_prefix = log_file_prefix.into();
|
||||||
|
|
||||||
|
let (stdout_file_name, stderr_file_name) = match log_file_prefix {
|
||||||
|
Some(prefix) => (
|
||||||
|
format!("{prefix}_stdout.log"),
|
||||||
|
format!("{prefix}_stderr.log"),
|
||||||
|
),
|
||||||
|
None => ("stdout.log".to_string(), "stderr.log".to_string()),
|
||||||
|
};
|
||||||
|
|
||||||
|
let stdout_logs_file_path = logs_directory.as_ref().join(stdout_file_name);
|
||||||
|
let stderr_logs_file_path = logs_directory.as_ref().join(stderr_file_name);
|
||||||
|
|
||||||
|
let stdout_logs_file = OpenOptions::new()
|
||||||
|
.write(true)
|
||||||
|
.truncate(true)
|
||||||
|
.create(true)
|
||||||
|
.open(stdout_logs_file_path.as_path())
|
||||||
|
.context("Failed to open the stdout logs file")?;
|
||||||
|
let stderr_logs_file = OpenOptions::new()
|
||||||
|
.write(true)
|
||||||
|
.truncate(true)
|
||||||
|
.create(true)
|
||||||
|
.open(stderr_logs_file_path.as_path())
|
||||||
|
.context("Failed to open the stderr logs file")?;
|
||||||
|
|
||||||
|
let mut command = {
|
||||||
|
let stdout_logs_file = stdout_logs_file
|
||||||
|
.try_clone()
|
||||||
|
.context("Failed to clone the stdout logs file")?;
|
||||||
|
let stderr_logs_file = stderr_logs_file
|
||||||
|
.try_clone()
|
||||||
|
.context("Failed to clone the stderr logs file")?;
|
||||||
|
|
||||||
|
let mut command = Command::new(binary_path.as_ref());
|
||||||
|
command_building_callback(&mut command, stdout_logs_file, stderr_logs_file);
|
||||||
|
command
|
||||||
|
};
|
||||||
|
let mut child = command
|
||||||
|
.spawn()
|
||||||
|
.context("Failed to spawn the built command")?;
|
||||||
|
|
||||||
|
match process_readiness_wait_behavior {
|
||||||
|
ProcessReadinessWaitBehavior::NoStartupWait => {}
|
||||||
|
ProcessReadinessWaitBehavior::WaitDuration(duration) => std::thread::sleep(duration),
|
||||||
|
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
|
||||||
|
max_wait_duration,
|
||||||
|
mut check_function,
|
||||||
|
} => {
|
||||||
|
let spawn_time = Instant::now();
|
||||||
|
|
||||||
|
let stdout_logs_file = OpenOptions::new()
|
||||||
|
.read(true)
|
||||||
|
.open(stdout_logs_file_path)
|
||||||
|
.context("Failed to open the stdout logs file")?;
|
||||||
|
let stderr_logs_file = OpenOptions::new()
|
||||||
|
.read(true)
|
||||||
|
.open(stderr_logs_file_path)
|
||||||
|
.context("Failed to open the stderr logs file")?;
|
||||||
|
|
||||||
|
let mut stdout_lines = BufReader::new(stdout_logs_file).lines();
|
||||||
|
let mut stderr_lines = BufReader::new(stderr_logs_file).lines();
|
||||||
|
|
||||||
|
let mut stdout = String::new();
|
||||||
|
let mut stderr = String::new();
|
||||||
|
|
||||||
|
loop {
|
||||||
|
let stdout_line = stdout_lines.next().and_then(Result::ok);
|
||||||
|
let stderr_line = stderr_lines.next().and_then(Result::ok);
|
||||||
|
|
||||||
|
if let Some(stdout_line) = stdout_line.as_ref() {
|
||||||
|
stdout.push_str(stdout_line);
|
||||||
|
stdout.push('\n');
|
||||||
|
}
|
||||||
|
if let Some(stderr_line) = stderr_line.as_ref() {
|
||||||
|
stderr.push_str(stderr_line);
|
||||||
|
stderr.push('\n');
|
||||||
|
}
|
||||||
|
|
||||||
|
let check_result =
|
||||||
|
check_function(stdout_line.as_deref(), stderr_line.as_deref()).context(
|
||||||
|
format!(
|
||||||
|
"Failed to wait for the process to be ready - {stdout} - {stderr}"
|
||||||
|
),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
if check_result {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if Instant::now().duration_since(spawn_time) > max_wait_duration {
|
||||||
|
bail!(
|
||||||
|
"Waited for the process to start but it failed to start in time. stderr {stderr} - stdout {stdout}"
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ProcessReadinessWaitBehavior::WaitForCommandToExit => {
|
||||||
|
if !child
|
||||||
|
.wait()
|
||||||
|
.context("Failed waiting for process to finish")?
|
||||||
|
.success()
|
||||||
|
{
|
||||||
|
anyhow::bail!("Failed to spawn command");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
child,
|
||||||
|
stdout_logs_file,
|
||||||
|
stderr_logs_file,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for Process {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
self.child.kill().expect("Failed to kill the process");
|
||||||
|
self.stdout_logs_file
|
||||||
|
.flush()
|
||||||
|
.expect("Failed to flush the stdout logs file");
|
||||||
|
self.stderr_logs_file
|
||||||
|
.flush()
|
||||||
|
.expect("Failed to flush the stderr logs file");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub enum ProcessReadinessWaitBehavior {
|
||||||
|
/// The process does not require any kind of wait after it's been spawned and can be used
|
||||||
|
/// straight away.
|
||||||
|
NoStartupWait,
|
||||||
|
|
||||||
|
/// Waits for the command to exit.
|
||||||
|
WaitForCommandToExit,
|
||||||
|
|
||||||
|
/// The process does require some amount of wait duration after it's been started.
|
||||||
|
WaitDuration(Duration),
|
||||||
|
|
||||||
|
/// The process requires a time bounded wait function which is a function of the lines that
|
||||||
|
/// appear in the log files.
|
||||||
|
TimeBoundedWaitFunction {
|
||||||
|
/// The maximum amount of time to wait for the check function to return true.
|
||||||
|
max_wait_duration: Duration,
|
||||||
|
|
||||||
|
/// The function to use to check if the process spawned is ready to use or not. This
|
||||||
|
/// function should return the following in the following cases:
|
||||||
|
///
|
||||||
|
/// - `Ok(true)`: Returned when the condition the process is waiting for has been fulfilled
|
||||||
|
/// and the wait is completed.
|
||||||
|
/// - `Ok(false)`: The process is not ready yet but it might be ready in the future.
|
||||||
|
/// - `Err`: The process is not ready yet and will not be ready in the future as it appears
|
||||||
|
/// that it has encountered an error when it was being spawned.
|
||||||
|
///
|
||||||
|
/// The first argument is a line from stdout and the second argument is a line from stderr.
|
||||||
|
#[allow(clippy::type_complexity)]
|
||||||
|
check_function: Box<dyn FnMut(Option<&str>, Option<&str>) -> anyhow::Result<bool>>,
|
||||||
|
},
|
||||||
|
}
|
||||||
File diff suppressed because it is too large
Load Diff
+6
-14
@@ -1,33 +1,25 @@
|
|||||||
//! This crate implements the testing nodes.
|
//! This crate implements the testing nodes.
|
||||||
|
|
||||||
use revive_dt_config::Arguments;
|
use alloy::genesis::Genesis;
|
||||||
use revive_dt_node_interaction::EthereumNode;
|
use revive_dt_node_interaction::EthereumNode;
|
||||||
|
|
||||||
pub mod geth;
|
pub mod constants;
|
||||||
pub mod kitchensink;
|
pub mod helpers;
|
||||||
pub mod pool;
|
pub mod node_implementations;
|
||||||
|
pub mod provider_utils;
|
||||||
/// The default genesis configuration.
|
|
||||||
pub const GENESIS_JSON: &str = include_str!("../../../genesis.json");
|
|
||||||
|
|
||||||
/// An abstract interface for testing nodes.
|
/// An abstract interface for testing nodes.
|
||||||
pub trait Node: EthereumNode {
|
pub trait Node: EthereumNode {
|
||||||
/// Create a new uninitialized instance.
|
|
||||||
fn new(config: &Arguments) -> Self;
|
|
||||||
|
|
||||||
/// Spawns a node configured according to the genesis json.
|
/// Spawns a node configured according to the genesis json.
|
||||||
///
|
///
|
||||||
/// Blocking until it's ready to accept transactions.
|
/// Blocking until it's ready to accept transactions.
|
||||||
fn spawn(&mut self, genesis: String) -> anyhow::Result<()>;
|
fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()>;
|
||||||
|
|
||||||
/// Prune the node instance and related data.
|
/// Prune the node instance and related data.
|
||||||
///
|
///
|
||||||
/// Blocking until it's completely stopped.
|
/// Blocking until it's completely stopped.
|
||||||
fn shutdown(&mut self) -> anyhow::Result<()>;
|
fn shutdown(&mut self) -> anyhow::Result<()>;
|
||||||
|
|
||||||
/// Returns the nodes connection string.
|
|
||||||
fn connection_string(&self) -> String;
|
|
||||||
|
|
||||||
/// Returns the node version.
|
/// Returns the node version.
|
||||||
fn version(&self) -> anyhow::Result<String>;
|
fn version(&self) -> anyhow::Result<String>;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,841 @@
|
|||||||
|
//! The go-ethereum node implementation.
|
||||||
|
|
||||||
|
use std::{
|
||||||
|
fs::{File, create_dir_all, remove_dir_all},
|
||||||
|
io::Read,
|
||||||
|
path::PathBuf,
|
||||||
|
pin::Pin,
|
||||||
|
process::{Command, Stdio},
|
||||||
|
sync::{
|
||||||
|
Arc,
|
||||||
|
atomic::{AtomicU32, Ordering},
|
||||||
|
},
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
|
||||||
|
use alloy::{
|
||||||
|
eips::BlockNumberOrTag,
|
||||||
|
genesis::{Genesis, GenesisAccount},
|
||||||
|
network::{Ethereum, EthereumWallet, NetworkWallet},
|
||||||
|
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
|
||||||
|
providers::{
|
||||||
|
Provider,
|
||||||
|
ext::DebugApi,
|
||||||
|
fillers::{CachedNonceManager, ChainIdFiller, NonceFiller},
|
||||||
|
},
|
||||||
|
rpc::types::{
|
||||||
|
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
|
||||||
|
trace::geth::{
|
||||||
|
DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
use anyhow::Context as _;
|
||||||
|
use futures::{FutureExt, Stream, StreamExt};
|
||||||
|
use revive_common::EVMVersion;
|
||||||
|
use tokio::sync::OnceCell;
|
||||||
|
use tracing::{error, instrument};
|
||||||
|
|
||||||
|
use revive_dt_common::fs::clear_directory;
|
||||||
|
use revive_dt_config::*;
|
||||||
|
use revive_dt_format::traits::ResolverApi;
|
||||||
|
use revive_dt_node_interaction::EthereumNode;
|
||||||
|
use revive_dt_report::{EthereumMinedBlockInformation, MinedBlockInformation};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
Node,
|
||||||
|
constants::{CHAIN_ID, INITIAL_BALANCE},
|
||||||
|
helpers::{Process, ProcessReadinessWaitBehavior},
|
||||||
|
provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
|
||||||
|
};
|
||||||
|
|
||||||
|
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
|
||||||
|
|
||||||
|
/// The go-ethereum node instance implementation.
|
||||||
|
///
|
||||||
|
/// Implements helpers to initialize, spawn and wait the node.
|
||||||
|
///
|
||||||
|
/// Assumes dev mode and IPC only (`P2P`, `http`` etc. are kept disabled).
|
||||||
|
///
|
||||||
|
/// Prunes the child process and the base directory on drop.
|
||||||
|
#[derive(Debug)]
|
||||||
|
#[allow(clippy::type_complexity)]
|
||||||
|
pub struct GethNode {
|
||||||
|
connection_string: String,
|
||||||
|
base_directory: PathBuf,
|
||||||
|
data_directory: PathBuf,
|
||||||
|
logs_directory: PathBuf,
|
||||||
|
geth: PathBuf,
|
||||||
|
id: u32,
|
||||||
|
handle: Option<Process>,
|
||||||
|
start_timeout: Duration,
|
||||||
|
wallet: Arc<EthereumWallet>,
|
||||||
|
nonce_manager: CachedNonceManager,
|
||||||
|
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
|
||||||
|
use_fallback_gas_filler: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl GethNode {
|
||||||
|
const BASE_DIRECTORY: &str = "geth";
|
||||||
|
const DATA_DIRECTORY: &str = "data";
|
||||||
|
const LOGS_DIRECTORY: &str = "logs";
|
||||||
|
|
||||||
|
const IPC_FILE: &str = "geth.ipc";
|
||||||
|
const GENESIS_JSON_FILE: &str = "genesis.json";
|
||||||
|
|
||||||
|
const READY_MARKER: &str = "IPC endpoint opened";
|
||||||
|
const ERROR_MARKER: &str = "Fatal:";
|
||||||
|
|
||||||
|
pub fn new(
|
||||||
|
context: impl AsRef<WorkingDirectoryConfiguration>
|
||||||
|
+ AsRef<WalletConfiguration>
|
||||||
|
+ AsRef<GethConfiguration>
|
||||||
|
+ Clone,
|
||||||
|
use_fallback_gas_filler: bool,
|
||||||
|
) -> Self {
|
||||||
|
let working_directory_configuration =
|
||||||
|
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
|
||||||
|
let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
|
||||||
|
let geth_configuration = AsRef::<GethConfiguration>::as_ref(&context);
|
||||||
|
|
||||||
|
let geth_directory = working_directory_configuration
|
||||||
|
.as_path()
|
||||||
|
.join(Self::BASE_DIRECTORY);
|
||||||
|
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
|
||||||
|
let base_directory = geth_directory.join(id.to_string());
|
||||||
|
|
||||||
|
let wallet = wallet_configuration.wallet();
|
||||||
|
|
||||||
|
Self {
|
||||||
|
connection_string: base_directory.join(Self::IPC_FILE).display().to_string(),
|
||||||
|
data_directory: base_directory.join(Self::DATA_DIRECTORY),
|
||||||
|
logs_directory: base_directory.join(Self::LOGS_DIRECTORY),
|
||||||
|
base_directory,
|
||||||
|
geth: geth_configuration.path.clone(),
|
||||||
|
id,
|
||||||
|
handle: None,
|
||||||
|
start_timeout: geth_configuration.start_timeout_ms,
|
||||||
|
wallet: wallet.clone(),
|
||||||
|
nonce_manager: Default::default(),
|
||||||
|
provider: Default::default(),
|
||||||
|
use_fallback_gas_filler,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create the node directory and call `geth init` to configure the genesis.
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn init(&mut self, genesis: Genesis) -> anyhow::Result<&mut Self> {
|
||||||
|
let _ = clear_directory(&self.base_directory);
|
||||||
|
let _ = clear_directory(&self.logs_directory);
|
||||||
|
|
||||||
|
create_dir_all(&self.base_directory)
|
||||||
|
.context("Failed to create base directory for geth node")?;
|
||||||
|
create_dir_all(&self.logs_directory)
|
||||||
|
.context("Failed to create logs directory for geth node")?;
|
||||||
|
|
||||||
|
let genesis = Self::node_genesis(genesis, self.wallet.as_ref());
|
||||||
|
let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE);
|
||||||
|
serde_json::to_writer(
|
||||||
|
File::create(&genesis_path).context("Failed to create geth genesis file")?,
|
||||||
|
&genesis,
|
||||||
|
)
|
||||||
|
.context("Failed to serialize geth genesis JSON to file")?;
|
||||||
|
|
||||||
|
let mut child = Command::new(&self.geth)
|
||||||
|
.arg("--state.scheme")
|
||||||
|
.arg("hash")
|
||||||
|
.arg("init")
|
||||||
|
.arg("--datadir")
|
||||||
|
.arg(&self.data_directory)
|
||||||
|
.arg(genesis_path)
|
||||||
|
.stderr(Stdio::piped())
|
||||||
|
.stdout(Stdio::null())
|
||||||
|
.spawn()
|
||||||
|
.context("Failed to spawn geth --init process")?;
|
||||||
|
|
||||||
|
let mut stderr = String::new();
|
||||||
|
child
|
||||||
|
.stderr
|
||||||
|
.take()
|
||||||
|
.expect("should be piped")
|
||||||
|
.read_to_string(&mut stderr)
|
||||||
|
.context("Failed to read geth --init stderr")?;
|
||||||
|
|
||||||
|
if !child
|
||||||
|
.wait()
|
||||||
|
.context("Failed waiting for geth --init process to finish")?
|
||||||
|
.success()
|
||||||
|
{
|
||||||
|
anyhow::bail!("failed to initialize geth node #{:?}: {stderr}", &self.id);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(self)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Spawn the go-ethereum node child process.
|
||||||
|
///
|
||||||
|
/// [Instance::init] must be called prior.
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn spawn_process(&mut self) -> anyhow::Result<&mut Self> {
|
||||||
|
let process = Process::new(
|
||||||
|
None,
|
||||||
|
self.logs_directory.as_path(),
|
||||||
|
self.geth.as_path(),
|
||||||
|
|command, stdout_file, stderr_file| {
|
||||||
|
command
|
||||||
|
.arg("--dev")
|
||||||
|
.arg("--datadir")
|
||||||
|
.arg(&self.data_directory)
|
||||||
|
.arg("--ipcpath")
|
||||||
|
.arg(&self.connection_string)
|
||||||
|
.arg("--nodiscover")
|
||||||
|
.arg("--maxpeers")
|
||||||
|
.arg("0")
|
||||||
|
.arg("--txlookuplimit")
|
||||||
|
.arg("0")
|
||||||
|
.arg("--cache.blocklogs")
|
||||||
|
.arg("512")
|
||||||
|
.arg("--state.scheme")
|
||||||
|
.arg("hash")
|
||||||
|
.arg("--syncmode")
|
||||||
|
.arg("full")
|
||||||
|
.arg("--gcmode")
|
||||||
|
.arg("archive")
|
||||||
|
.stderr(stderr_file)
|
||||||
|
.stdout(stdout_file);
|
||||||
|
},
|
||||||
|
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
|
||||||
|
max_wait_duration: self.start_timeout,
|
||||||
|
check_function: Box::new(|_, stderr_line| match stderr_line {
|
||||||
|
Some(line) => {
|
||||||
|
if line.contains(Self::ERROR_MARKER) {
|
||||||
|
anyhow::bail!("Failed to start geth {line}");
|
||||||
|
} else if line.contains(Self::READY_MARKER) {
|
||||||
|
Ok(true)
|
||||||
|
} else {
|
||||||
|
Ok(false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None => Ok(false),
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
match process {
|
||||||
|
Ok(process) => self.handle = Some(process),
|
||||||
|
Err(err) => {
|
||||||
|
error!(?err, "Failed to start geth, shutting down gracefully");
|
||||||
|
self.shutdown()
|
||||||
|
.context("Failed to gracefully shutdown after geth start error")?;
|
||||||
|
return Err(err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(self)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn provider(&self) -> anyhow::Result<ConcreteProvider<Ethereum, Arc<EthereumWallet>>> {
|
||||||
|
self.provider
|
||||||
|
.get_or_try_init(|| async move {
|
||||||
|
construct_concurrency_limited_provider::<Ethereum, _>(
|
||||||
|
self.connection_string.as_str(),
|
||||||
|
FallbackGasFiller::default()
|
||||||
|
.with_fallback_mechanism(self.use_fallback_gas_filler),
|
||||||
|
ChainIdFiller::new(Some(CHAIN_ID)),
|
||||||
|
NonceFiller::new(self.nonce_manager.clone()),
|
||||||
|
self.wallet.clone(),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.context("Failed to construct the provider")
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.cloned()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn node_genesis(mut genesis: Genesis, wallet: &EthereumWallet) -> Genesis {
|
||||||
|
for signer_address in NetworkWallet::<Ethereum>::signer_addresses(&wallet) {
|
||||||
|
genesis
|
||||||
|
.alloc
|
||||||
|
.entry(signer_address)
|
||||||
|
.or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
|
||||||
|
}
|
||||||
|
genesis
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl EthereumNode for GethNode {
|
||||||
|
fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>> {
|
||||||
|
Box::pin(async move { Ok(()) })
|
||||||
|
}
|
||||||
|
|
||||||
|
fn id(&self) -> usize {
|
||||||
|
self.id as _
|
||||||
|
}
|
||||||
|
|
||||||
|
fn connection_string(&self) -> &str {
|
||||||
|
&self.connection_string
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(
|
||||||
|
level = "info",
|
||||||
|
skip_all,
|
||||||
|
fields(geth_node_id = self.id, connection_string = self.connection_string),
|
||||||
|
err,
|
||||||
|
)]
|
||||||
|
fn submit_transaction(
|
||||||
|
&self,
|
||||||
|
transaction: TransactionRequest,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<TxHash>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let provider = self
|
||||||
|
.provider()
|
||||||
|
.await
|
||||||
|
.context("Failed to create the provider for transaction submission")?;
|
||||||
|
let pending_transaction = provider
|
||||||
|
.send_transaction(transaction)
|
||||||
|
.await
|
||||||
|
.context("Failed to submit the transaction through the provider")?;
|
||||||
|
Ok(*pending_transaction.tx_hash())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(
|
||||||
|
level = "info",
|
||||||
|
skip_all,
|
||||||
|
fields(geth_node_id = self.id, connection_string = self.connection_string),
|
||||||
|
err,
|
||||||
|
)]
|
||||||
|
fn get_receipt(
|
||||||
|
&self,
|
||||||
|
tx_hash: TxHash,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
self.provider()
|
||||||
|
.await
|
||||||
|
.context("Failed to create provider for getting the receipt")?
|
||||||
|
.get_transaction_receipt(tx_hash)
|
||||||
|
.await
|
||||||
|
.context("Failed to get the receipt of the transaction")?
|
||||||
|
.context("Failed to get the receipt of the transaction")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(
|
||||||
|
level = "info",
|
||||||
|
skip_all,
|
||||||
|
fields(geth_node_id = self.id, connection_string = self.connection_string),
|
||||||
|
err,
|
||||||
|
)]
|
||||||
|
fn execute_transaction(
|
||||||
|
&self,
|
||||||
|
transaction: TransactionRequest,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
self.provider()
|
||||||
|
.await
|
||||||
|
.context("Failed to create provider for transaction submission")?
|
||||||
|
.send_transaction(transaction)
|
||||||
|
.await
|
||||||
|
.context("Encountered an error when submitting a transaction")?
|
||||||
|
.get_receipt()
|
||||||
|
.await
|
||||||
|
.context("Failed to get the receipt for the transaction")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn trace_transaction(
|
||||||
|
&self,
|
||||||
|
tx_hash: TxHash,
|
||||||
|
trace_options: GethDebugTracingOptions,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
self.provider()
|
||||||
|
.await
|
||||||
|
.context("Failed to create provider for tracing")?
|
||||||
|
.debug_trace_transaction(tx_hash, trace_options)
|
||||||
|
.await
|
||||||
|
.context("Failed to get the transaction trace")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn state_diff(
|
||||||
|
&self,
|
||||||
|
tx_hash: TxHash,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<DiffMode>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
|
||||||
|
diff_mode: Some(true),
|
||||||
|
disable_code: None,
|
||||||
|
disable_storage: None,
|
||||||
|
});
|
||||||
|
match self
|
||||||
|
.trace_transaction(tx_hash, trace_options)
|
||||||
|
.await
|
||||||
|
.context("Failed to trace transaction for prestate diff")?
|
||||||
|
.try_into_pre_state_frame()
|
||||||
|
.context("Failed to convert trace into pre-state frame")?
|
||||||
|
{
|
||||||
|
PreStateFrame::Diff(diff) => Ok(diff),
|
||||||
|
_ => anyhow::bail!("expected a diff mode trace"),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn balance_of(
|
||||||
|
&self,
|
||||||
|
address: Address,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
self.provider()
|
||||||
|
.await
|
||||||
|
.context("Failed to get the Geth provider")?
|
||||||
|
.get_balance(address)
|
||||||
|
.await
|
||||||
|
.map_err(Into::into)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn latest_state_proof(
|
||||||
|
&self,
|
||||||
|
address: Address,
|
||||||
|
keys: Vec<StorageKey>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<EIP1186AccountProofResponse>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
self.provider()
|
||||||
|
.await
|
||||||
|
.context("Failed to get the Geth provider")?
|
||||||
|
.get_proof(address, keys)
|
||||||
|
.latest()
|
||||||
|
.await
|
||||||
|
.map_err(Into::into)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn resolver(
|
||||||
|
&self,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Arc<dyn ResolverApi + '_>>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let id = self.id;
|
||||||
|
let provider = self.provider().await?;
|
||||||
|
Ok(Arc::new(GethNodeResolver { id, provider }) as Arc<dyn ResolverApi>)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
    /// The EVM version this node targets. This is hard-coded to Cancun rather
    /// than derived from the node's configuration or genesis.
    fn evm_version(&self) -> EVMVersion {
        EVMVersion::Cancun
    }
|
||||||
|
|
||||||
|
fn subscribe_to_full_blocks_information(
|
||||||
|
&self,
|
||||||
|
) -> Pin<
|
||||||
|
Box<
|
||||||
|
dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
|
||||||
|
+ '_,
|
||||||
|
>,
|
||||||
|
> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let provider = self
|
||||||
|
.provider()
|
||||||
|
.await
|
||||||
|
.context("Failed to create the provider for block subscription")?;
|
||||||
|
let block_subscription = provider.subscribe_full_blocks();
|
||||||
|
let block_stream = block_subscription
|
||||||
|
.into_stream()
|
||||||
|
.await
|
||||||
|
.context("Failed to create the block stream")?;
|
||||||
|
|
||||||
|
let mined_block_information_stream = block_stream.filter_map(|block| async {
|
||||||
|
let block = block.ok()?;
|
||||||
|
Some(MinedBlockInformation {
|
||||||
|
ethereum_block_information: EthereumMinedBlockInformation {
|
||||||
|
block_number: block.number(),
|
||||||
|
block_timestamp: block.header.timestamp,
|
||||||
|
mined_gas: block.header.gas_used as _,
|
||||||
|
block_gas_limit: block.header.gas_limit as _,
|
||||||
|
transaction_hashes: block
|
||||||
|
.transactions
|
||||||
|
.into_hashes()
|
||||||
|
.as_hashes()
|
||||||
|
.expect("Must be hashes")
|
||||||
|
.to_vec(),
|
||||||
|
},
|
||||||
|
substrate_block_information: None,
|
||||||
|
tx_counts: Default::default(),
|
||||||
|
})
|
||||||
|
});
|
||||||
|
|
||||||
|
Ok(Box::pin(mined_block_information_stream)
|
||||||
|
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn provider(
|
||||||
|
&self,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
|
||||||
|
{
|
||||||
|
Box::pin(
|
||||||
|
self.provider()
|
||||||
|
.map(|provider| provider.map(|provider| provider.erased())),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A [`ResolverApi`] implementation backed by a live geth node's provider.
pub struct GethNodeResolver {
    // Identifier of the geth node this resolver was created from; recorded in
    // the tracing spans of every resolver method.
    id: u32,
    // Provider used for all RPC queries made by this resolver.
    provider: ConcreteProvider<Ethereum, Arc<EthereumWallet>>,
}
|
||||||
|
|
||||||
|
impl ResolverApi for GethNodeResolver {
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn chain_id(
|
||||||
|
&self,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::primitives::ChainId>> + '_>> {
|
||||||
|
Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) })
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn transaction_gas_price(
|
||||||
|
&self,
|
||||||
|
tx_hash: TxHash,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
self.provider
|
||||||
|
.get_transaction_receipt(tx_hash)
|
||||||
|
.await?
|
||||||
|
.context("Failed to get the transaction receipt")
|
||||||
|
.map(|receipt| receipt.effective_gas_price)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn block_gas_limit(
|
||||||
|
&self,
|
||||||
|
number: BlockNumberOrTag,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
self.provider
|
||||||
|
.get_block_by_number(number)
|
||||||
|
.await
|
||||||
|
.context("Failed to get the geth block")?
|
||||||
|
.context("Failed to get the Geth block, perhaps there are no blocks?")
|
||||||
|
.map(|block| block.header.gas_limit as _)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn block_coinbase(
|
||||||
|
&self,
|
||||||
|
number: BlockNumberOrTag,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Address>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
self.provider
|
||||||
|
.get_block_by_number(number)
|
||||||
|
.await
|
||||||
|
.context("Failed to get the geth block")?
|
||||||
|
.context("Failed to get the Geth block, perhaps there are no blocks?")
|
||||||
|
.map(|block| block.header.beneficiary)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn block_difficulty(
|
||||||
|
&self,
|
||||||
|
number: BlockNumberOrTag,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
self.provider
|
||||||
|
.get_block_by_number(number)
|
||||||
|
.await
|
||||||
|
.context("Failed to get the geth block")?
|
||||||
|
.context("Failed to get the Geth block, perhaps there are no blocks?")
|
||||||
|
.map(|block| U256::from_be_bytes(block.header.mix_hash.0))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn block_base_fee(
|
||||||
|
&self,
|
||||||
|
number: BlockNumberOrTag,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<u64>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
self.provider
|
||||||
|
.get_block_by_number(number)
|
||||||
|
.await
|
||||||
|
.context("Failed to get the geth block")?
|
||||||
|
.context("Failed to get the Geth block, perhaps there are no blocks?")
|
||||||
|
.and_then(|block| {
|
||||||
|
block
|
||||||
|
.header
|
||||||
|
.base_fee_per_gas
|
||||||
|
.context("Failed to get the base fee per gas")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn block_hash(
|
||||||
|
&self,
|
||||||
|
number: BlockNumberOrTag,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockHash>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
self.provider
|
||||||
|
.get_block_by_number(number)
|
||||||
|
.await
|
||||||
|
.context("Failed to get the geth block")?
|
||||||
|
.context("Failed to get the Geth block, perhaps there are no blocks?")
|
||||||
|
.map(|block| block.header.hash)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn block_timestamp(
|
||||||
|
&self,
|
||||||
|
number: BlockNumberOrTag,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockTimestamp>> + '_>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
self.provider
|
||||||
|
.get_block_by_number(number)
|
||||||
|
.await
|
||||||
|
.context("Failed to get the geth block")?
|
||||||
|
.context("Failed to get the Geth block, perhaps there are no blocks?")
|
||||||
|
.map(|block| block.header.timestamp)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn last_block_number(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockNumber>> + '_>> {
|
||||||
|
Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Node for GethNode {
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn shutdown(&mut self) -> anyhow::Result<()> {
|
||||||
|
drop(self.handle.take());
|
||||||
|
|
||||||
|
// Remove the node's database so that subsequent runs do not run on the same database. We
|
||||||
|
// ignore the error just in case the directory didn't exist in the first place and therefore
|
||||||
|
// there's nothing to be deleted.
|
||||||
|
let _ = remove_dir_all(self.base_directory.join(Self::DATA_DIRECTORY));
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
|
||||||
|
self.init(genesis)?.spawn_process()?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn version(&self) -> anyhow::Result<String> {
|
||||||
|
let output = Command::new(&self.geth)
|
||||||
|
.arg("--version")
|
||||||
|
.stdin(Stdio::null())
|
||||||
|
.stdout(Stdio::piped())
|
||||||
|
.stderr(Stdio::null())
|
||||||
|
.spawn()
|
||||||
|
.context("Failed to spawn geth --version process")?
|
||||||
|
.wait_with_output()
|
||||||
|
.context("Failed to wait for geth --version output")?
|
||||||
|
.stdout;
|
||||||
|
Ok(String::from_utf8_lossy(&output).into())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for GethNode {
|
||||||
|
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
|
||||||
|
fn drop(&mut self) {
|
||||||
|
self.shutdown().expect("Failed to shutdown")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use std::sync::LazyLock;

    use super::*;

    // A default execution context for all tests in this module.
    fn test_config() -> TestExecutionContext {
        TestExecutionContext::default()
    }

    // Creates a fresh geth node: initializes it from the context's genesis
    // configuration and spawns the underlying process, panicking on failure.
    fn new_node() -> (TestExecutionContext, GethNode) {
        let context = test_config();
        let mut node = GethNode::new(&context, true);
        node.init(context.genesis_configuration.genesis().unwrap().clone())
            .expect("Failed to initialize the node")
            .spawn_process()
            .expect("Failed to spawn the node process");
        (context, node)
    }

    // A single lazily-spawned node shared by all tests in this module so each
    // test doesn't pay the cost of starting its own geth process.
    fn shared_state() -> &'static (TestExecutionContext, GethNode) {
        static STATE: LazyLock<(TestExecutionContext, GethNode)> = LazyLock::new(new_node);
        &STATE
    }

    fn shared_node() -> &'static GethNode {
        &shared_state().1
    }

    // NOTE(review): this is the only test in the module not marked
    // `#[ignore]` — confirm whether it is intentionally run by default.
    #[tokio::test]
    async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
        // Arrange
        let (context, node) = shared_state();

        let account_address = context
            .wallet_configuration
            .wallet()
            .default_signer()
            .address();
        // A self-transfer: funds are sent to the default signer's own address.
        let transaction = TransactionRequest::default()
            .to(account_address)
            .value(U256::from(100_000_000_000_000u128));

        // Act
        let receipt = node.execute_transaction(transaction).await;

        // Assert
        let _ = receipt.expect("Failed to get the receipt for the transfer");
    }

    #[test]
    #[ignore = "Ignored since they take a long time to run"]
    fn version_works() {
        // Arrange
        let node = shared_node();

        // Act
        let version = node.version();

        // Assert
        let version = version.expect("Failed to get the version");
        assert!(
            version.starts_with("geth version"),
            "expected version string, got: '{version}'"
        );
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    async fn can_get_chain_id_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let chain_id = node.resolver().await.unwrap().chain_id().await;

        // Assert
        let chain_id = chain_id.expect("Failed to get the chain id");
        assert_eq!(chain_id, 420_420_420);
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    async fn can_get_gas_limit_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let gas_limit = node
            .resolver()
            .await
            .unwrap()
            .block_gas_limit(BlockNumberOrTag::Latest)
            .await;

        // Assert
        let _ = gas_limit.expect("Failed to get the gas limit");
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    async fn can_get_coinbase_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let coinbase = node
            .resolver()
            .await
            .unwrap()
            .block_coinbase(BlockNumberOrTag::Latest)
            .await;

        // Assert
        let _ = coinbase.expect("Failed to get the coinbase");
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    async fn can_get_block_difficulty_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let block_difficulty = node
            .resolver()
            .await
            .unwrap()
            .block_difficulty(BlockNumberOrTag::Latest)
            .await;

        // Assert
        let _ = block_difficulty.expect("Failed to get the block difficulty");
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    async fn can_get_block_hash_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let block_hash = node
            .resolver()
            .await
            .unwrap()
            .block_hash(BlockNumberOrTag::Latest)
            .await;

        // Assert
        let _ = block_hash.expect("Failed to get the block hash");
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    async fn can_get_block_timestamp_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let block_timestamp = node
            .resolver()
            .await
            .unwrap()
            .block_timestamp(BlockNumberOrTag::Latest)
            .await;

        // Assert
        let _ = block_timestamp.expect("Failed to get the block timestamp");
    }

    #[tokio::test]
    #[ignore = "Ignored since they take a long time to run"]
    async fn can_get_block_number_from_node() {
        // Arrange
        let node = shared_node();

        // Act
        let block_number = node.resolver().await.unwrap().last_block_number().await;

        // Assert
        let _ = block_number.expect("Failed to get the block number");
    }
}
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,5 @@
|
|||||||
|
// The node backends that can be driven by the differential-testing framework.
pub mod geth;
pub mod lighthouse_geth;
pub mod polkadot_omni_node;
pub mod substrate;
pub mod zombienet;
|
||||||
@@ -0,0 +1,791 @@
|
|||||||
|
use std::{
|
||||||
|
fs::{File, create_dir_all, remove_dir_all},
|
||||||
|
path::{Path, PathBuf},
|
||||||
|
pin::Pin,
|
||||||
|
process::{Command, Stdio},
|
||||||
|
sync::{
|
||||||
|
Arc,
|
||||||
|
atomic::{AtomicU32, Ordering},
|
||||||
|
},
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
|
||||||
|
use alloy::{
|
||||||
|
eips::BlockNumberOrTag,
|
||||||
|
genesis::Genesis,
|
||||||
|
network::{Ethereum, EthereumWallet, NetworkWallet},
|
||||||
|
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
|
||||||
|
providers::{
|
||||||
|
Provider,
|
||||||
|
ext::DebugApi,
|
||||||
|
fillers::{CachedNonceManager, ChainIdFiller, NonceFiller},
|
||||||
|
},
|
||||||
|
rpc::types::{
|
||||||
|
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
|
||||||
|
trace::geth::{
|
||||||
|
DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
use anyhow::Context as _;
|
||||||
|
use futures::{FutureExt, Stream, StreamExt};
|
||||||
|
use revive_common::EVMVersion;
|
||||||
|
use revive_dt_common::fs::clear_directory;
|
||||||
|
use revive_dt_format::traits::ResolverApi;
|
||||||
|
use serde_json::json;
|
||||||
|
use sp_core::crypto::Ss58Codec;
|
||||||
|
use sp_runtime::AccountId32;
|
||||||
|
|
||||||
|
use revive_dt_config::*;
|
||||||
|
use revive_dt_node_interaction::EthereumNode;
|
||||||
|
use revive_dt_report::{
|
||||||
|
EthereumMinedBlockInformation, MinedBlockInformation, SubstrateMinedBlockInformation,
|
||||||
|
};
|
||||||
|
use subxt::{OnlineClient, SubstrateConfig};
|
||||||
|
use tokio::sync::OnceCell;
|
||||||
|
use tracing::{instrument, trace};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
Node,
|
||||||
|
constants::INITIAL_BALANCE,
|
||||||
|
helpers::{Process, ProcessReadinessWaitBehavior},
|
||||||
|
provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Monotonically increasing counter used to hand each spawned node a unique
/// id; the id in turn derives the node's ports and on-disk directories.
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);

/// The number of blocks that should be cached by the polkadot-omni-node and the eth-rpc.
const NUMBER_OF_CACHED_BLOCKS: u32 = 100_000;
|
||||||
|
|
||||||
|
/// A node implementation for the polkadot-omni-node.
#[derive(Debug)]
pub struct PolkadotOmnichainNode {
    /// The id of the node.
    id: u32,

    /// The path of the polkadot-omni-chain node binary.
    polkadot_omnichain_node_binary_path: PathBuf,
    /// The path of the eth-rpc binary.
    eth_rpc_binary_path: PathBuf,
    /// The path of the runtime's WASM that this node will be spawned with.
    chain_spec_path: Option<PathBuf>,
    /// The path of the base directory which contains all of the stored data for this node.
    base_directory_path: PathBuf,
    /// The path of the logs directory which contains all of the stored logs.
    logs_directory_path: PathBuf,

    /// Defines the amount of time to wait before considering that the node start has timed out.
    node_start_timeout: Duration,

    /// The id of the parachain that this node will be spawning.
    parachain_id: Option<usize>,
    /// The block time.
    block_time: Duration,

    /// The node's process.
    polkadot_omnichain_node_process: Option<Process>,
    /// The eth-rpc's process.
    eth_rpc_process: Option<Process>,

    /// The URL of the eth-rpc.
    rpc_url: String,
    /// The wallet object that's used to sign any transaction submitted through this node.
    wallet: Arc<EthereumWallet>,
    /// The nonce manager used to populate nonces for all transactions submitted through this node.
    nonce_manager: CachedNonceManager,
    /// The provider used for all RPC interactions with the RPC of this node.
    /// Lazily initialized on first use.
    provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,

    /// A boolean that controls if the fallback gas filler should be used or not.
    use_fallback_gas_filler: bool,
}
|
||||||
|
|
||||||
|
impl PolkadotOmnichainNode {
    // Directory layout: all node data lives under
    // `<working-dir>/polkadot-omni-node/<id>/`, logs under its `logs/`
    // subdirectory.
    const BASE_DIRECTORY: &str = "polkadot-omni-node";
    const LOGS_DIRECTORY: &str = "logs";

    // Log lines that mark each process as ready to accept RPC traffic.
    const POLKADOT_OMNICHAIN_NODE_READY_MARKER: &str = "Running JSON-RPC server";
    const ETH_RPC_READY_MARKER: &str = "Running JSON-RPC server";
    const CHAIN_SPEC_JSON_FILE: &str = "template_chainspec.json";
    // Base ports; each node offsets these by its id to avoid collisions.
    const BASE_POLKADOT_OMNICHAIN_NODE_RPC_PORT: u16 = 9944;
    const BASE_ETH_RPC_PORT: u16 = 8545;

    // Log filter directives passed to the two processes.
    const POLKADOT_OMNICHAIN_NODE_LOG_ENV: &str =
        "error,evm=debug,sc_rpc_server=info,runtime::revive=debug";
    const RPC_LOG_ENV: &str = "info,eth-rpc=debug";

    /// Creates a new (not yet initialized or spawned) node from the given
    /// configuration bundle, assigning it a fresh id and derived directories.
    pub fn new(
        context: impl AsRef<WorkingDirectoryConfiguration>
            + AsRef<EthRpcConfiguration>
            + AsRef<WalletConfiguration>
            + AsRef<PolkadotOmnichainNodeConfiguration>,
        use_fallback_gas_filler: bool,
    ) -> Self {
        let polkadot_omnichain_node_configuration =
            AsRef::<PolkadotOmnichainNodeConfiguration>::as_ref(&context);
        let working_directory_path =
            AsRef::<WorkingDirectoryConfiguration>::as_ref(&context).as_path();
        let eth_rpc_path = AsRef::<EthRpcConfiguration>::as_ref(&context)
            .path
            .as_path();
        let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();

        // Unique id drives unique ports and directories for this node.
        let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
        let base_directory = working_directory_path
            .join(Self::BASE_DIRECTORY)
            .join(id.to_string());
        let logs_directory = base_directory.join(Self::LOGS_DIRECTORY);

        Self {
            id,
            polkadot_omnichain_node_binary_path: polkadot_omnichain_node_configuration
                .path
                .to_path_buf(),
            eth_rpc_binary_path: eth_rpc_path.to_path_buf(),
            chain_spec_path: polkadot_omnichain_node_configuration
                .chain_spec_path
                .clone(),
            base_directory_path: base_directory,
            logs_directory_path: logs_directory,
            parachain_id: polkadot_omnichain_node_configuration.parachain_id,
            block_time: polkadot_omnichain_node_configuration.block_time,
            polkadot_omnichain_node_process: Default::default(),
            eth_rpc_process: Default::default(),
            rpc_url: Default::default(),
            wallet,
            nonce_manager: Default::default(),
            provider: Default::default(),
            use_fallback_gas_filler,
            node_start_timeout: polkadot_omnichain_node_configuration.start_timeout_ms,
        }
    }

    /// Prepares the node's on-disk state: recreates its directories and
    /// writes the chainspec (with the wallet's accounts funded) to disk.
    /// The `Genesis` argument is ignored — the chainspec file supplied via
    /// configuration is used instead.
    fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> {
        trace!("Removing the various directories");
        // Errors are ignored: the directories may simply not exist yet.
        let _ = remove_dir_all(self.base_directory_path.as_path());
        let _ = clear_directory(&self.base_directory_path);
        let _ = clear_directory(&self.logs_directory_path);

        trace!("Creating the various directories");
        create_dir_all(&self.base_directory_path)
            .context("Failed to create base directory for polkadot-omni-node node")?;
        create_dir_all(&self.logs_directory_path)
            .context("Failed to create logs directory for polkadot-omni-node node")?;

        let template_chainspec_path = self.base_directory_path.join(Self::CHAIN_SPEC_JSON_FILE);

        // Patch the configured chainspec so every wallet signer is funded.
        let chainspec_json = Self::node_genesis(
            &self.wallet,
            self.chain_spec_path
                .as_ref()
                .context("No runtime path provided")?,
        )
        .context("Failed to prepare the chainspec command")?;

        serde_json::to_writer_pretty(
            std::fs::File::create(&template_chainspec_path)
                .context("Failed to create polkadot-omni-node template chainspec file")?,
            &chainspec_json,
        )
        .context("Failed to write polkadot-omni-node template chainspec JSON")?;

        Ok(self)
    }

    /// Spawns the polkadot-omni-node process followed by the eth-rpc process,
    /// waiting for each to log its readiness marker. On a start failure the
    /// node is shut down gracefully before the error is returned.
    fn spawn_process(&mut self) -> anyhow::Result<()> {
        // Error out if the runtime's path or the parachain id are not set which means that the
        // arguments we require were not provided.
        self.chain_spec_path
            .as_ref()
            .context("No WASM path provided for the runtime")?;
        self.parachain_id
            .as_ref()
            .context("No argument provided for the parachain-id")?;

        // Per-node ports derived from the node id to avoid collisions
        // between concurrently running nodes.
        let polkadot_omnichain_node_rpc_port =
            Self::BASE_POLKADOT_OMNICHAIN_NODE_RPC_PORT + self.id as u16;
        let eth_rpc_port = Self::BASE_ETH_RPC_PORT + self.id as u16;

        let chainspec_path = self.base_directory_path.join(Self::CHAIN_SPEC_JSON_FILE);

        self.rpc_url = format!("http://127.0.0.1:{eth_rpc_port}");

        let polkadot_omnichain_node_process = Process::new(
            "node",
            self.logs_directory_path.as_path(),
            self.polkadot_omnichain_node_binary_path.as_path(),
            |command, stdout_file, stderr_file| {
                command
                    .arg("--log")
                    .arg(Self::POLKADOT_OMNICHAIN_NODE_LOG_ENV)
                    .arg("--dev-block-time")
                    .arg(self.block_time.as_millis().to_string())
                    .arg("--rpc-port")
                    .arg(polkadot_omnichain_node_rpc_port.to_string())
                    .arg("--base-path")
                    .arg(self.base_directory_path.as_path())
                    .arg("--no-prometheus")
                    .arg("--no-hardware-benchmarks")
                    .arg("--authoring")
                    .arg("slot-based")
                    .arg("--chain")
                    .arg(chainspec_path)
                    .arg("--name")
                    .arg(format!("polkadot-omni-node-{}", self.id))
                    .arg("--rpc-methods")
                    .arg("unsafe")
                    .arg("--rpc-cors")
                    .arg("all")
                    // Effectively-unbounded connection and pool limits so the
                    // test harness is never throttled by the node.
                    .arg("--rpc-max-connections")
                    .arg(u32::MAX.to_string())
                    .arg("--pool-limit")
                    .arg(u32::MAX.to_string())
                    .arg("--pool-kbytes")
                    .arg(u32::MAX.to_string())
                    .arg("--state-pruning")
                    .arg(NUMBER_OF_CACHED_BLOCKS.to_string())
                    .env("RUST_LOG", Self::POLKADOT_OMNICHAIN_NODE_LOG_ENV)
                    .stdout(stdout_file)
                    .stderr(stderr_file);
            },
            // The node is considered ready once its stderr contains the
            // readiness marker, bounded by the configured start timeout.
            ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
                max_wait_duration: self.node_start_timeout,
                check_function: Box::new(|_, stderr_line| match stderr_line {
                    Some(line) => Ok(line.contains(Self::POLKADOT_OMNICHAIN_NODE_READY_MARKER)),
                    None => Ok(false),
                }),
            },
        );

        match polkadot_omnichain_node_process {
            Ok(process) => self.polkadot_omnichain_node_process = Some(process),
            Err(err) => {
                tracing::error!(
                    ?err,
                    "Failed to start polkadot-omni-node, shutting down gracefully"
                );
                self.shutdown().context(
                    "Failed to gracefully shutdown after polkadot-omni-node start error",
                )?;
                return Err(err);
            }
        }

        // The eth-rpc adapter is started second as it connects to the node's
        // websocket RPC endpoint.
        let eth_rpc_process = Process::new(
            "eth-rpc",
            self.logs_directory_path.as_path(),
            self.eth_rpc_binary_path.as_path(),
            |command, stdout_file, stderr_file| {
                command
                    .arg("--dev")
                    .arg("--rpc-port")
                    .arg(eth_rpc_port.to_string())
                    .arg("--node-rpc-url")
                    .arg(format!("ws://127.0.0.1:{polkadot_omnichain_node_rpc_port}"))
                    .arg("--rpc-max-connections")
                    .arg(u32::MAX.to_string())
                    .arg("--index-last-n-blocks")
                    .arg(NUMBER_OF_CACHED_BLOCKS.to_string())
                    .arg("--cache-size")
                    .arg(NUMBER_OF_CACHED_BLOCKS.to_string())
                    .env("RUST_LOG", Self::RPC_LOG_ENV)
                    .stdout(stdout_file)
                    .stderr(stderr_file);
            },
            ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
                max_wait_duration: Duration::from_secs(30),
                check_function: Box::new(|_, stderr_line| match stderr_line {
                    Some(line) => Ok(line.contains(Self::ETH_RPC_READY_MARKER)),
                    None => Ok(false),
                }),
            },
        );
        match eth_rpc_process {
            Ok(process) => self.eth_rpc_process = Some(process),
            Err(err) => {
                tracing::error!(?err, "Failed to start eth-rpc, shutting down gracefully");
                self.shutdown()
                    .context("Failed to gracefully shutdown after eth-rpc start error")?;
                return Err(err);
            }
        }

        Ok(())
    }

    /// Maps a 20-byte Ethereum address to its SS58-encoded Substrate account
    /// id by right-padding it with `0xEE` bytes to 32 bytes — presumably the
    /// pallet-revive address-mapping convention; confirm against the runtime.
    fn eth_to_substrate_address(address: &Address) -> String {
        let eth_bytes = address.0.0;

        let mut padded = [0xEEu8; 32];
        padded[..20].copy_from_slice(&eth_bytes);

        let account_id = AccountId32::from(padded);
        account_id.to_ss58check()
    }

    /// Returns the trimmed output of `eth-rpc --version`.
    pub fn eth_rpc_version(&self) -> anyhow::Result<String> {
        let output = Command::new(&self.eth_rpc_binary_path)
            .arg("--version")
            .stdin(Stdio::null())
            .stdout(Stdio::piped())
            .stderr(Stdio::null())
            .spawn()?
            .wait_with_output()?
            .stdout;
        Ok(String::from_utf8_lossy(&output).trim().to_string())
    }

    /// Returns the (lazily constructed, cached) provider pointed at this
    /// node's eth-rpc endpoint, configured with the node's gas, chain-id and
    /// nonce fillers and its signing wallet.
    async fn provider(&self) -> anyhow::Result<ConcreteProvider<Ethereum, Arc<EthereumWallet>>> {
        self.provider
            .get_or_try_init(|| async move {
                construct_concurrency_limited_provider::<Ethereum, _>(
                    self.rpc_url.as_str(),
                    FallbackGasFiller::default()
                        .with_fallback_mechanism(self.use_fallback_gas_filler),
                    ChainIdFiller::default(),
                    NonceFiller::new(self.nonce_manager.clone()),
                    self.wallet.clone(),
                )
                .await
                .context("Failed to construct the provider")
            })
            .await
            .cloned()
    }

    /// Loads the chainspec at `chain_spec_path` and appends an
    /// `INITIAL_BALANCE` entry for every signer in `wallet` to its genesis
    /// balances, returning the patched JSON.
    pub fn node_genesis(
        wallet: &EthereumWallet,
        chain_spec_path: &Path,
    ) -> anyhow::Result<serde_json::Value> {
        let unmodified_chainspec_file =
            File::open(chain_spec_path).context("Failed to open the unmodified chainspec file")?;
        let mut chainspec_json =
            serde_json::from_reader::<_, serde_json::Value>(&unmodified_chainspec_file)
                .context("Failed to read the unmodified chainspec JSON")?;

        let existing_chainspec_balances =
            chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"]
                .as_array_mut()
                .expect("Can't fail");

        for address in NetworkWallet::<Ethereum>::signer_addresses(wallet) {
            let substrate_address = Self::eth_to_substrate_address(&address);
            let balance = INITIAL_BALANCE;
            existing_chainspec_balances.push(json!((substrate_address, balance)));
        }

        Ok(chainspec_json)
    }
}
|
||||||
|
|
||||||
|
impl EthereumNode for PolkadotOmnichainNode {
    /// No pre-transaction setup is needed for this node; resolves immediately.
    fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>> {
        Box::pin(async move { Ok(()) })
    }

    /// The node's numeric id (widened from the stored integer type).
    fn id(&self) -> usize {
        self.id as _
    }

    /// The node's RPC URL.
    fn connection_string(&self) -> &str {
        &self.rpc_url
    }

    /// Submits `transaction` and returns its hash without waiting for inclusion.
    fn submit_transaction(
        &self,
        transaction: TransactionRequest,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TxHash>> + '_>> {
        Box::pin(async move {
            let provider = self
                .provider()
                .await
                .context("Failed to create the provider for transaction submission")?;
            let pending_transaction = provider
                .send_transaction(transaction)
                .await
                .context("Failed to submit the transaction through the provider")?;
            Ok(*pending_transaction.tx_hash())
        })
    }

    /// Fetches the receipt of `tx_hash`; errors if the node returns no receipt.
    fn get_receipt(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
        Box::pin(async move {
            self.provider()
                .await
                .context("Failed to create provider for getting the receipt")?
                .get_transaction_receipt(tx_hash)
                .await
                // First context: the RPC call itself failed.
                .context("Failed to get the receipt of the transaction")?
                // Second context: the call succeeded but returned no receipt (None).
                .context("Failed to get the receipt of the transaction")
        })
    }

    /// Submits `transaction` and waits for its receipt.
    fn execute_transaction(
        &self,
        transaction: TransactionRequest,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
        Box::pin(async move {
            self.provider()
                .await
                .context("Failed to create provider for transaction submission")?
                .send_transaction(transaction)
                .await
                .context("Encountered an error when submitting a transaction")?
                .get_receipt()
                .await
                .context("Failed to get the receipt for the transaction")
        })
    }

    /// Runs a Geth-style debug trace of `tx_hash` with the given options.
    fn trace_transaction(
        &self,
        tx_hash: TxHash,
        trace_options: GethDebugTracingOptions,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
        Box::pin(async move {
            self.provider()
                .await
                .context("Failed to create provider for debug tracing")?
                .debug_trace_transaction(tx_hash, trace_options)
                .await
                .context("Failed to obtain debug trace from eth-proxy")
        })
    }

    /// Obtains the pre-state diff of `tx_hash` via the prestate tracer in diff mode.
    fn state_diff(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<DiffMode>> + '_>> {
        Box::pin(async move {
            let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
                diff_mode: Some(true),
                disable_code: None,
                disable_storage: None,
            });
            match self
                .trace_transaction(tx_hash, trace_options)
                .await?
                .try_into_pre_state_frame()?
            {
                PreStateFrame::Diff(diff) => Ok(diff),
                // diff_mode was requested above, so a non-diff frame is a node bug.
                _ => anyhow::bail!("expected a diff mode trace"),
            }
        })
    }

    /// Returns the latest balance of `address`.
    fn balance_of(
        &self,
        address: Address,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
        Box::pin(async move {
            self.provider()
                .await
                .context("Failed to get the eth-rpc provider")?
                .get_balance(address)
                .await
                .map_err(Into::into)
        })
    }

    /// Returns an EIP-1186 account/storage proof for `address` at the latest block.
    fn latest_state_proof(
        &self,
        address: Address,
        keys: Vec<StorageKey>,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<EIP1186AccountProofResponse>> + '_>> {
        Box::pin(async move {
            self.provider()
                .await
                .context("Failed to get the eth-rpc provider")?
                .get_proof(address, keys)
                .latest()
                .await
                .map_err(Into::into)
        })
    }

    /// Builds a [`ResolverApi`] backed by this node's provider.
    fn resolver(
        &self,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Arc<dyn ResolverApi + '_>>> + '_>> {
        Box::pin(async move {
            let id = self.id;
            let provider = self.provider().await?;
            Ok(Arc::new(PolkadotOmnichainNodeResolver { id, provider }) as Arc<dyn ResolverApi>)
        })
    }

    /// The EVM version this node targets.
    fn evm_version(&self) -> EVMVersion {
        EVMVersion::Cancun
    }

    /// Subscribes to new blocks, combining the Ethereum-side block view (via the
    /// eth-rpc provider) with substrate-side weight information (via subxt).
    fn subscribe_to_full_blocks_information(
        &self,
    ) -> Pin<
        Box<
            dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
                + '_,
        >,
    > {
        // Generated subxt API for the revive runtime; used below for the
        // system.block_weight storage and system.block_weights constants.
        #[subxt::subxt(runtime_metadata_path = "../../assets/revive_metadata.scale")]
        pub mod revive {}

        Box::pin(async move {
            // Each node listens on a port offset by its id from the base port.
            let polkadot_omnichain_node_rpc_port =
                Self::BASE_POLKADOT_OMNICHAIN_NODE_RPC_PORT + self.id as u16;
            let polkadot_omnichain_node_rpc_url =
                format!("ws://127.0.0.1:{polkadot_omnichain_node_rpc_port}");
            let api = OnlineClient::<SubstrateConfig>::from_url(polkadot_omnichain_node_rpc_url)
                .await
                .context("Failed to create subxt rpc client")?;
            let provider = self.provider().await.context("Failed to create provider")?;

            let block_stream = api
                .blocks()
                .subscribe_all()
                .await
                .context("Failed to subscribe to blocks")?;

            let mined_block_information_stream = block_stream.filter_map(move |block| {
                let api = api.clone();
                let provider = provider.clone();

                async move {
                    // Skip stream errors: filter_map drops the item on None.
                    let substrate_block = block.ok()?;
                    // NOTE(review): the expect("TODO: Remove") calls below panic the
                    // subscription task on any RPC failure — the TODO suggests these
                    // should become recoverable errors; confirm before relying on this.
                    let revive_block = provider
                        .get_block_by_number(
                            BlockNumberOrTag::Number(substrate_block.number() as _),
                        )
                        .await
                        .expect("TODO: Remove")
                        .expect("TODO: Remove");

                    // Consumed weight for the block, summed across all dispatch classes.
                    let used = api
                        .storage()
                        .at(substrate_block.reference())
                        .fetch_or_default(&revive::storage().system().block_weight())
                        .await
                        .expect("TODO: Remove");

                    let block_ref_time = (used.normal.ref_time as u128)
                        + (used.operational.ref_time as u128)
                        + (used.mandatory.ref_time as u128);
                    let block_proof_size = (used.normal.proof_size as u128)
                        + (used.operational.proof_size as u128)
                        + (used.mandatory.proof_size as u128);

                    // Runtime-declared maximum block weights.
                    let limits = api
                        .constants()
                        .at(&revive::constants().system().block_weights())
                        .expect("TODO: Remove");

                    let max_ref_time = limits.max_block.ref_time;
                    let max_proof_size = limits.max_block.proof_size;

                    Some(MinedBlockInformation {
                        ethereum_block_information: EthereumMinedBlockInformation {
                            block_number: revive_block.number(),
                            block_timestamp: revive_block.header.timestamp,
                            mined_gas: revive_block.header.gas_used as _,
                            block_gas_limit: revive_block.header.gas_limit as _,
                            transaction_hashes: revive_block
                                .transactions
                                .into_hashes()
                                .as_hashes()
                                .expect("Must be hashes")
                                .to_vec(),
                        },
                        substrate_block_information: Some(SubstrateMinedBlockInformation {
                            ref_time: block_ref_time,
                            max_ref_time,
                            proof_size: block_proof_size,
                            max_proof_size,
                        }),
                        tx_counts: Default::default(),
                    })
                }
            });

            Ok(Box::pin(mined_block_information_stream)
                as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
        })
    }

    /// Type-erased provider for trait consumers.
    fn provider(
        &self,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
    {
        // `self.provider()` resolves to the inherent async method (inherent methods
        // take precedence over trait methods), whose result is then erased.
        Box::pin(
            self.provider()
                .map(|provider| provider.map(|provider| provider.erased())),
        )
    }
}
|
||||||
|
|
||||||
|
/// A [`ResolverApi`] implementation backed by a single Polkadot omnichain node's
/// eth-rpc provider.
pub struct PolkadotOmnichainNodeResolver {
    /// Node id; used only to tag tracing spans.
    id: u32,
    /// Provider used for all chain queries.
    provider: ConcreteProvider<Ethereum, Arc<EthereumWallet>>,
}
|
||||||
|
|
||||||
|
impl ResolverApi for PolkadotOmnichainNodeResolver {
    /// The chain id reported by the node.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn chain_id(
        &self,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::primitives::ChainId>> + '_>> {
        Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) })
    }

    /// The effective gas price a transaction actually paid, taken from its receipt.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn transaction_gas_price(
        &self,
        tx_hash: TxHash,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_transaction_receipt(tx_hash)
                .await?
                .context("Failed to get the transaction receipt")
                .map(|receipt| receipt.effective_gas_price)
        })
    }

    /// The gas limit of the block identified by `number`.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn block_gas_limit(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u128>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the eth-rpc block")?
                .context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
                .map(|block| block.header.gas_limit as _)
        })
    }

    /// The coinbase (beneficiary) address of the block identified by `number`.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn block_coinbase(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Address>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the eth-rpc block")?
                .context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
                .map(|block| block.header.beneficiary)
        })
    }

    /// The block difficulty; post-merge this reads the mix-hash (prevrandao) field.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn block_difficulty(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<U256>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the eth-rpc block")?
                .context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
                .map(|block| U256::from_be_bytes(block.header.mix_hash.0))
        })
    }

    /// The base fee per gas of the block identified by `number`; errors if absent.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn block_base_fee(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<u64>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the eth-rpc block")?
                .context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
                .and_then(|block| {
                    block
                        .header
                        .base_fee_per_gas
                        .context("Failed to get the base fee per gas")
                })
        })
    }

    /// The hash of the block identified by `number`.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn block_hash(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockHash>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the eth-rpc block")?
                .context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
                .map(|block| block.header.hash)
        })
    }

    /// The timestamp of the block identified by `number`.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn block_timestamp(
        &self,
        number: BlockNumberOrTag,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockTimestamp>> + '_>> {
        Box::pin(async move {
            self.provider
                .get_block_by_number(number)
                .await
                .context("Failed to get the eth-rpc block")?
                .context("Failed to get the eth-rpc block, perhaps the chain has no blocks?")
                .map(|block| block.header.timestamp)
        })
    }

    /// The latest block number.
    #[instrument(level = "info", skip_all, fields(polkadot_omnichain_node_id = self.id))]
    fn last_block_number(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<BlockNumber>> + '_>> {
        Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) })
    }
}
|
||||||
|
|
||||||
|
impl Node for PolkadotOmnichainNode {
    /// Stops the node and eth-rpc child processes and deletes the node's on-disk data.
    fn shutdown(&mut self) -> anyhow::Result<()> {
        // Dropping the process handles terminates the children (handle type's Drop).
        drop(self.polkadot_omnichain_node_process.take());
        drop(self.eth_rpc_process.take());

        // Remove the node's database so that subsequent runs do not run on the same database. We
        // ignore the error just in case the directory didn't exist in the first place and therefore
        // there's nothing to be deleted.
        let _ = remove_dir_all(self.base_directory_path.join("data"));

        Ok(())
    }

    /// Initializes the node with `genesis` and spawns its processes.
    fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> {
        self.init(genesis)?.spawn_process()
    }

    /// Runs the node binary with `--version` and returns its stdout verbatim
    /// (including any trailing newline).
    fn version(&self) -> anyhow::Result<String> {
        let output = Command::new(&self.polkadot_omnichain_node_binary_path)
            .arg("--version")
            .stdin(Stdio::null())
            .stdout(Stdio::piped())
            .stderr(Stdio::null())
            .spawn()
            .context("Failed to spawn substrate --version")?
            .wait_with_output()
            .context("Failed to wait for substrate --version")?
            .stdout;
        Ok(String::from_utf8_lossy(&output).into())
    }
}
|
||||||
|
|
||||||
|
impl Drop for PolkadotOmnichainNode {
    /// Ensures the node's processes and data are cleaned up when the handle is dropped.
    fn drop(&mut self) {
        // NOTE(review): panicking inside Drop aborts the process if the drop runs
        // during an unwind — confirm a hard failure on shutdown errors is intended.
        self.shutdown().expect("Failed to shutdown")
    }
}
|
||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,68 +0,0 @@
|
|||||||
//! This crate implements concurrent handling of testing node.
|
|
||||||
|
|
||||||
use std::{
|
|
||||||
fs::read_to_string,
|
|
||||||
sync::atomic::{AtomicUsize, Ordering},
|
|
||||||
thread,
|
|
||||||
};
|
|
||||||
|
|
||||||
use anyhow::Context;
|
|
||||||
use revive_dt_config::Arguments;
|
|
||||||
|
|
||||||
use crate::Node;
|
|
||||||
|
|
||||||
/// The node pool starts one or more [Node]s which can then be accessed
/// in a round-robin fashion.
pub struct NodePool<T> {
    /// Monotonic counter used to pick the next node (taken modulo the pool size).
    next: AtomicUsize,
    /// The started nodes, one per configured worker.
    nodes: Vec<T>,
}
|
|
||||||
|
|
||||||
impl<T> NodePool<T>
|
|
||||||
where
|
|
||||||
T: Node + Send + 'static,
|
|
||||||
{
|
|
||||||
/// Create a new Pool. This will start as many nodes as there are workers in `config`.
|
|
||||||
pub fn new(config: &Arguments) -> anyhow::Result<Self> {
|
|
||||||
let nodes = config.workers;
|
|
||||||
let genesis = read_to_string(&config.genesis_file).context(format!(
|
|
||||||
"can not read genesis file: {}",
|
|
||||||
config.genesis_file.display()
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let mut handles = Vec::with_capacity(nodes);
|
|
||||||
for _ in 0..nodes {
|
|
||||||
let config = config.clone();
|
|
||||||
let genesis = genesis.clone();
|
|
||||||
handles.push(thread::spawn(move || spawn_node::<T>(&config, genesis)));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut nodes = Vec::with_capacity(nodes);
|
|
||||||
for handle in handles {
|
|
||||||
nodes.push(
|
|
||||||
handle
|
|
||||||
.join()
|
|
||||||
.map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))?
|
|
||||||
.map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))?,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Self {
|
|
||||||
nodes,
|
|
||||||
next: Default::default(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get a handle to the next node.
|
|
||||||
pub fn round_robbin(&self) -> &T {
|
|
||||||
let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len();
|
|
||||||
self.nodes.get(current).unwrap()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Constructs a node from `args`, logs its connection string, and starts it,
/// handing the running node back to the caller.
fn spawn_node<T: Node + Send>(args: &Arguments, genesis: String) -> anyhow::Result<T> {
    let mut node = T::new(args);
    tracing::info!("starting node: {}", node.connection_string());
    node.spawn(genesis).map(|()| node)
}
|
|
||||||
@@ -0,0 +1,69 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use alloy::transports::BoxFuture;
|
||||||
|
use tokio::sync::Semaphore;
|
||||||
|
use tower::{Layer, Service};
|
||||||
|
|
||||||
|
/// A tower [`Layer`] that caps the number of concurrently in-flight requests.
///
/// Clones of this layer share the same semaphore, so all services built from the
/// same layer (or its clones) share one global permit budget.
#[derive(Clone, Debug)]
pub struct ConcurrencyLimiterLayer {
    /// Shared permit pool; each in-flight request holds one permit.
    semaphore: Arc<Semaphore>,
}
|
||||||
|
|
||||||
|
impl ConcurrencyLimiterLayer {
|
||||||
|
pub fn new(permit_count: usize) -> Self {
|
||||||
|
Self {
|
||||||
|
semaphore: Arc::new(Semaphore::new(permit_count)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S> Layer<S> for ConcurrencyLimiterLayer {
|
||||||
|
type Service = ConcurrencyLimiterService<S>;
|
||||||
|
|
||||||
|
fn layer(&self, inner: S) -> Self::Service {
|
||||||
|
ConcurrencyLimiterService {
|
||||||
|
service: inner,
|
||||||
|
semaphore: self.semaphore.clone(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A tower service wrapper that acquires a shared semaphore permit before
/// letting each request's future make progress.
#[derive(Clone)]
pub struct ConcurrencyLimiterService<S> {
    /// The wrapped inner service.
    service: S,
    /// Permit pool shared with the originating [`ConcurrencyLimiterLayer`].
    semaphore: Arc<Semaphore>,
}
|
||||||
|
|
||||||
|
impl<S, Request> Service<Request> for ConcurrencyLimiterService<S>
where
    S: Service<Request> + Send,
    S::Future: Send + 'static,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

    /// Readiness is delegated to the inner service; the concurrency limit is
    /// enforced inside the returned future, not via backpressure here.
    fn poll_ready(
        &mut self,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), Self::Error>> {
        self.service.poll_ready(cx)
    }

    fn call(&mut self, req: Request) -> Self::Future {
        let semaphore = self.semaphore.clone();
        // The inner future is created eagerly, but (futures being lazy) it does no
        // work until it is awaited after the permit has been acquired below.
        let future = self.service.call(req);

        Box::pin(async move {
            // NOTE(review): acquire() only errors if the semaphore is closed; nothing
            // in this module closes it, so closure is treated as an unrecoverable bug.
            let _permit = semaphore
                .acquire()
                .await
                .expect("Semaphore has been closed");
            tracing::debug!(
                available_permits = semaphore.available_permits(),
                "Acquired Semaphore Permit"
            );
            // The permit is held for the full duration of the inner call.
            future.await
        })
    }
}
|
||||||
@@ -0,0 +1,169 @@
|
|||||||
|
use alloy::{
|
||||||
|
eips::BlockNumberOrTag,
|
||||||
|
network::{Network, TransactionBuilder},
|
||||||
|
providers::{
|
||||||
|
Provider, SendableTx,
|
||||||
|
ext::DebugApi,
|
||||||
|
fillers::{GasFillable, GasFiller, TxFiller},
|
||||||
|
},
|
||||||
|
rpc::types::trace::geth::{
|
||||||
|
GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingCallOptions,
|
||||||
|
GethDebugTracingOptions,
|
||||||
|
},
|
||||||
|
transports::{RpcError, TransportResult},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// An implementation of [`GasFiller`] with a fallback mechanism for reverting transactions.
///
/// This struct provides a fallback mechanism for alloy's [`GasFiller`] which kicks in when a
/// transaction's dry run fails due to it reverting, allowing us to get gas estimates even for
/// failing transactions. In this codebase, this is very important since the MatterLabs tests
/// expect some transactions in the test suite to revert. Since we're expected to run a number of
/// assertions on these reverting transactions, we must commit them to the ledger.
///
/// Therefore, this struct does the following:
///
/// 1. It first attempts to estimate the gas through the mechanism implemented in the [`GasFiller`].
/// 2. If it fails, then we perform a debug trace of the transaction to find out how much gas the
///    transaction needs until it reverts.
/// 3. We fill in these values (either the success or failure case) into the transaction.
///
/// The fallback mechanism of this filler can be completely disabled if we don't want it to be used.
/// In that case, this gas filler will act in an identical way to alloy's [`GasFiller`].
///
/// We then fill in these values into the transaction.
///
/// The previous implementation of this fallback gas filler relied on making use of default values
/// for the gas limit in order to be able to submit the reverting transactions to the network. But
/// it introduced a number of issues that we weren't anticipating at the time when it was built.
#[derive(Clone, Copy, Debug)]
pub struct FallbackGasFiller {
    /// The inner [`GasFiller`] which we pass all of the calls to in the happy path.
    inner: GasFiller,

    /// A [`bool`] that controls if the fallback mechanism is enabled or not.
    enable_fallback_mechanism: bool,
}
|
||||||
|
|
||||||
|
impl FallbackGasFiller {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
inner: Default::default(),
|
||||||
|
enable_fallback_mechanism: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_fallback_mechanism(mut self, enable: bool) -> Self {
|
||||||
|
self.enable_fallback_mechanism = enable;
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_fallback_mechanism_enabled(self) -> Self {
|
||||||
|
self.with_fallback_mechanism(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_fallback_mechanism_disabled(self) -> Self {
|
||||||
|
self.with_fallback_mechanism(false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<N> TxFiller<N> for FallbackGasFiller
where
    N: Network,
{
    type Fillable = <GasFiller as TxFiller<N>>::Fillable;

    /// Status tracking is delegated entirely to the wrapped [`GasFiller`].
    fn status(
        &self,
        tx: &<N as Network>::TransactionRequest,
    ) -> alloy::providers::fillers::FillerControlFlow {
        TxFiller::<N>::status(&self.inner, tx)
    }

    fn fill_sync(&self, _: &mut SendableTx<N>) {}

    /// Prepares the gas fill, falling back to a debug-trace-based estimate when the
    /// regular estimation fails (e.g. for reverting transactions) and the fallback
    /// mechanism is enabled.
    async fn prepare<P: Provider<N>>(
        &self,
        provider: &P,
        tx: &<N as Network>::TransactionRequest,
    ) -> TransportResult<Self::Fillable> {
        match (
            self.inner.prepare(provider, tx).await,
            self.enable_fallback_mechanism,
        ) {
            // Return the same thing if either this call succeeds, or if the call fails and the
            // fallback mechanism is disabled.
            (rtn @ Ok(..), ..) | (rtn @ Err(..), false) => rtn,
            (Err(..), true) => {
                // Perform a call-tracer trace of the transaction against the latest
                // block to learn how much gas it consumes before reverting.
                let trace = provider
                    .debug_trace_call(
                        tx.clone(),
                        BlockNumberOrTag::Latest.into(),
                        GethDebugTracingCallOptions {
                            tracing_options: GethDebugTracingOptions {
                                tracer: Some(GethDebugTracerType::BuiltInTracer(
                                    GethDebugBuiltInTracerType::CallTracer,
                                )),
                                ..Default::default()
                            },
                            state_overrides: Default::default(),
                            block_overrides: Default::default(),
                            tx_index: Default::default(),
                        },
                    )
                    .await?
                    .try_into_call_frame()
                    .map_err(|err| {
                        RpcError::local_usage_str(
                            format!("Expected a callframe trace, but got: {err:?}").as_str(),
                        )
                    })?;

                let gas_used = u64::try_from(trace.gas_used).map_err(|_| {
                    RpcError::local_usage_str(
                        "Transaction trace returned a value of gas used that exceeds u64",
                    )
                })?;
                // Double the traced usage as headroom; saturating so it can't overflow.
                let gas_limit = gas_used.saturating_mul(2);

                // Legacy transactions: an explicit gas price short-circuits the
                // EIP-1559 estimation below.
                if let Some(gas_price) = tx.gas_price() {
                    return Ok(GasFillable::Legacy {
                        gas_limit,
                        gas_price,
                    });
                }

                // EIP-1559: use the caller-supplied fee caps when both are present,
                // otherwise ask the provider for an estimate.
                let estimate = if let (Some(max_fee_per_gas), Some(max_priority_fee_per_gas)) =
                    (tx.max_fee_per_gas(), tx.max_priority_fee_per_gas())
                {
                    alloy::eips::eip1559::Eip1559Estimation {
                        max_fee_per_gas,
                        max_priority_fee_per_gas,
                    }
                } else {
                    provider.estimate_eip1559_fees().await?
                };

                Ok(GasFillable::Eip1559 {
                    gas_limit,
                    estimate,
                })
            }
        }
    }

    /// Applying the prepared values is delegated to the wrapped [`GasFiller`].
    async fn fill(
        &self,
        fillable: Self::Fillable,
        tx: SendableTx<N>,
    ) -> TransportResult<SendableTx<N>> {
        self.inner.fill(fillable, tx).await
    }
}
|
||||||
|
|
||||||
|
impl Default for FallbackGasFiller {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::new()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,9 @@
|
|||||||
|
// Provider utility building blocks; each submodule is re-exported flat so
// consumers can use the types directly from this module.
mod concurrency_limiter;
mod fallback_gas_filler;
mod provider;
mod receipt_retry_layer;

pub use concurrency_limiter::*;
pub use fallback_gas_filler::*;
pub use provider::*;
pub use receipt_retry_layer::*;
|
||||||
@@ -0,0 +1,64 @@
|
|||||||
|
use std::sync::LazyLock;
|
||||||
|
|
||||||
|
use alloy::{
|
||||||
|
network::{Network, NetworkWallet, TransactionBuilder4844},
|
||||||
|
providers::{
|
||||||
|
Identity, ProviderBuilder, RootProvider,
|
||||||
|
fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller},
|
||||||
|
},
|
||||||
|
rpc::client::ClientBuilder,
|
||||||
|
};
|
||||||
|
use anyhow::{Context, Result};
|
||||||
|
|
||||||
|
use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller, RetryLayer};
|
||||||
|
|
||||||
|
/// The fully-composed provider type used by the framework: a [`RootProvider`]
/// stacked with (in order) the fallback gas filler, chain-id filler, nonce
/// filler, and wallet filler.
pub type ConcreteProvider<N, W> = FillProvider<
    JoinFill<
        JoinFill<JoinFill<JoinFill<Identity, FallbackGasFiller>, ChainIdFiller>, NonceFiller>,
        WalletFiller<W>,
    >,
    RootProvider<N>,
    N,
>;
|
||||||
|
|
||||||
|
/// Builds a [`ConcreteProvider`] whose underlying RPC client is rate-limited by a
/// process-wide concurrency cap and wrapped in a retry layer.
///
/// # Errors
/// Fails when the RPC client cannot connect to `rpc_url`.
pub async fn construct_concurrency_limited_provider<N, W>(
    rpc_url: &str,
    fallback_gas_filler: FallbackGasFiller,
    chain_id_filler: ChainIdFiller,
    nonce_filler: NonceFiller,
    wallet: W,
) -> Result<ConcreteProvider<N, W>>
where
    N: Network<TransactionRequest: TransactionBuilder4844>,
    W: NetworkWallet<N>,
    Identity: TxFiller<N>,
    FallbackGasFiller: TxFiller<N>,
    ChainIdFiller: TxFiller<N>,
    NonceFiller: TxFiller<N>,
    WalletFiller<W>: TxFiller<N>,
{
    // This is a global limit on the RPC concurrency that applies to all of the providers created
    // by the framework. With this limit, it means that we can have a maximum of N concurrent
    // requests at any point of time and no more than that. This is done in an effort to stabilize
    // the framework from some of the intermittent issues that we've been seeing related to RPC
    // calls. (Cloning the layer shares its semaphore, so the 500-permit cap is process-wide.)
    static GLOBAL_CONCURRENCY_LIMITER_LAYER: LazyLock<ConcurrencyLimiterLayer> =
        LazyLock::new(|| ConcurrencyLimiterLayer::new(500));

    let client = ClientBuilder::default()
        .layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone())
        .layer(RetryLayer::default())
        .connect(rpc_url)
        .await
        .context("Failed to construct the RPC client")?;

    // Recommended fillers are disabled because we install our own stack explicitly.
    let provider = ProviderBuilder::new()
        .disable_recommended_fillers()
        .network::<N>()
        .filler(fallback_gas_filler)
        .filler(chain_id_filler)
        .filler(nonce_filler)
        .wallet(wallet)
        .connect_client(client);

    Ok(provider)
}
|
||||||
@@ -0,0 +1,158 @@
|
|||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use alloy::{
|
||||||
|
network::{AnyNetwork, Network},
|
||||||
|
rpc::json_rpc::{RequestPacket, ResponsePacket},
|
||||||
|
transports::{TransportError, TransportErrorKind, TransportFut},
|
||||||
|
};
|
||||||
|
use tokio::time::{interval, timeout};
|
||||||
|
use tower::{Layer, Service};
|
||||||
|
|
||||||
|
/// A layer that allows for automatic retries for getting the receipt.
///
/// There are certain cases where getting the receipt of a committed transaction might fail. In Geth
/// this can happen if the transaction has been committed to the ledger but has not been indexed; in
/// the substrate and revive stack it can also happen for other reasons.
///
/// Therefore, just because the first attempt to get the receipt (after transaction confirmation)
/// has failed, it doesn't mean that it will continue to fail. This layer can be added to any alloy
/// provider to allow the provider to retry getting the receipt for some period of time before it
/// considers that a timeout. It attempts to poll for the receipt for the `polling_duration` with an
/// interval of `polling_interval` between each poll. If by the end of the `polling_duration` it was
/// not able to get the receipt successfully, then this is considered to be a timeout.
///
/// Additionally, this layer allows for retries for other rpc methods such as all tracing methods.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RetryLayer {
    /// The amount of time to keep polling for the receipt before considering it a timeout.
    polling_duration: Duration,

    /// The interval of time to wait between each poll for the receipt.
    polling_interval: Duration,
}
|
||||||
|
|
||||||
|
impl RetryLayer {
|
||||||
|
pub fn new(polling_duration: Duration, polling_interval: Duration) -> Self {
|
||||||
|
Self {
|
||||||
|
polling_duration,
|
||||||
|
polling_interval,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_polling_duration(mut self, polling_duration: Duration) -> Self {
|
||||||
|
self.polling_duration = polling_duration;
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_polling_interval(mut self, polling_interval: Duration) -> Self {
|
||||||
|
self.polling_interval = polling_interval;
|
||||||
|
self
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for RetryLayer {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
polling_duration: Duration::from_secs(90),
|
||||||
|
polling_interval: Duration::from_millis(500),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S> Layer<S> for RetryLayer {
    type Service = RetryService<S>;

    // Wraps the inner transport service, carrying the configured polling parameters
    // over into the service so it can retry eligible RPC calls.
    fn layer(&self, inner: S) -> Self::Service {
        RetryService {
            service: inner,
            polling_duration: self.polling_duration,
            polling_interval: self.polling_interval,
        }
    }
}
|
||||||
|
|
||||||
|
/// A transport service wrapper produced by [`RetryLayer`] that retries receipt and
/// tracing RPC requests until they succeed or the polling duration elapses.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RetryService<S> {
    /// The internal service.
    service: S,

    /// The amount of time to keep polling for the receipt before considering it a timeout.
    polling_duration: Duration,

    /// The interval of time to wait between each poll for the receipt.
    polling_interval: Duration,
}
|
||||||
|
|
||||||
|
impl<S> Service<RequestPacket> for RetryService<S>
|
||||||
|
where
|
||||||
|
S: Service<RequestPacket, Future = TransportFut<'static>, Error = TransportError>
|
||||||
|
+ Send
|
||||||
|
+ 'static
|
||||||
|
+ Clone,
|
||||||
|
{
|
||||||
|
type Response = ResponsePacket;
|
||||||
|
type Error = TransportError;
|
||||||
|
type Future = TransportFut<'static>;
|
||||||
|
|
||||||
|
fn poll_ready(
|
||||||
|
&mut self,
|
||||||
|
cx: &mut std::task::Context<'_>,
|
||||||
|
) -> std::task::Poll<Result<(), Self::Error>> {
|
||||||
|
self.service.poll_ready(cx)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::nonminimal_bool)]
|
||||||
|
fn call(&mut self, req: RequestPacket) -> Self::Future {
|
||||||
|
type ReceiptOutput = <AnyNetwork as Network>::ReceiptResponse;
|
||||||
|
|
||||||
|
let mut service = self.service.clone();
|
||||||
|
let polling_interval = self.polling_interval;
|
||||||
|
let polling_duration = self.polling_duration;
|
||||||
|
|
||||||
|
Box::pin(async move {
|
||||||
|
let request = req.as_single().ok_or_else(|| {
|
||||||
|
TransportErrorKind::custom_str("Retry layer doesn't support batch requests")
|
||||||
|
})?;
|
||||||
|
let method = request.method();
|
||||||
|
let requires_retries = method == "eth_getTransactionReceipt"
|
||||||
|
|| (method.contains("debug") && method.contains("trace"));
|
||||||
|
|
||||||
|
if !requires_retries {
|
||||||
|
return service.call(req).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
timeout(polling_duration, async {
|
||||||
|
let mut interval = interval(polling_interval);
|
||||||
|
|
||||||
|
loop {
|
||||||
|
interval.tick().await;
|
||||||
|
|
||||||
|
let Ok(resp) = service.call(req.clone()).await else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
let response = resp.as_single().expect("Can't fail");
|
||||||
|
if response.is_error() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if method == "eth_getTransactionReceipt"
|
||||||
|
&& response
|
||||||
|
.payload()
|
||||||
|
.clone()
|
||||||
|
.deserialize_success::<ReceiptOutput>()
|
||||||
|
.ok()
|
||||||
|
.and_then(|resp| resp.try_into_success().ok())
|
||||||
|
.is_some()
|
||||||
|
|| method != "eth_getTransactionReceipt"
|
||||||
|
{
|
||||||
|
return resp;
|
||||||
|
} else {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.map_err(|_| TransportErrorKind::custom_str("Timeout when retrying request"))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,25 @@
|
|||||||
|
[package]
|
||||||
|
name = "revive-dt-report-processor"
|
||||||
|
description = "revive differential testing report processor utility"
|
||||||
|
version.workspace = true
|
||||||
|
authors.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
edition.workspace = true
|
||||||
|
repository.workspace = true
|
||||||
|
rust-version.workspace = true
|
||||||
|
|
||||||
|
[[bin]]
|
||||||
|
name = "report-processor"
|
||||||
|
path = "src/main.rs"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
revive-dt-report = { workspace = true }
|
||||||
|
revive-dt-common = { workspace = true }
|
||||||
|
|
||||||
|
anyhow = { workspace = true }
|
||||||
|
clap = { workspace = true }
|
||||||
|
serde = { workspace = true }
|
||||||
|
serde_json = { workspace = true }
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
@@ -0,0 +1,329 @@
|
|||||||
|
use std::{
|
||||||
|
borrow::Cow,
|
||||||
|
collections::{BTreeMap, BTreeSet},
|
||||||
|
fmt::Display,
|
||||||
|
fs::{File, OpenOptions},
|
||||||
|
ops::{Deref, DerefMut},
|
||||||
|
path::{Path, PathBuf},
|
||||||
|
str::FromStr,
|
||||||
|
};
|
||||||
|
|
||||||
|
use anyhow::{Context as _, Error, Result, bail};
|
||||||
|
use clap::Parser;
|
||||||
|
use serde::{Deserialize, Serialize, de::DeserializeOwned};
|
||||||
|
|
||||||
|
use revive_dt_common::types::{Mode, ParsedTestSpecifier};
|
||||||
|
use revive_dt_report::{Report, TestCaseStatus};
|
||||||
|
|
||||||
|
/// CLI entry point: either distills a retester report into an expectations file
/// (recording only the failed cases), or compares two expectation files and fails
/// on any difference.
fn main() -> Result<()> {
    let cli = Cli::try_parse().context("Failed to parse the CLI arguments")?;

    match cli {
        Cli::GenerateExpectationsFile {
            report_path,
            output_path: output_file,
            remove_prefix,
        } => {
            // Canonicalize the prefixes up front so `strip_prefix` below compares
            // like-for-like paths.
            let remove_prefix = remove_prefix
                .into_iter()
                .map(|path| path.canonicalize().context("Failed to canonicalize path"))
                .collect::<Result<Vec<_>>>()?;

            // Flatten the report's nested (metadata file -> case -> mode) structure
            // into one (specifier, status) pair per executed mode, keeping only the
            // entries that actually carry a status, and of those only the failures.
            let expectations = report_path
                .execution_information
                .iter()
                .flat_map(|(metadata_file_path, metadata_file_report)| {
                    metadata_file_report
                        .case_reports
                        .iter()
                        .map(move |(case_idx, case_report)| {
                            (metadata_file_path, case_idx, case_report)
                        })
                })
                .flat_map(|(metadata_file_path, case_idx, case_report)| {
                    case_report.mode_execution_reports.iter().map(
                        move |(mode, execution_report)| {
                            (
                                metadata_file_path,
                                case_idx,
                                mode,
                                execution_report.status.as_ref(),
                            )
                        },
                    )
                })
                .filter_map(|(metadata_file_path, case_idx, mode, status)| {
                    status.map(|status| (metadata_file_path, case_idx, mode, status))
                })
                .map(|(metadata_file_path, case_idx, mode, status)| {
                    (
                        TestSpecifier {
                            // Strip the first matching prefix (if any) so the
                            // expectations file contains portable, relative paths.
                            metadata_file_path: Cow::Borrowed(
                                remove_prefix
                                    .iter()
                                    .filter_map(|prefix| {
                                        metadata_file_path.as_inner().strip_prefix(prefix).ok()
                                    })
                                    .next()
                                    .unwrap_or(metadata_file_path.as_inner()),
                            ),
                            case_idx: case_idx.into_inner(),
                            mode: Cow::Borrowed(mode),
                        },
                        Status::from(status),
                    )
                })
                .filter(|(_, status)| *status == Status::Failed)
                .collect::<Expectations>();

            // Overwrite (not append to) any existing expectations file.
            let output_file = OpenOptions::new()
                .truncate(true)
                .create(true)
                .write(true)
                .open(output_file)
                .context("Failed to create the output file")?;
            serde_json::to_writer_pretty(output_file, &expectations)
                .context("Failed to write the expectations to file")?;
        }
        Cli::CompareExpectationFiles {
            base_expectation_path,
            other_expectation_path,
        } => {
            // Union of keys from both files: an entry missing on either side is an
            // error, as is any status mismatch.
            let keys = base_expectation_path
                .keys()
                .chain(other_expectation_path.keys())
                .collect::<BTreeSet<_>>();

            for key in keys {
                let base_status = base_expectation_path.get(key).context(format!(
                    "Entry not found in the base expectations: \"{}\"",
                    key
                ))?;
                let other_status = other_expectation_path.get(key).context(format!(
                    "Entry not found in the other expectations: \"{}\"",
                    key
                ))?;

                if base_status != other_status {
                    bail!(
                        "Expectations for entry \"{}\" have changed. They were {:?} and now they are {:?}",
                        key,
                        base_status,
                        other_status
                    )
                }
            }
        }
    };

    Ok(())
}
|
||||||
|
|
||||||
|
/// Maps a fully-qualified test specifier to the status recorded for it in an expectations file.
type Expectations<'a> = BTreeMap<TestSpecifier<'a>, Status>;
|
||||||
|
|
||||||
|
/// A tool that's used to process the reports generated by the retester binary in various ways.
|
||||||
|
#[derive(Clone, Debug, Parser)]
|
||||||
|
#[command(name = "retester", term_width = 100)]
|
||||||
|
pub enum Cli {
|
||||||
|
/// Generates an expectation file out of a given report.
|
||||||
|
GenerateExpectationsFile {
|
||||||
|
/// The path of the report's JSON file to generate the expectation's file for.
|
||||||
|
#[clap(long)]
|
||||||
|
report_path: JsonFile<Report>,
|
||||||
|
|
||||||
|
/// The path of the output file to generate.
|
||||||
|
///
|
||||||
|
/// Note that we expect that:
|
||||||
|
/// 1. The provided path points to a JSON file.
|
||||||
|
/// 1. The ancestor's of the provided path already exist such that no directory creations
|
||||||
|
/// are required.
|
||||||
|
#[clap(long)]
|
||||||
|
output_path: PathBuf,
|
||||||
|
|
||||||
|
/// Prefix paths to remove from the paths in the final expectations file.
|
||||||
|
#[clap(long)]
|
||||||
|
remove_prefix: Vec<PathBuf>,
|
||||||
|
},
|
||||||
|
|
||||||
|
/// Compares two expectation files to ensure that they match each other.
|
||||||
|
CompareExpectationFiles {
|
||||||
|
/// The path of the base expectation file.
|
||||||
|
#[clap(long)]
|
||||||
|
base_expectation_path: JsonFile<Expectations<'static>>,
|
||||||
|
|
||||||
|
/// The path of the other expectation file.
|
||||||
|
#[clap(long)]
|
||||||
|
other_expectation_path: JsonFile<Expectations<'static>>,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A coarse, serializable summary of a test case's outcome, used as the value type
/// in expectation files.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub enum Status {
    Succeeded,
    Failed,
    Ignored,
}
|
||||||
|
|
||||||
|
impl From<TestCaseStatus> for Status {
    /// Collapses a detailed test case status into its coarse [`Status`] variant.
    ///
    /// Delegates to the by-reference conversion so the two impls cannot drift apart.
    fn from(value: TestCaseStatus) -> Self {
        Self::from(&value)
    }
}
|
||||||
|
|
||||||
|
impl<'a> From<&'a TestCaseStatus> for Status {
    /// Collapses a detailed test case status into its coarse [`Status`] variant,
    /// discarding the per-variant payload (steps, reasons, extra fields).
    fn from(value: &'a TestCaseStatus) -> Self {
        match value {
            TestCaseStatus::Succeeded { .. } => Self::Succeeded,
            TestCaseStatus::Failed { .. } => Self::Failed,
            TestCaseStatus::Ignored { .. } => Self::Ignored,
        }
    }
}
|
||||||
|
|
||||||
|
/// A CLI argument type that parses as a path and eagerly loads and JSON-deserializes
/// the file's content during argument parsing (via its [`FromStr`] impl).
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct JsonFile<T> {
    // The path the content was loaded from; retained for Display / error reporting.
    path: PathBuf,
    // The deserialized content, boxed on the heap.
    content: Box<T>,
}
|
||||||
|
|
||||||
|
// Dereference straight to the parsed content so callers can use a `JsonFile<T>`
// wherever a `T` is expected.
impl<T> Deref for JsonFile<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.content
    }
}

impl<T> DerefMut for JsonFile<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.content
    }
}
|
||||||
|
|
||||||
|
impl<T> FromStr for JsonFile<T>
|
||||||
|
where
|
||||||
|
T: DeserializeOwned,
|
||||||
|
{
|
||||||
|
type Err = Error;
|
||||||
|
|
||||||
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
|
let path = PathBuf::from(s);
|
||||||
|
let file = File::open(&path).context("Failed to open the file")?;
|
||||||
|
serde_json::from_reader(&file)
|
||||||
|
.map(|content| Self { path, content })
|
||||||
|
.context(format!(
|
||||||
|
"Failed to deserialize file's content as {}",
|
||||||
|
std::any::type_name::<T>()
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Displays as the originating path (not the content) — this is what clap shows
// when echoing the argument back to the user.
impl<T> Display for JsonFile<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Display::fmt(&self.path.display(), f)
    }
}

// String conversion mirrors Display; used by clap when it needs an owned value.
impl<T> From<JsonFile<T>> for String {
    fn from(value: JsonFile<T>) -> Self {
        value.to_string()
    }
}
|
||||||
|
|
||||||
|
/// Uniquely identifies one executed test: a metadata file, a case index within it,
/// and the compilation mode it ran under. Serialized as `path::case_idx::mode`.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct TestSpecifier<'a> {
    pub metadata_file_path: Cow<'a, Path>,
    pub case_idx: usize,
    pub mode: Cow<'a, Mode>,
}
|
||||||
|
|
||||||
|
impl<'a> Display for TestSpecifier<'a> {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
write!(
|
||||||
|
f,
|
||||||
|
"{}::{}::{}",
|
||||||
|
self.metadata_file_path.display(),
|
||||||
|
self.case_idx,
|
||||||
|
self.mode
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Converts into the common crate's parsed specifier, always producing the fully
// qualified `CaseWithMode` variant and taking ownership of the borrowed fields.
impl<'a> From<TestSpecifier<'a>> for ParsedTestSpecifier {
    fn from(
        TestSpecifier {
            metadata_file_path,
            case_idx,
            mode,
        }: TestSpecifier,
    ) -> Self {
        Self::CaseWithMode {
            metadata_file_path: metadata_file_path.to_path_buf(),
            case_idx,
            mode: mode.into_owned(),
        }
    }
}

// The reverse conversion is fallible: only a fully qualified `CaseWithMode`
// specifier carries enough information to build a `TestSpecifier`.
impl TryFrom<ParsedTestSpecifier> for TestSpecifier<'static> {
    type Error = Error;

    fn try_from(value: ParsedTestSpecifier) -> Result<Self> {
        let ParsedTestSpecifier::CaseWithMode {
            metadata_file_path,
            case_idx,
            mode,
        } = value
        else {
            bail!("Expected a full test case specifier")
        };
        Ok(Self {
            metadata_file_path: Cow::Owned(metadata_file_path),
            case_idx,
            mode: Cow::Owned(mode),
        })
    }
}
|
||||||
|
|
||||||
|
impl<'a> Serialize for TestSpecifier<'a> {
    /// Serializes via the Display string form (`path::case_idx::mode`) so the
    /// specifier can be used as a JSON object key.
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.to_string().serialize(serializer)
    }
}
|
||||||
|
|
||||||
|
impl<'d, 'a> Deserialize<'d> for TestSpecifier<'a> {
|
||||||
|
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
|
||||||
|
where
|
||||||
|
D: serde::Deserializer<'d>,
|
||||||
|
{
|
||||||
|
let string = String::deserialize(deserializer)?;
|
||||||
|
let mut splitted = string.split("::");
|
||||||
|
let (Some(metadata_file_path), Some(case_idx), Some(mode), None) = (
|
||||||
|
splitted.next(),
|
||||||
|
splitted.next(),
|
||||||
|
splitted.next(),
|
||||||
|
splitted.next(),
|
||||||
|
) else {
|
||||||
|
return Err(serde::de::Error::custom(
|
||||||
|
"Test specifier doesn't contain the components required",
|
||||||
|
));
|
||||||
|
};
|
||||||
|
let metadata_file_path = PathBuf::from(metadata_file_path);
|
||||||
|
let case_idx = usize::from_str(case_idx)
|
||||||
|
.map_err(|_| serde::de::Error::custom("Case idx is not a usize"))?;
|
||||||
|
let mode = Mode::from_str(mode).map_err(|_| serde::de::Error::custom("Invalid mode"))?;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
metadata_file_path: Cow::Owned(metadata_file_path),
|
||||||
|
case_idx,
|
||||||
|
mode: Cow::Owned(mode),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -8,11 +8,22 @@ repository.workspace = true
|
|||||||
rust-version.workspace = true
|
rust-version.workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
revive-dt-common = { workspace = true }
|
||||||
revive-dt-config = { workspace = true }
|
revive-dt-config = { workspace = true }
|
||||||
revive-dt-format = { workspace = true }
|
revive-dt-format = { workspace = true }
|
||||||
|
revive-dt-compiler = { workspace = true }
|
||||||
|
|
||||||
|
alloy = { workspace = true }
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
tracing = { workspace = true }
|
paste = { workspace = true }
|
||||||
|
indexmap = { workspace = true, features = ["serde"] }
|
||||||
|
itertools = { workspace = true }
|
||||||
|
semver = { workspace = true }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
revive-solc-json-interface = { workspace = true }
|
serde_with = { workspace = true }
|
||||||
|
tokio = { workspace = true }
|
||||||
|
tracing = { workspace = true }
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
|||||||
@@ -0,0 +1,965 @@
|
|||||||
|
//! Implementation of the report aggregator task which consumes the events sent by the various
|
||||||
|
//! reporters and combines them into a single unified report.
|
||||||
|
|
||||||
|
use std::{
|
||||||
|
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
|
||||||
|
fs::OpenOptions,
|
||||||
|
ops::{Add, Div},
|
||||||
|
path::PathBuf,
|
||||||
|
time::{SystemTime, UNIX_EPOCH},
|
||||||
|
};
|
||||||
|
|
||||||
|
use alloy::primitives::{Address, BlockNumber, BlockTimestamp, TxHash};
|
||||||
|
use anyhow::{Context as _, Result};
|
||||||
|
use indexmap::IndexMap;
|
||||||
|
use itertools::Itertools;
|
||||||
|
use revive_dt_common::types::PlatformIdentifier;
|
||||||
|
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
|
||||||
|
use revive_dt_config::Context;
|
||||||
|
use revive_dt_format::{case::CaseIdx, metadata::ContractInstance, steps::StepPath};
|
||||||
|
use semver::Version;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_with::{DisplayFromStr, serde_as};
|
||||||
|
use tokio::sync::{
|
||||||
|
broadcast::{Sender, channel},
|
||||||
|
mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
|
||||||
|
};
|
||||||
|
use tracing::debug;
|
||||||
|
|
||||||
|
use crate::*;
|
||||||
|
|
||||||
|
/// Aggregates runner events into a single unified [`Report`] and writes it to disk
/// once the run completes.
pub struct ReportAggregator {
    /* Internal Report State */
    // The unified report being built up from the incoming events.
    report: Report,
    // Cases still pending, keyed by metadata file then mode; used to detect when a
    // whole (file, mode) combination has finished executing.
    remaining_cases: HashMap<MetadataFilePath, HashMap<Mode, HashSet<CaseIdx>>>,
    /* Channels */
    // Sender handed to the runner; held in an Option so `into_task` can take it exactly once.
    runner_tx: Option<UnboundedSender<RunnerEvent>>,
    runner_rx: UnboundedReceiver<RunnerEvent>,
    // Broadcast channel used to notify subscribed listeners of reporter events.
    listener_tx: Sender<ReporterEvent>,
    /* Context */
    // Optional report file-name override taken from the context's report configuration.
    file_name: Option<String>,
}
|
||||||
|
|
||||||
|
impl ReportAggregator {
|
||||||
|
    /// Creates a new aggregator for the given execution context, wiring up the
    /// runner channel and the listener broadcast channel.
    pub fn new(context: Context) -> Self {
        let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
        // Broadcast capacity of 0xFFFF events for subscribed listeners.
        let (listener_tx, _) = channel::<ReporterEvent>(0xFFFF);
        Self {
            // Only the test and benchmark contexts carry a report file-name override.
            file_name: match context {
                Context::Test(ref context) => context.report_configuration.file_name.clone(),
                Context::Benchmark(ref context) => context.report_configuration.file_name.clone(),
                Context::ExportJsonSchema | Context::ExportGenesis(..) => None,
            },
            report: Report::new(context),
            remaining_cases: Default::default(),
            runner_tx: Some(runner_tx),
            runner_rx,
            listener_tx,
        }
    }
|
||||||
|
|
||||||
|
    /// Splits the aggregator into a [`Reporter`] handle and the future that performs
    /// the aggregation, which resolves to the final [`Report`] once the run completes.
    pub fn into_task(mut self) -> (Reporter, impl Future<Output = Result<Report>>) {
        let reporter = self
            .runner_tx
            .take()
            .map(Into::into)
            // `into_task` consumes `self`, so the sender is always still present here.
            .expect("Can't fail since this can only be called once");
        (reporter, async move { self.aggregate().await })
    }
|
||||||
|
|
||||||
|
    /// Main event loop: consumes runner events until a `Completion` event (or channel
    /// closure), then serializes the finished report to a JSON file in the working
    /// directory and returns it.
    async fn aggregate(mut self) -> Result<Report> {
        debug!("Starting to aggregate report");

        while let Some(event) = self.runner_rx.recv().await {
            debug!(event = event.variant_name(), "Received Event");
            // Events arrive boxed; dereference and dispatch each one to its handler.
            match event {
                RunnerEvent::SubscribeToEvents(event) => {
                    self.handle_subscribe_to_events_event(*event);
                }
                RunnerEvent::MetadataFileDiscovery(event) => {
                    self.handle_metadata_file_discovery_event(*event);
                }
                RunnerEvent::TestCaseDiscovery(event) => {
                    self.handle_test_case_discovery(*event);
                }
                RunnerEvent::TestSucceeded(event) => {
                    self.handle_test_succeeded_event(*event);
                }
                RunnerEvent::TestFailed(event) => {
                    self.handle_test_failed_event(*event);
                }
                RunnerEvent::TestIgnored(event) => {
                    self.handle_test_ignored_event(*event);
                }
                RunnerEvent::NodeAssigned(event) => {
                    self.handle_node_assigned_event(*event);
                }
                RunnerEvent::PreLinkContractsCompilationSucceeded(event) => {
                    self.handle_pre_link_contracts_compilation_succeeded_event(*event)
                }
                RunnerEvent::PostLinkContractsCompilationSucceeded(event) => {
                    self.handle_post_link_contracts_compilation_succeeded_event(*event)
                }
                RunnerEvent::PreLinkContractsCompilationFailed(event) => {
                    self.handle_pre_link_contracts_compilation_failed_event(*event)
                }
                RunnerEvent::PostLinkContractsCompilationFailed(event) => {
                    self.handle_post_link_contracts_compilation_failed_event(*event)
                }
                RunnerEvent::LibrariesDeployed(event) => {
                    self.handle_libraries_deployed_event(*event);
                }
                RunnerEvent::ContractDeployed(event) => {
                    self.handle_contract_deployed_event(*event);
                }
                RunnerEvent::Completion(_) => {
                    break;
                }
                /* Benchmarks Events */
                RunnerEvent::StepTransactionInformation(event) => {
                    self.handle_step_transaction_information(*event)
                }
                RunnerEvent::ContractInformation(event) => {
                    self.handle_contract_information(*event);
                }
                RunnerEvent::BlockMined(event) => self.handle_block_mined(*event),
            }
        }
        // Run completion handling even if the loop exited because the channel closed
        // without an explicit Completion event.
        self.handle_completion(CompletionEvent {});
        debug!("Report aggregation completed");

        // Default report file name: the current UNIX timestamp with a .json extension.
        let default_file_name = {
            let current_timestamp = SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .context("System clock is before UNIX_EPOCH; cannot compute report timestamp")?
                .as_secs();
            let mut file_name = current_timestamp.to_string();
            file_name.push_str(".json");
            file_name
        };
        let file_name = self.file_name.unwrap_or(default_file_name);
        let file_path = self
            .report
            .context
            .working_directory_configuration()
            .as_path()
            .join(file_name);
        // Create or overwrite the report file; it is write-only.
        let file = OpenOptions::new()
            .create(true)
            .write(true)
            .truncate(true)
            .read(false)
            .open(&file_path)
            .with_context(|| {
                format!(
                    "Failed to open report file for writing: {}",
                    file_path.display()
                )
            })?;
        serde_json::to_writer_pretty(&file, &self.report).with_context(|| {
            format!("Failed to serialize report JSON to {}", file_path.display())
        })?;

        Ok(self.report)
    }
|
||||||
|
|
||||||
|
    /// Fulfils a subscription request by handing the caller a fresh receiver on the
    /// broadcast channel of reporter events.
    fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) {
        // The send only fails if the requester already dropped its end; ignore that.
        let _ = event.tx.send(self.listener_tx.subscribe());
    }

    /// Records a newly discovered metadata file in the report.
    fn handle_metadata_file_discovery_event(&mut self, event: MetadataFileDiscoveryEvent) {
        self.report.metadata_files.insert(event.path.clone());
    }

    /// Tracks a newly discovered test case so that completion of its whole
    /// (metadata file, mode) combination can be detected later.
    fn handle_test_case_discovery(&mut self, event: TestCaseDiscoveryEvent) {
        self.remaining_cases
            .entry(event.test_specifier.metadata_file_path.clone().into())
            .or_default()
            .entry(event.test_specifier.solc_mode.clone())
            .or_default()
            .insert(event.test_specifier.case_idx);
    }
|
||||||
|
|
||||||
|
    /// Marks the given test case as succeeded and checks whether its
    /// (metadata file, mode) combination has now fully completed.
    fn handle_test_succeeded_event(&mut self, event: TestSucceededEvent) {
        // Remove this from the set of cases we're tracking since it has completed.
        self.remaining_cases
            .entry(event.test_specifier.metadata_file_path.clone().into())
            .or_default()
            .entry(event.test_specifier.solc_mode.clone())
            .or_default()
            .remove(&event.test_specifier.case_idx);

        // Record the success (and the number of executed steps) in the report.
        let test_case_report = self.test_case_report(&event.test_specifier);
        test_case_report.status = Some(TestCaseStatus::Succeeded {
            steps_executed: event.steps_executed,
        });
        self.handle_post_test_case_status_update(&event.test_specifier);
    }

    /// Marks the given test case as failed and checks whether its
    /// (metadata file, mode) combination has now fully completed.
    fn handle_test_failed_event(&mut self, event: TestFailedEvent) {
        // Remove this from the set of cases we're tracking since it has completed.
        self.remaining_cases
            .entry(event.test_specifier.metadata_file_path.clone().into())
            .or_default()
            .entry(event.test_specifier.solc_mode.clone())
            .or_default()
            .remove(&event.test_specifier.case_idx);

        // Record the failure and its reason in the report.
        let test_case_report = self.test_case_report(&event.test_specifier);
        test_case_report.status = Some(TestCaseStatus::Failed {
            reason: event.reason,
        });
        self.handle_post_test_case_status_update(&event.test_specifier);
    }

    /// Marks the given test case as ignored and checks whether its
    /// (metadata file, mode) combination has now fully completed.
    fn handle_test_ignored_event(&mut self, event: TestIgnoredEvent) {
        // Remove this from the set of cases we're tracking since it has completed.
        self.remaining_cases
            .entry(event.test_specifier.metadata_file_path.clone().into())
            .or_default()
            .entry(event.test_specifier.solc_mode.clone())
            .or_default()
            .remove(&event.test_specifier.case_idx);

        // Add information on the fact that the case was ignored to the report.
        let test_case_report = self.test_case_report(&event.test_specifier);
        test_case_report.status = Some(TestCaseStatus::Ignored {
            reason: event.reason,
            additional_fields: event.additional_fields,
        });
        self.handle_post_test_case_status_update(&event.test_specifier);
    }
|
||||||
|
|
||||||
|
    /// Invoked after any test case reaches a terminal status. Once every case of the
    /// specifier's (metadata file, mode) combination is done, broadcasts a completion
    /// event carrying the collected per-case statuses to any subscribed listeners.
    fn handle_post_test_case_status_update(&mut self, specifier: &TestSpecifier) {
        let remaining_cases = self
            .remaining_cases
            .entry(specifier.metadata_file_path.clone().into())
            .or_default()
            .entry(specifier.solc_mode.clone())
            .or_default();
        // Still cases pending for this (file, mode) combination — nothing to announce yet.
        if !remaining_cases.is_empty() {
            return;
        }

        // Collect, for every case of this metadata file, its status for this mode.
        let case_status = self
            .report
            .execution_information
            .entry(specifier.metadata_file_path.clone().into())
            .or_default()
            .case_reports
            .iter()
            .flat_map(|(case_idx, mode_to_execution_map)| {
                let case_status = mode_to_execution_map
                    .mode_execution_reports
                    .get(&specifier.solc_mode)?
                    .status
                    .clone()
                    .expect("Can't be uninitialized");
                Some((*case_idx, case_status))
            })
            .collect::<BTreeMap<_, _>>();
        let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
            metadata_file_path: specifier.metadata_file_path.clone().into(),
            mode: specifier.solc_mode.clone(),
            case_status,
        };

        // According to the documentation on send, the sending fails if there are no more receiver
        // handles. Therefore, this isn't an error that we want to bubble up or anything. If we fail
        // to send then we ignore the error.
        let _ = self.listener_tx.send(event);
    }
|
||||||
|
|
||||||
|
    /// Records which node (and platform) a test execution was assigned to.
    fn handle_node_assigned_event(&mut self, event: NodeAssignedEvent) {
        let execution_information = self.execution_information(&ExecutionSpecifier {
            test_specifier: event.test_specifier,
            node_id: event.id,
            platform_identifier: event.platform_identifier,
        });
        execution_information.node = Some(TestCaseNodeInformation {
            id: event.id,
            platform_identifier: event.platform_identifier,
            connection_string: event.connection_string,
        });
    }
|
||||||
|
|
||||||
|
fn handle_pre_link_contracts_compilation_succeeded_event(
|
||||||
|
&mut self,
|
||||||
|
event: PreLinkContractsCompilationSucceededEvent,
|
||||||
|
) {
|
||||||
|
let include_input = self
|
||||||
|
.report
|
||||||
|
.context
|
||||||
|
.report_configuration()
|
||||||
|
.include_compiler_input;
|
||||||
|
let include_output = self
|
||||||
|
.report
|
||||||
|
.context
|
||||||
|
.report_configuration()
|
||||||
|
.include_compiler_output;
|
||||||
|
|
||||||
|
let execution_information = self.execution_information(&event.execution_specifier);
|
||||||
|
|
||||||
|
let compiler_input = if include_input {
|
||||||
|
event.compiler_input
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
let compiler_output = if include_output {
|
||||||
|
Some(event.compiler_output)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
execution_information.pre_link_compilation_status = Some(CompilationStatus::Success {
|
||||||
|
is_cached: event.is_cached,
|
||||||
|
compiler_version: event.compiler_version,
|
||||||
|
compiler_path: event.compiler_path,
|
||||||
|
compiler_input,
|
||||||
|
compiler_output,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
    /// Records a successful post-link compilation, attaching the compiler
    /// input/output only when the report configuration asks for them.
    fn handle_post_link_contracts_compilation_succeeded_event(
        &mut self,
        event: PostLinkContractsCompilationSucceededEvent,
    ) {
        let include_input = self
            .report
            .context
            .report_configuration()
            .include_compiler_input;
        let include_output = self
            .report
            .context
            .report_configuration()
            .include_compiler_output;

        let execution_information = self.execution_information(&event.execution_specifier);

        let compiler_input = if include_input {
            event.compiler_input
        } else {
            None
        };
        let compiler_output = if include_output {
            Some(event.compiler_output)
        } else {
            None
        };

        execution_information.post_link_compilation_status = Some(CompilationStatus::Success {
            is_cached: event.is_cached,
            compiler_version: event.compiler_version,
            compiler_path: event.compiler_path,
            compiler_input,
            compiler_output,
        });
    }

    /// Records a failed pre-link compilation with its reason and compiler details.
    fn handle_pre_link_contracts_compilation_failed_event(
        &mut self,
        event: PreLinkContractsCompilationFailedEvent,
    ) {
        let execution_information = self.execution_information(&event.execution_specifier);

        execution_information.pre_link_compilation_status = Some(CompilationStatus::Failure {
            reason: event.reason,
            compiler_version: event.compiler_version,
            compiler_path: event.compiler_path,
            compiler_input: event.compiler_input,
        });
    }

    /// Records a failed post-link compilation with its reason and compiler details.
    fn handle_post_link_contracts_compilation_failed_event(
        &mut self,
        event: PostLinkContractsCompilationFailedEvent,
    ) {
        let execution_information = self.execution_information(&event.execution_specifier);

        execution_information.post_link_compilation_status = Some(CompilationStatus::Failure {
            reason: event.reason,
            compiler_version: event.compiler_version,
            compiler_path: event.compiler_path,
            compiler_input: event.compiler_input,
        });
    }
|
||||||
|
|
||||||
|
fn handle_libraries_deployed_event(&mut self, event: LibrariesDeployedEvent) {
|
||||||
|
self.execution_information(&event.execution_specifier)
|
||||||
|
.deployed_libraries = Some(event.libraries);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn handle_contract_deployed_event(&mut self, event: ContractDeployedEvent) {
|
||||||
|
self.execution_information(&event.execution_specifier)
|
||||||
|
.deployed_contracts
|
||||||
|
.get_or_insert_default()
|
||||||
|
.insert(event.contract_instance.clone(), event.address);
|
||||||
|
self.test_case_report(&event.execution_specifier.test_specifier)
|
||||||
|
.contract_addresses
|
||||||
|
.entry(event.contract_instance)
|
||||||
|
.or_default()
|
||||||
|
.entry(event.execution_specifier.platform_identifier)
|
||||||
|
.or_default()
|
||||||
|
.push(event.address);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn handle_completion(&mut self, _: CompletionEvent) {
|
||||||
|
self.runner_rx.close();
|
||||||
|
self.handle_metrics_computation();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn handle_metrics_computation(&mut self) {
|
||||||
|
for report in self.report.execution_information.values_mut() {
|
||||||
|
for report in report.case_reports.values_mut() {
|
||||||
|
for report in report.mode_execution_reports.values_mut() {
|
||||||
|
for (platform_identifier, block_information) in
|
||||||
|
report.mined_block_information.iter_mut()
|
||||||
|
{
|
||||||
|
block_information.sort_by(|a, b| {
|
||||||
|
a.ethereum_block_information
|
||||||
|
.block_number
|
||||||
|
.cmp(&b.ethereum_block_information.block_number)
|
||||||
|
});
|
||||||
|
|
||||||
|
// Computing the TPS.
|
||||||
|
let tps = block_information
|
||||||
|
.iter()
|
||||||
|
.tuple_windows::<(_, _)>()
|
||||||
|
.map(|(block1, block2)| {
|
||||||
|
block2.ethereum_block_information.transaction_hashes.len() as u64
|
||||||
|
/ (block2.ethereum_block_information.block_timestamp
|
||||||
|
- block1.ethereum_block_information.block_timestamp)
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
report
|
||||||
|
.metrics
|
||||||
|
.get_or_insert_default()
|
||||||
|
.transaction_per_second
|
||||||
|
.with_list(*platform_identifier, tps);
|
||||||
|
|
||||||
|
// Computing the GPS.
|
||||||
|
let gps = block_information
|
||||||
|
.iter()
|
||||||
|
.tuple_windows::<(_, _)>()
|
||||||
|
.map(|(block1, block2)| {
|
||||||
|
block2.ethereum_block_information.mined_gas as u64
|
||||||
|
/ (block2.ethereum_block_information.block_timestamp
|
||||||
|
- block1.ethereum_block_information.block_timestamp)
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
report
|
||||||
|
.metrics
|
||||||
|
.get_or_insert_default()
|
||||||
|
.gas_per_second
|
||||||
|
.with_list(*platform_identifier, gps);
|
||||||
|
|
||||||
|
// Computing the gas block fullness
|
||||||
|
let gas_block_fullness = block_information
|
||||||
|
.iter()
|
||||||
|
.map(|block| block.gas_block_fullness_percentage())
|
||||||
|
.map(|v| v as u64)
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
report
|
||||||
|
.metrics
|
||||||
|
.get_or_insert_default()
|
||||||
|
.gas_block_fullness
|
||||||
|
.with_list(*platform_identifier, gas_block_fullness);
|
||||||
|
|
||||||
|
// Computing the ref-time block fullness
|
||||||
|
let reftime_block_fullness = block_information
|
||||||
|
.iter()
|
||||||
|
.filter_map(|block| block.ref_time_block_fullness_percentage())
|
||||||
|
.map(|v| v as u64)
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
if !reftime_block_fullness.is_empty() {
|
||||||
|
report
|
||||||
|
.metrics
|
||||||
|
.get_or_insert_default()
|
||||||
|
.ref_time_block_fullness
|
||||||
|
.get_or_insert_default()
|
||||||
|
.with_list(*platform_identifier, reftime_block_fullness);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Computing the proof size block fullness
|
||||||
|
let proof_size_block_fullness = block_information
|
||||||
|
.iter()
|
||||||
|
.filter_map(|block| block.proof_size_block_fullness_percentage())
|
||||||
|
.map(|v| v as u64)
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
if !proof_size_block_fullness.is_empty() {
|
||||||
|
report
|
||||||
|
.metrics
|
||||||
|
.get_or_insert_default()
|
||||||
|
.proof_size_block_fullness
|
||||||
|
.get_or_insert_default()
|
||||||
|
.with_list(*platform_identifier, proof_size_block_fullness);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn handle_step_transaction_information(&mut self, event: StepTransactionInformationEvent) {
|
||||||
|
self.test_case_report(&event.execution_specifier.test_specifier)
|
||||||
|
.steps
|
||||||
|
.entry(event.step_path)
|
||||||
|
.or_default()
|
||||||
|
.transactions
|
||||||
|
.entry(event.execution_specifier.platform_identifier)
|
||||||
|
.or_default()
|
||||||
|
.push(event.transaction_information);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn handle_contract_information(&mut self, event: ContractInformationEvent) {
|
||||||
|
self.test_case_report(&event.execution_specifier.test_specifier)
|
||||||
|
.compiled_contracts
|
||||||
|
.entry(event.source_code_path)
|
||||||
|
.or_default()
|
||||||
|
.entry(event.contract_name)
|
||||||
|
.or_default()
|
||||||
|
.contract_size
|
||||||
|
.insert(
|
||||||
|
event.execution_specifier.platform_identifier,
|
||||||
|
event.contract_size,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn handle_block_mined(&mut self, event: BlockMinedEvent) {
|
||||||
|
self.test_case_report(&event.execution_specifier.test_specifier)
|
||||||
|
.mined_block_information
|
||||||
|
.entry(event.execution_specifier.platform_identifier)
|
||||||
|
.or_default()
|
||||||
|
.push(event.mined_block_information);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut ExecutionReport {
|
||||||
|
self.report
|
||||||
|
.execution_information
|
||||||
|
.entry(specifier.metadata_file_path.clone().into())
|
||||||
|
.or_default()
|
||||||
|
.case_reports
|
||||||
|
.entry(specifier.case_idx)
|
||||||
|
.or_default()
|
||||||
|
.mode_execution_reports
|
||||||
|
.entry(specifier.solc_mode.clone())
|
||||||
|
.or_default()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn execution_information(
|
||||||
|
&mut self,
|
||||||
|
specifier: &ExecutionSpecifier,
|
||||||
|
) -> &mut ExecutionInformation {
|
||||||
|
let test_case_report = self.test_case_report(&specifier.test_specifier);
|
||||||
|
test_case_report
|
||||||
|
.platform_execution
|
||||||
|
.entry(specifier.platform_identifier)
|
||||||
|
.or_default()
|
||||||
|
.get_or_insert_default()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[serde_as]
|
||||||
|
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct Report {
|
||||||
|
/// The context that the tool was started up with.
|
||||||
|
pub context: Context,
|
||||||
|
/// The list of metadata files that were found by the tool.
|
||||||
|
pub metadata_files: BTreeSet<MetadataFilePath>,
|
||||||
|
/// Metrics from the execution.
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub metrics: Option<Metrics>,
|
||||||
|
/// Information relating to each test case.
|
||||||
|
pub execution_information: BTreeMap<MetadataFilePath, MetadataFileReport>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Report {
|
||||||
|
pub fn new(context: Context) -> Self {
|
||||||
|
Self {
|
||||||
|
context,
|
||||||
|
metrics: Default::default(),
|
||||||
|
metadata_files: Default::default(),
|
||||||
|
execution_information: Default::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
|
||||||
|
pub struct MetadataFileReport {
|
||||||
|
/// Metrics from the execution.
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub metrics: Option<Metrics>,
|
||||||
|
/// The report of each case keyed by the case idx.
|
||||||
|
pub case_reports: BTreeMap<CaseIdx, CaseReport>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[serde_as]
|
||||||
|
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
|
||||||
|
pub struct CaseReport {
|
||||||
|
/// Metrics from the execution.
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub metrics: Option<Metrics>,
|
||||||
|
/// The [`ExecutionReport`] for each one of the [`Mode`]s.
|
||||||
|
#[serde_as(as = "HashMap<DisplayFromStr, _>")]
|
||||||
|
pub mode_execution_reports: HashMap<Mode, ExecutionReport>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
|
||||||
|
pub struct ExecutionReport {
|
||||||
|
/// Information on the status of the test case and whether it succeeded, failed, or was ignored.
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub status: Option<TestCaseStatus>,
|
||||||
|
/// Metrics from the execution.
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub metrics: Option<Metrics>,
|
||||||
|
/// Information related to the execution on one of the platforms.
|
||||||
|
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
|
||||||
|
pub platform_execution: PlatformKeyedInformation<Option<ExecutionInformation>>,
|
||||||
|
/// Information on the compiled contracts.
|
||||||
|
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
|
||||||
|
pub compiled_contracts: BTreeMap<PathBuf, BTreeMap<String, ContractInformation>>,
|
||||||
|
/// The addresses of the deployed contracts
|
||||||
|
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
|
||||||
|
pub contract_addresses: BTreeMap<ContractInstance, PlatformKeyedInformation<Vec<Address>>>,
|
||||||
|
/// Information on the mined blocks as part of this execution.
|
||||||
|
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
|
||||||
|
pub mined_block_information: PlatformKeyedInformation<Vec<MinedBlockInformation>>,
|
||||||
|
/// Information tracked for each step that was executed.
|
||||||
|
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
|
||||||
|
pub steps: BTreeMap<StepPath, StepReport>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Information related to the status of the test. Could be that the test succeeded, failed, or that
|
||||||
|
/// it was ignored.
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
|
#[serde(tag = "status")]
|
||||||
|
pub enum TestCaseStatus {
|
||||||
|
/// The test case succeeded.
|
||||||
|
Succeeded {
|
||||||
|
/// The number of steps of the case that were executed.
|
||||||
|
steps_executed: usize,
|
||||||
|
},
|
||||||
|
/// The test case failed.
|
||||||
|
Failed {
|
||||||
|
/// The reason for the failure of the test case.
|
||||||
|
reason: String,
|
||||||
|
},
|
||||||
|
/// The test case was ignored. This variant carries information related to why it was ignored.
|
||||||
|
Ignored {
|
||||||
|
/// The reason behind the test case being ignored.
|
||||||
|
reason: String,
|
||||||
|
/// Additional fields that describe more information on why the test case is ignored.
|
||||||
|
#[serde(flatten)]
|
||||||
|
additional_fields: IndexMap<String, serde_json::Value>,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Information related to the platform node that's being used to execute the step.
|
||||||
|
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct TestCaseNodeInformation {
|
||||||
|
/// The ID of the node that this case is being executed on.
|
||||||
|
pub id: usize,
|
||||||
|
/// The platform of the node.
|
||||||
|
pub platform_identifier: PlatformIdentifier,
|
||||||
|
/// The connection string of the node.
|
||||||
|
pub connection_string: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Execution information tied to the platform.
|
||||||
|
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||||
|
pub struct ExecutionInformation {
|
||||||
|
/// Information related to the node assigned to this test case.
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub node: Option<TestCaseNodeInformation>,
|
||||||
|
/// Information on the pre-link compiled contracts.
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub pre_link_compilation_status: Option<CompilationStatus>,
|
||||||
|
/// Information on the post-link compiled contracts.
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub post_link_compilation_status: Option<CompilationStatus>,
|
||||||
|
/// Information on the deployed libraries.
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub deployed_libraries: Option<BTreeMap<ContractInstance, Address>>,
|
||||||
|
/// Information on the deployed contracts.
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub deployed_contracts: Option<BTreeMap<ContractInstance, Address>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Information related to compilation
|
||||||
|
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||||
|
#[serde(tag = "status")]
|
||||||
|
pub enum CompilationStatus {
|
||||||
|
/// The compilation was successful.
|
||||||
|
Success {
|
||||||
|
/// A flag with information on whether the compilation artifacts were cached or not.
|
||||||
|
is_cached: bool,
|
||||||
|
/// The version of the compiler used to compile the contracts.
|
||||||
|
compiler_version: Version,
|
||||||
|
/// The path of the compiler used to compile the contracts.
|
||||||
|
compiler_path: PathBuf,
|
||||||
|
/// The input provided to the compiler to compile the contracts. This is only included if
|
||||||
|
/// the appropriate flag is set in the CLI context and if the contracts were not cached and
|
||||||
|
/// the compiler was invoked.
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
compiler_input: Option<CompilerInput>,
|
||||||
|
/// The output of the compiler. This is only included if the appropriate flag is set in the
|
||||||
|
/// CLI contexts.
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
compiler_output: Option<CompilerOutput>,
|
||||||
|
},
|
||||||
|
/// The compilation failed.
|
||||||
|
Failure {
|
||||||
|
/// The failure reason.
|
||||||
|
reason: String,
|
||||||
|
/// The version of the compiler used to compile the contracts.
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
compiler_version: Option<Version>,
|
||||||
|
/// The path of the compiler used to compile the contracts.
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
compiler_path: Option<PathBuf>,
|
||||||
|
/// The input provided to the compiler to compile the contracts. This is only included if
|
||||||
|
/// the appropriate flag is set in the CLI context and if the contracts were not cached and
|
||||||
|
/// the compiler was invoked.
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
compiler_input: Option<CompilerInput>,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Information on each step in the execution.
|
||||||
|
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||||
|
pub struct StepReport {
|
||||||
|
/// Information on the transactions submitted as part of this step.
|
||||||
|
transactions: PlatformKeyedInformation<Vec<TransactionInformation>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||||
|
pub struct TransactionInformation {
|
||||||
|
/// The hash of the transaction
|
||||||
|
pub transaction_hash: TxHash,
|
||||||
|
pub submission_timestamp: u64,
|
||||||
|
pub block_timestamp: u64,
|
||||||
|
pub block_number: BlockNumber,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The metrics we collect for our benchmarks.
|
||||||
|
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||||
|
pub struct Metrics {
|
||||||
|
pub transaction_per_second: Metric<u64>,
|
||||||
|
pub gas_per_second: Metric<u64>,
|
||||||
|
/* Block Fullness */
|
||||||
|
pub gas_block_fullness: Metric<u64>,
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub ref_time_block_fullness: Option<Metric<u64>>,
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub proof_size_block_fullness: Option<Metric<u64>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The data that we store for a given metric (e.g., TPS).
|
||||||
|
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||||
|
pub struct Metric<T> {
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub minimum: Option<PlatformKeyedInformation<T>>,
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub maximum: Option<PlatformKeyedInformation<T>>,
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub mean: Option<PlatformKeyedInformation<T>>,
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub median: Option<PlatformKeyedInformation<T>>,
|
||||||
|
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||||
|
pub raw: Option<PlatformKeyedInformation<Vec<T>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> Metric<T>
|
||||||
|
where
|
||||||
|
T: Default
|
||||||
|
+ Copy
|
||||||
|
+ Ord
|
||||||
|
+ PartialOrd
|
||||||
|
+ Add<Output = T>
|
||||||
|
+ Div<Output = T>
|
||||||
|
+ TryFrom<usize, Error: std::fmt::Debug>,
|
||||||
|
{
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Default::default()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn platform_identifiers(&self) -> BTreeSet<PlatformIdentifier> {
|
||||||
|
self.minimum
|
||||||
|
.as_ref()
|
||||||
|
.map(|m| m.keys())
|
||||||
|
.into_iter()
|
||||||
|
.flatten()
|
||||||
|
.chain(
|
||||||
|
self.maximum
|
||||||
|
.as_ref()
|
||||||
|
.map(|m| m.keys())
|
||||||
|
.into_iter()
|
||||||
|
.flatten(),
|
||||||
|
)
|
||||||
|
.chain(self.mean.as_ref().map(|m| m.keys()).into_iter().flatten())
|
||||||
|
.chain(self.median.as_ref().map(|m| m.keys()).into_iter().flatten())
|
||||||
|
.chain(self.raw.as_ref().map(|m| m.keys()).into_iter().flatten())
|
||||||
|
.copied()
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_list(
|
||||||
|
&mut self,
|
||||||
|
platform_identifier: PlatformIdentifier,
|
||||||
|
original_list: Vec<T>,
|
||||||
|
) -> &mut Self {
|
||||||
|
let mut list = original_list.clone();
|
||||||
|
list.sort();
|
||||||
|
let Some(min) = list.first().copied() else {
|
||||||
|
return self;
|
||||||
|
};
|
||||||
|
let Some(max) = list.last().copied() else {
|
||||||
|
return self;
|
||||||
|
};
|
||||||
|
let sum = list.iter().fold(T::default(), |acc, num| acc + *num);
|
||||||
|
let mean = sum / TryInto::<T>::try_into(list.len()).unwrap();
|
||||||
|
|
||||||
|
let median = match list.len().is_multiple_of(2) {
|
||||||
|
true => {
|
||||||
|
let idx = list.len() / 2;
|
||||||
|
let val1 = *list.get(idx - 1).unwrap();
|
||||||
|
let val2 = *list.get(idx).unwrap();
|
||||||
|
(val1 + val2) / TryInto::<T>::try_into(2usize).unwrap()
|
||||||
|
}
|
||||||
|
false => {
|
||||||
|
let idx = list.len() / 2;
|
||||||
|
*list.get(idx).unwrap()
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
self.minimum
|
||||||
|
.get_or_insert_default()
|
||||||
|
.insert(platform_identifier, min);
|
||||||
|
self.maximum
|
||||||
|
.get_or_insert_default()
|
||||||
|
.insert(platform_identifier, max);
|
||||||
|
self.mean
|
||||||
|
.get_or_insert_default()
|
||||||
|
.insert(platform_identifier, mean);
|
||||||
|
self.median
|
||||||
|
.get_or_insert_default()
|
||||||
|
.insert(platform_identifier, median);
|
||||||
|
self.raw
|
||||||
|
.get_or_insert_default()
|
||||||
|
.insert(platform_identifier, original_list);
|
||||||
|
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn combine(&self, other: &Self) -> Self {
|
||||||
|
let mut platform_identifiers = self.platform_identifiers();
|
||||||
|
platform_identifiers.extend(other.platform_identifiers());
|
||||||
|
|
||||||
|
let mut this = Self::new();
|
||||||
|
for platform_identifier in platform_identifiers {
|
||||||
|
let mut l1 = self
|
||||||
|
.raw
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|m| m.get(&platform_identifier))
|
||||||
|
.cloned()
|
||||||
|
.unwrap_or_default();
|
||||||
|
let l2 = other
|
||||||
|
.raw
|
||||||
|
.as_ref()
|
||||||
|
.and_then(|m| m.get(&platform_identifier))
|
||||||
|
.cloned()
|
||||||
|
.unwrap_or_default();
|
||||||
|
l1.extend(l2);
|
||||||
|
this.with_list(platform_identifier, l1);
|
||||||
|
}
|
||||||
|
|
||||||
|
this
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
|
||||||
|
pub struct ContractInformation {
|
||||||
|
/// The size of the contract on the various platforms.
|
||||||
|
pub contract_size: PlatformKeyedInformation<usize>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||||
|
pub struct MinedBlockInformation {
|
||||||
|
pub ethereum_block_information: EthereumMinedBlockInformation,
|
||||||
|
pub substrate_block_information: Option<SubstrateMinedBlockInformation>,
|
||||||
|
pub tx_counts: BTreeMap<StepPath, usize>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl MinedBlockInformation {
|
||||||
|
pub fn gas_block_fullness_percentage(&self) -> u8 {
|
||||||
|
self.ethereum_block_information
|
||||||
|
.gas_block_fullness_percentage()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn ref_time_block_fullness_percentage(&self) -> Option<u8> {
|
||||||
|
self.substrate_block_information
|
||||||
|
.as_ref()
|
||||||
|
.map(|block| block.ref_time_block_fullness_percentage())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn proof_size_block_fullness_percentage(&self) -> Option<u8> {
|
||||||
|
self.substrate_block_information
|
||||||
|
.as_ref()
|
||||||
|
.map(|block| block.proof_size_block_fullness_percentage())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||||
|
pub struct EthereumMinedBlockInformation {
|
||||||
|
/// The block number.
|
||||||
|
pub block_number: BlockNumber,
|
||||||
|
|
||||||
|
/// The block timestamp.
|
||||||
|
pub block_timestamp: BlockTimestamp,
|
||||||
|
|
||||||
|
/// The amount of gas mined in the block.
|
||||||
|
pub mined_gas: u128,
|
||||||
|
|
||||||
|
/// The gas limit of the block.
|
||||||
|
pub block_gas_limit: u128,
|
||||||
|
|
||||||
|
/// The hashes of the transactions that were mined as part of the block.
|
||||||
|
pub transaction_hashes: Vec<TxHash>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl EthereumMinedBlockInformation {
|
||||||
|
pub fn gas_block_fullness_percentage(&self) -> u8 {
|
||||||
|
(self.mined_gas * 100 / self.block_gas_limit) as u8
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||||
|
pub struct SubstrateMinedBlockInformation {
|
||||||
|
/// The ref time for substrate based chains.
|
||||||
|
pub ref_time: u128,
|
||||||
|
|
||||||
|
/// The max ref time for substrate based chains.
|
||||||
|
pub max_ref_time: u64,
|
||||||
|
|
||||||
|
/// The proof size for substrate based chains.
|
||||||
|
pub proof_size: u128,
|
||||||
|
|
||||||
|
/// The max proof size for substrate based chains.
|
||||||
|
pub max_proof_size: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SubstrateMinedBlockInformation {
|
||||||
|
pub fn ref_time_block_fullness_percentage(&self) -> u8 {
|
||||||
|
(self.ref_time * 100 / self.max_ref_time as u128) as u8
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn proof_size_block_fullness_percentage(&self) -> u8 {
|
||||||
|
(self.proof_size * 100 / self.max_proof_size as u128) as u8
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Information keyed by the platform identifier.
|
||||||
|
pub type PlatformKeyedInformation<T> = BTreeMap<PlatformIdentifier, T>;
|
||||||
@@ -1,94 +0,0 @@
|
|||||||
//! The report analyzer enriches the raw report data.
|
|
||||||
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use crate::reporter::CompilationTask;
|
|
||||||
|
|
||||||
/// Provides insights into how well the compilers perform.
|
|
||||||
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, PartialOrd)]
|
|
||||||
pub struct CompilerStatistics {
|
|
||||||
/// The sum of contracts observed.
|
|
||||||
pub n_contracts: usize,
|
|
||||||
/// The mean size of compiled contracts.
|
|
||||||
pub mean_code_size: usize,
|
|
||||||
/// The mean size of the optimized YUL IR.
|
|
||||||
pub mean_yul_size: usize,
|
|
||||||
/// Is a proxy because the YUL also containes a lot of comments.
|
|
||||||
pub yul_to_bytecode_size_ratio: f32,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl CompilerStatistics {
|
|
||||||
/// Cumulatively update the statistics with the next compiler task.
|
|
||||||
pub fn sample(&mut self, compilation_task: &CompilationTask) {
|
|
||||||
let Some(output) = &compilation_task.json_output else {
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
|
|
||||||
let Some(contracts) = &output.contracts else {
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
|
|
||||||
for (_solidity, contracts) in contracts.iter() {
|
|
||||||
for (_name, contract) in contracts.iter() {
|
|
||||||
let Some(evm) = &contract.evm else {
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
let Some(deploy_code) = &evm.deployed_bytecode else {
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
|
|
||||||
// The EVM bytecode can be unlinked and thus is not necessarily a decodable hex
|
|
||||||
// string; for our statistics this is a good enough approximation.
|
|
||||||
let bytecode_size = deploy_code.object.len() / 2;
|
|
||||||
|
|
||||||
let yul_size = contract
|
|
||||||
.ir_optimized
|
|
||||||
.as_ref()
|
|
||||||
.expect("if the contract has a deploy code it should also have the opimized IR")
|
|
||||||
.len();
|
|
||||||
|
|
||||||
self.update_sizes(bytecode_size, yul_size);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Updates the size statistics cumulatively.
|
|
||||||
fn update_sizes(&mut self, bytecode_size: usize, yul_size: usize) {
|
|
||||||
let n_previous = self.n_contracts;
|
|
||||||
let n_current = self.n_contracts + 1;
|
|
||||||
|
|
||||||
self.n_contracts = n_current;
|
|
||||||
|
|
||||||
self.mean_code_size = (n_previous * self.mean_code_size + bytecode_size) / n_current;
|
|
||||||
self.mean_yul_size = (n_previous * self.mean_yul_size + yul_size) / n_current;
|
|
||||||
|
|
||||||
if self.mean_code_size > 0 {
|
|
||||||
self.yul_to_bytecode_size_ratio =
|
|
||||||
self.mean_yul_size as f32 / self.mean_code_size as f32;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::CompilerStatistics;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn compiler_statistics() {
|
|
||||||
let mut received = CompilerStatistics::default();
|
|
||||||
received.update_sizes(0, 0);
|
|
||||||
received.update_sizes(3, 37);
|
|
||||||
received.update_sizes(123, 456);
|
|
||||||
|
|
||||||
let mean_code_size = 41; // rounding error from integer truncation
|
|
||||||
let mean_yul_size = 164;
|
|
||||||
let expected = CompilerStatistics {
|
|
||||||
n_contracts: 3,
|
|
||||||
mean_code_size,
|
|
||||||
mean_yul_size,
|
|
||||||
yul_to_bytecode_size_ratio: mean_yul_size as f32 / mean_code_size as f32,
|
|
||||||
};
|
|
||||||
|
|
||||||
assert_eq!(received, expected);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -0,0 +1,37 @@
|
|||||||
|
//! Common types and functions used throughout the crate.
|
||||||
|
|
||||||
|
use std::{path::PathBuf, sync::Arc};
|
||||||
|
|
||||||
|
use revive_dt_common::{define_wrapper_type, types::PlatformIdentifier};
|
||||||
|
use revive_dt_compiler::Mode;
|
||||||
|
use revive_dt_format::{case::CaseIdx, steps::StepPath};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
define_wrapper_type!(
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||||
|
#[serde(transparent)]
|
||||||
|
pub struct MetadataFilePath(PathBuf);
|
||||||
|
);
|
||||||
|
|
||||||
|
/// An absolute specifier for a test.
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||||
|
pub struct TestSpecifier {
|
||||||
|
pub solc_mode: Mode,
|
||||||
|
pub metadata_file_path: PathBuf,
|
||||||
|
pub case_idx: CaseIdx,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An absolute path for a test that also includes information about the node that it's assigned to
|
||||||
|
/// and what platform it belongs to.
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||||
|
pub struct ExecutionSpecifier {
|
||||||
|
pub test_specifier: Arc<TestSpecifier>,
|
||||||
|
pub node_id: usize,
|
||||||
|
pub platform_identifier: PlatformIdentifier,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||||
|
pub struct StepExecutionSpecifier {
|
||||||
|
pub execution_specifier: Arc<ExecutionSpecifier>,
|
||||||
|
pub step_idx: StepPath,
|
||||||
|
}
|
||||||
@@ -1,4 +1,11 @@
|
|||||||
//! The revive differential tests reporting facility.
|
//! This crate implements the reporting infrastructure for the differential testing tool.
|
||||||
|
|
||||||
pub mod analyzer;
|
mod aggregator;
|
||||||
pub mod reporter;
|
mod common;
|
||||||
|
mod reporter_event;
|
||||||
|
mod runner_event;
|
||||||
|
|
||||||
|
pub use aggregator::*;
|
||||||
|
pub use common::*;
|
||||||
|
pub use reporter_event::*;
|
||||||
|
pub use runner_event::*;
|
||||||
|
|||||||
@@ -1,243 +0,0 @@
|
|||||||
//! The reporter is the central place observing test execution by collecting data.
|
|
||||||
//!
|
|
||||||
//! The data collected gives useful insights into the outcome of the test run
|
|
||||||
//! and helps identifying and reproducing failing cases.
|
|
||||||
|
|
||||||
use std::{
|
|
||||||
collections::HashMap,
|
|
||||||
fs::{self, File, create_dir_all},
|
|
||||||
path::PathBuf,
|
|
||||||
sync::{Mutex, OnceLock},
|
|
||||||
time::{SystemTime, UNIX_EPOCH},
|
|
||||||
};
|
|
||||||
|
|
||||||
use anyhow::Context;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use revive_dt_config::{Arguments, TestingPlatform};
|
|
||||||
use revive_dt_format::{corpus::Corpus, mode::SolcMode};
|
|
||||||
use revive_solc_json_interface::{SolcStandardJsonInput, SolcStandardJsonOutput};
|
|
||||||
|
|
||||||
use crate::analyzer::CompilerStatistics;
|
|
||||||
|
|
||||||
pub(crate) static REPORTER: OnceLock<Mutex<Report>> = OnceLock::new();
|
|
||||||
|
|
||||||
/// The `Report` datastructure stores all relevant inforamtion required for generating reports.
|
|
||||||
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
|
||||||
pub struct Report {
|
|
||||||
/// The configuration used during the test.
|
|
||||||
pub config: Arguments,
|
|
||||||
/// The observed test corpora.
|
|
||||||
pub corpora: Vec<Corpus>,
|
|
||||||
/// The observed test definitions.
|
|
||||||
pub metadata_files: Vec<PathBuf>,
|
|
||||||
/// The observed compilation results.
|
|
||||||
pub compiler_results: HashMap<TestingPlatform, Vec<CompilationResult>>,
|
|
||||||
/// The observed compilation statistics.
|
|
||||||
pub compiler_statistics: HashMap<TestingPlatform, CompilerStatistics>,
|
|
||||||
/// The file name this is serialized to.
|
|
||||||
#[serde(skip)]
|
|
||||||
directory: PathBuf,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Contains a compiled contract.
|
|
||||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct CompilationTask {
|
|
||||||
/// The observed compiler input.
|
|
||||||
pub json_input: SolcStandardJsonInput,
|
|
||||||
/// The observed compiler output.
|
|
||||||
pub json_output: Option<SolcStandardJsonOutput>,
|
|
||||||
/// The observed compiler mode.
|
|
||||||
pub mode: SolcMode,
|
|
||||||
/// The observed compiler version.
|
|
||||||
pub compiler_version: String,
|
|
||||||
/// The observed error, if any.
|
|
||||||
pub error: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Represents a report about a compilation task.
|
|
||||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct CompilationResult {
|
|
||||||
/// The observed compilation task.
|
|
||||||
pub compilation_task: CompilationTask,
|
|
||||||
/// The linked span.
|
|
||||||
pub span: Span,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The [Span] struct indicates the context of what is being reported.
|
|
||||||
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct Span {
|
|
||||||
/// The corpus index this belongs to.
|
|
||||||
corpus: usize,
|
|
||||||
/// The metadata file this belongs to.
|
|
||||||
metadata_file: usize,
|
|
||||||
/// The index of the case definition this belongs to.
|
|
||||||
case: usize,
|
|
||||||
/// The index of the case input this belongs to.
|
|
||||||
input: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Report {
|
|
||||||
/// The file name where this report will be written to.
|
|
||||||
pub const FILE_NAME: &str = "report.json";
|
|
||||||
|
|
||||||
/// The [Span] is expected to initialize the reporter by providing the config.
|
|
||||||
const INITIALIZED_VIA_SPAN: &str = "requires a Span which initializes the reporter";
|
|
||||||
|
|
||||||
/// Create a new [Report].
|
|
||||||
fn new(config: Arguments) -> anyhow::Result<Self> {
|
|
||||||
let now = SystemTime::now()
|
|
||||||
.duration_since(UNIX_EPOCH)
|
|
||||||
.unwrap()
|
|
||||||
.as_millis();
|
|
||||||
|
|
||||||
let directory = config.directory().join("report").join(format!("{now}"));
|
|
||||||
if !directory.exists() {
|
|
||||||
create_dir_all(&directory)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Self {
|
|
||||||
config,
|
|
||||||
directory,
|
|
||||||
..Default::default()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Add a compilation task to the report.
|
|
||||||
pub fn compilation(span: Span, platform: TestingPlatform, compilation_task: CompilationTask) {
|
|
||||||
let mut report = REPORTER
|
|
||||||
.get()
|
|
||||||
.expect(Report::INITIALIZED_VIA_SPAN)
|
|
||||||
.lock()
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
report
|
|
||||||
.compiler_statistics
|
|
||||||
.entry(platform)
|
|
||||||
.or_default()
|
|
||||||
.sample(&compilation_task);
|
|
||||||
|
|
||||||
report
|
|
||||||
.compiler_results
|
|
||||||
.entry(platform)
|
|
||||||
.or_default()
|
|
||||||
.push(CompilationResult {
|
|
||||||
compilation_task,
|
|
||||||
span,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Write the report to disk.
|
|
||||||
pub fn save() -> anyhow::Result<()> {
|
|
||||||
let Some(reporter) = REPORTER.get() else {
|
|
||||||
return Ok(());
|
|
||||||
};
|
|
||||||
let report = reporter.lock().unwrap();
|
|
||||||
|
|
||||||
if let Err(error) = report.write_to_file() {
|
|
||||||
anyhow::bail!("can not write report: {error}");
|
|
||||||
}
|
|
||||||
|
|
||||||
if report.config.extract_problems {
|
|
||||||
if let Err(error) = report.save_compiler_problems() {
|
|
||||||
anyhow::bail!("can not write compiler problems: {error}");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Write compiler problems to disk for later debugging.
|
|
||||||
pub fn save_compiler_problems(&self) -> anyhow::Result<()> {
|
|
||||||
for (platform, results) in self.compiler_results.iter() {
|
|
||||||
for result in results {
|
|
||||||
// ignore if there were no errors
|
|
||||||
if result.compilation_task.error.is_none()
|
|
||||||
&& result
|
|
||||||
.compilation_task
|
|
||||||
.json_output
|
|
||||||
.as_ref()
|
|
||||||
.and_then(|output| output.errors.as_ref())
|
|
||||||
.map(|errors| errors.is_empty())
|
|
||||||
.unwrap_or(true)
|
|
||||||
{
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
let path = &self.metadata_files[result.span.metadata_file]
|
|
||||||
.parent()
|
|
||||||
.unwrap()
|
|
||||||
.join(format!("{platform}_errors"));
|
|
||||||
if !path.exists() {
|
|
||||||
create_dir_all(path)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(error) = result.compilation_task.error.as_ref() {
|
|
||||||
fs::write(path.join("compiler_error.txt"), error)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(errors) = result.compilation_task.json_output.as_ref() {
|
|
||||||
let file = File::create(path.join("compiler_output.txt"))?;
|
|
||||||
serde_json::to_writer_pretty(file, &errors)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn write_to_file(&self) -> anyhow::Result<()> {
|
|
||||||
let path = self.directory.join(Self::FILE_NAME);
|
|
||||||
|
|
||||||
let file = File::create(&path).context(path.display().to_string())?;
|
|
||||||
serde_json::to_writer_pretty(file, &self)?;
|
|
||||||
|
|
||||||
tracing::info!("report written to: {}", path.display());
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Span {
|
|
||||||
/// Create a new [Span] with case and input index at 0.
|
|
||||||
///
|
|
||||||
/// Initializes the reporting facility on the first call.
|
|
||||||
pub fn new(corpus: Corpus, config: Arguments) -> anyhow::Result<Self> {
|
|
||||||
let report = Mutex::new(Report::new(config)?);
|
|
||||||
let mut reporter = REPORTER.get_or_init(|| report).lock().unwrap();
|
|
||||||
reporter.corpora.push(corpus);
|
|
||||||
|
|
||||||
Ok(Self {
|
|
||||||
corpus: reporter.corpora.len() - 1,
|
|
||||||
metadata_file: 0,
|
|
||||||
case: 0,
|
|
||||||
input: 0,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Advance to the next metadata file: Resets the case input index to 0.
|
|
||||||
pub fn next_metadata(&mut self, metadata_file: PathBuf) {
|
|
||||||
let mut reporter = REPORTER
|
|
||||||
.get()
|
|
||||||
.expect(Report::INITIALIZED_VIA_SPAN)
|
|
||||||
.lock()
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
reporter.metadata_files.push(metadata_file);
|
|
||||||
|
|
||||||
self.metadata_file = reporter.metadata_files.len() - 1;
|
|
||||||
self.case = 0;
|
|
||||||
self.input = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Advance to the next case: Increas the case index by one and resets the input index to 0.
|
|
||||||
pub fn next_case(&mut self) {
|
|
||||||
self.case += 1;
|
|
||||||
self.input = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Advance to the next input.
|
|
||||||
pub fn next_input(&mut self) {
|
|
||||||
self.input += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -0,0 +1,22 @@
|
|||||||
|
//! A reporter event sent by the report aggregator to the various listeners.
|
||||||
|
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
use revive_dt_compiler::Mode;
|
||||||
|
use revive_dt_format::case::CaseIdx;
|
||||||
|
|
||||||
|
use crate::{MetadataFilePath, TestCaseStatus};
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub enum ReporterEvent {
|
||||||
|
/// An event sent by the reporter once an entire metadata file and solc mode combination has
|
||||||
|
/// finished execution.
|
||||||
|
MetadataFileSolcModeCombinationExecutionCompleted {
|
||||||
|
/// The path of the metadata file.
|
||||||
|
metadata_file_path: MetadataFilePath,
|
||||||
|
/// The Solc mode that this metadata file was executed in.
|
||||||
|
mode: Mode,
|
||||||
|
/// The status of each one of the cases.
|
||||||
|
case_status: BTreeMap<CaseIdx, TestCaseStatus>,
|
||||||
|
},
|
||||||
|
}
|
||||||
@@ -0,0 +1,669 @@
|
|||||||
|
//! The types associated with the events sent by the runner to the reporter.
|
||||||
|
#![allow(dead_code)]
|
||||||
|
|
||||||
|
use std::{collections::BTreeMap, path::PathBuf, sync::Arc};
|
||||||
|
|
||||||
|
use alloy::primitives::Address;
|
||||||
|
use anyhow::Context as _;
|
||||||
|
use indexmap::IndexMap;
|
||||||
|
use revive_dt_common::types::PlatformIdentifier;
|
||||||
|
use revive_dt_compiler::{CompilerInput, CompilerOutput};
|
||||||
|
use revive_dt_format::metadata::ContractInstance;
|
||||||
|
use revive_dt_format::metadata::Metadata;
|
||||||
|
use revive_dt_format::steps::StepPath;
|
||||||
|
use semver::Version;
|
||||||
|
use tokio::sync::{broadcast, oneshot};
|
||||||
|
|
||||||
|
use crate::MinedBlockInformation;
|
||||||
|
use crate::TransactionInformation;
|
||||||
|
use crate::{ExecutionSpecifier, ReporterEvent, TestSpecifier, common::MetadataFilePath};
|
||||||
|
|
||||||
|
macro_rules! __report_gen_emit_test_specific {
|
||||||
|
(
|
||||||
|
$ident:ident,
|
||||||
|
$variant_ident:ident,
|
||||||
|
$skip_field:ident;
|
||||||
|
$( $bname:ident : $bty:ty, )*
|
||||||
|
;
|
||||||
|
$( $aname:ident : $aty:ty, )*
|
||||||
|
) => {
|
||||||
|
paste::paste! {
|
||||||
|
pub fn [< report_ $variant_ident:snake _event >](
|
||||||
|
&self
|
||||||
|
$(, $bname: impl Into<$bty> )*
|
||||||
|
$(, $aname: impl Into<$aty> )*
|
||||||
|
) -> anyhow::Result<()> {
|
||||||
|
self.report([< $variant_ident Event >] {
|
||||||
|
$skip_field: self.test_specifier.clone()
|
||||||
|
$(, $bname: $bname.into() )*
|
||||||
|
$(, $aname: $aname.into() )*
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! __report_gen_emit_test_specific_by_parse {
|
||||||
|
(
|
||||||
|
$ident:ident,
|
||||||
|
$variant_ident:ident,
|
||||||
|
$skip_field:ident;
|
||||||
|
$( $bname:ident : $bty:ty, )* ; $( $aname:ident : $aty:ty, )*
|
||||||
|
) => {
|
||||||
|
__report_gen_emit_test_specific!(
|
||||||
|
$ident, $variant_ident, $skip_field;
|
||||||
|
$( $bname : $bty, )* ; $( $aname : $aty, )*
|
||||||
|
);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! __report_gen_scan_before {
|
||||||
|
(
|
||||||
|
$ident:ident, $variant_ident:ident;
|
||||||
|
$( $before:ident : $bty:ty, )*
|
||||||
|
;
|
||||||
|
test_specifier : $skip_ty:ty,
|
||||||
|
$( $after:ident : $aty:ty, )*
|
||||||
|
;
|
||||||
|
) => {
|
||||||
|
__report_gen_emit_test_specific_by_parse!(
|
||||||
|
$ident, $variant_ident, test_specifier;
|
||||||
|
$( $before : $bty, )* ; $( $after : $aty, )*
|
||||||
|
);
|
||||||
|
};
|
||||||
|
(
|
||||||
|
$ident:ident, $variant_ident:ident;
|
||||||
|
$( $before:ident : $bty:ty, )*
|
||||||
|
;
|
||||||
|
$name:ident : $ty:ty, $( $after:ident : $aty:ty, )*
|
||||||
|
;
|
||||||
|
) => {
|
||||||
|
__report_gen_scan_before!(
|
||||||
|
$ident, $variant_ident;
|
||||||
|
$( $before : $bty, )* $name : $ty,
|
||||||
|
;
|
||||||
|
$( $after : $aty, )*
|
||||||
|
;
|
||||||
|
);
|
||||||
|
};
|
||||||
|
(
|
||||||
|
$ident:ident, $variant_ident:ident;
|
||||||
|
$( $before:ident : $bty:ty, )*
|
||||||
|
;
|
||||||
|
;
|
||||||
|
) => {};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! __report_gen_for_variant {
|
||||||
|
(
|
||||||
|
$ident:ident,
|
||||||
|
$variant_ident:ident;
|
||||||
|
) => {};
|
||||||
|
(
|
||||||
|
$ident:ident,
|
||||||
|
$variant_ident:ident;
|
||||||
|
$( $field_ident:ident : $field_ty:ty ),+ $(,)?
|
||||||
|
) => {
|
||||||
|
__report_gen_scan_before!(
|
||||||
|
$ident, $variant_ident;
|
||||||
|
;
|
||||||
|
$( $field_ident : $field_ty, )*
|
||||||
|
;
|
||||||
|
);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! __report_gen_emit_execution_specific {
|
||||||
|
(
|
||||||
|
$ident:ident,
|
||||||
|
$variant_ident:ident,
|
||||||
|
$skip_field:ident;
|
||||||
|
$( $bname:ident : $bty:ty, )*
|
||||||
|
;
|
||||||
|
$( $aname:ident : $aty:ty, )*
|
||||||
|
) => {
|
||||||
|
paste::paste! {
|
||||||
|
pub fn [< report_ $variant_ident:snake _event >](
|
||||||
|
&self
|
||||||
|
$(, $bname: impl Into<$bty> )*
|
||||||
|
$(, $aname: impl Into<$aty> )*
|
||||||
|
) -> anyhow::Result<()> {
|
||||||
|
self.report([< $variant_ident Event >] {
|
||||||
|
$skip_field: self.execution_specifier.clone()
|
||||||
|
$(, $bname: $bname.into() )*
|
||||||
|
$(, $aname: $aname.into() )*
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! __report_gen_emit_execution_specific_by_parse {
|
||||||
|
(
|
||||||
|
$ident:ident,
|
||||||
|
$variant_ident:ident,
|
||||||
|
$skip_field:ident;
|
||||||
|
$( $bname:ident : $bty:ty, )* ; $( $aname:ident : $aty:ty, )*
|
||||||
|
) => {
|
||||||
|
__report_gen_emit_execution_specific!(
|
||||||
|
$ident, $variant_ident, $skip_field;
|
||||||
|
$( $bname : $bty, )* ; $( $aname : $aty, )*
|
||||||
|
);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! __report_gen_scan_before_exec {
|
||||||
|
(
|
||||||
|
$ident:ident, $variant_ident:ident;
|
||||||
|
$( $before:ident : $bty:ty, )*
|
||||||
|
;
|
||||||
|
execution_specifier : $skip_ty:ty,
|
||||||
|
$( $after:ident : $aty:ty, )*
|
||||||
|
;
|
||||||
|
) => {
|
||||||
|
__report_gen_emit_execution_specific_by_parse!(
|
||||||
|
$ident, $variant_ident, execution_specifier;
|
||||||
|
$( $before : $bty, )* ; $( $after : $aty, )*
|
||||||
|
);
|
||||||
|
};
|
||||||
|
(
|
||||||
|
$ident:ident, $variant_ident:ident;
|
||||||
|
$( $before:ident : $bty:ty, )*
|
||||||
|
;
|
||||||
|
$name:ident : $ty:ty, $( $after:ident : $aty:ty, )*
|
||||||
|
;
|
||||||
|
) => {
|
||||||
|
__report_gen_scan_before_exec!(
|
||||||
|
$ident, $variant_ident;
|
||||||
|
$( $before : $bty, )* $name : $ty,
|
||||||
|
;
|
||||||
|
$( $after : $aty, )*
|
||||||
|
;
|
||||||
|
);
|
||||||
|
};
|
||||||
|
(
|
||||||
|
$ident:ident, $variant_ident:ident;
|
||||||
|
$( $before:ident : $bty:ty, )*
|
||||||
|
;
|
||||||
|
;
|
||||||
|
) => {};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! __report_gen_for_variant_exec {
|
||||||
|
(
|
||||||
|
$ident:ident,
|
||||||
|
$variant_ident:ident;
|
||||||
|
) => {};
|
||||||
|
(
|
||||||
|
$ident:ident,
|
||||||
|
$variant_ident:ident;
|
||||||
|
$( $field_ident:ident : $field_ty:ty ),+ $(,)?
|
||||||
|
) => {
|
||||||
|
__report_gen_scan_before_exec!(
|
||||||
|
$ident, $variant_ident;
|
||||||
|
;
|
||||||
|
$( $field_ident : $field_ty, )*
|
||||||
|
;
|
||||||
|
);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! __report_gen_emit_step_execution_specific {
|
||||||
|
(
|
||||||
|
$ident:ident,
|
||||||
|
$variant_ident:ident,
|
||||||
|
$skip_field:ident;
|
||||||
|
$( $bname:ident : $bty:ty, )*
|
||||||
|
;
|
||||||
|
$( $aname:ident : $aty:ty, )*
|
||||||
|
) => {
|
||||||
|
paste::paste! {
|
||||||
|
pub fn [< report_ $variant_ident:snake _event >](
|
||||||
|
&self
|
||||||
|
$(, $bname: impl Into<$bty> )*
|
||||||
|
$(, $aname: impl Into<$aty> )*
|
||||||
|
) -> anyhow::Result<()> {
|
||||||
|
self.report([< $variant_ident Event >] {
|
||||||
|
$skip_field: self.step_specifier.clone()
|
||||||
|
$(, $bname: $bname.into() )*
|
||||||
|
$(, $aname: $aname.into() )*
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! __report_gen_emit_step_execution_specific_by_parse {
|
||||||
|
(
|
||||||
|
$ident:ident,
|
||||||
|
$variant_ident:ident,
|
||||||
|
$skip_field:ident;
|
||||||
|
$( $bname:ident : $bty:ty, )* ; $( $aname:ident : $aty:ty, )*
|
||||||
|
) => {
|
||||||
|
__report_gen_emit_step_execution_specific!(
|
||||||
|
$ident, $variant_ident, $skip_field;
|
||||||
|
$( $bname : $bty, )* ; $( $aname : $aty, )*
|
||||||
|
);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! __report_gen_scan_before_step {
|
||||||
|
(
|
||||||
|
$ident:ident, $variant_ident:ident;
|
||||||
|
$( $before:ident : $bty:ty, )*
|
||||||
|
;
|
||||||
|
step_specifier : $skip_ty:ty,
|
||||||
|
$( $after:ident : $aty:ty, )*
|
||||||
|
;
|
||||||
|
) => {
|
||||||
|
__report_gen_emit_step_execution_specific_by_parse!(
|
||||||
|
$ident, $variant_ident, step_specifier;
|
||||||
|
$( $before : $bty, )* ; $( $after : $aty, )*
|
||||||
|
);
|
||||||
|
};
|
||||||
|
(
|
||||||
|
$ident:ident, $variant_ident:ident;
|
||||||
|
$( $before:ident : $bty:ty, )*
|
||||||
|
;
|
||||||
|
$name:ident : $ty:ty, $( $after:ident : $aty:ty, )*
|
||||||
|
;
|
||||||
|
) => {
|
||||||
|
__report_gen_scan_before_step!(
|
||||||
|
$ident, $variant_ident;
|
||||||
|
$( $before : $bty, )* $name : $ty,
|
||||||
|
;
|
||||||
|
$( $after : $aty, )*
|
||||||
|
;
|
||||||
|
);
|
||||||
|
};
|
||||||
|
(
|
||||||
|
$ident:ident, $variant_ident:ident;
|
||||||
|
$( $before:ident : $bty:ty, )*
|
||||||
|
;
|
||||||
|
;
|
||||||
|
) => {};
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! __report_gen_for_variant_step {
|
||||||
|
(
|
||||||
|
$ident:ident,
|
||||||
|
$variant_ident:ident;
|
||||||
|
) => {};
|
||||||
|
(
|
||||||
|
$ident:ident,
|
||||||
|
$variant_ident:ident;
|
||||||
|
$( $field_ident:ident : $field_ty:ty ),+ $(,)?
|
||||||
|
) => {
|
||||||
|
__report_gen_scan_before_step!(
|
||||||
|
$ident, $variant_ident;
|
||||||
|
;
|
||||||
|
$( $field_ident : $field_ty, )*
|
||||||
|
;
|
||||||
|
);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Defines the runner-event which is sent from the test runners to the report aggregator.
|
||||||
|
///
|
||||||
|
/// This macro defines a number of things related to the reporting infrastructure and the interface
|
||||||
|
/// used. First of all, it defines the enum of all of the possible events that the runners can send
|
||||||
|
/// to the aggregator. For each one of the variants it defines a separate struct for it to allow the
|
||||||
|
/// variant field in the enum to be put in a [`Box`].
|
||||||
|
///
|
||||||
|
/// In addition to the above, it defines [`From`] implementations for the various event types for
|
||||||
|
/// the [`RunnerEvent`] enum essentially allowing for events such as [`CorpusFileDiscoveryEvent`] to
|
||||||
|
/// be converted into a [`RunnerEvent`].
|
||||||
|
///
|
||||||
|
/// In addition to the above, it also defines the [`RunnerEventReporter`] which is a wrapper around
|
||||||
|
/// an [`UnboundedSender`] allowing for events to be sent to the report aggregator.
|
||||||
|
///
|
||||||
|
/// With the above description, we can see that this macro defines almost all of the interface of
|
||||||
|
/// the reporting infrastructure, from the enum itself, to its associated types, and also to the
|
||||||
|
/// reporter that's used to report events to the aggregator.
|
||||||
|
///
|
||||||
|
/// [`UnboundedSender`]: tokio::sync::mpsc::UnboundedSender
|
||||||
|
macro_rules! define_event {
|
||||||
|
(
|
||||||
|
$(#[$enum_meta: meta])*
|
||||||
|
$vis: vis enum $ident: ident {
|
||||||
|
$(
|
||||||
|
$(#[$variant_meta: meta])*
|
||||||
|
$variant_ident: ident {
|
||||||
|
$(
|
||||||
|
$(#[$field_meta: meta])*
|
||||||
|
$field_ident: ident: $field_ty: ty
|
||||||
|
),* $(,)?
|
||||||
|
}
|
||||||
|
),* $(,)?
|
||||||
|
}
|
||||||
|
) => {
|
||||||
|
paste::paste! {
|
||||||
|
$(#[$enum_meta])*
|
||||||
|
#[derive(Debug)]
|
||||||
|
$vis enum $ident {
|
||||||
|
$(
|
||||||
|
$(#[$variant_meta])*
|
||||||
|
$variant_ident(Box<[<$variant_ident Event>]>)
|
||||||
|
),*
|
||||||
|
}
|
||||||
|
|
||||||
|
impl $ident {
|
||||||
|
pub fn variant_name(&self) -> &'static str {
|
||||||
|
match self {
|
||||||
|
$(
|
||||||
|
Self::$variant_ident { .. } => stringify!($variant_ident)
|
||||||
|
),*
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
$(
|
||||||
|
#[derive(Debug)]
|
||||||
|
$(#[$variant_meta])*
|
||||||
|
$vis struct [<$variant_ident Event>] {
|
||||||
|
$(
|
||||||
|
$(#[$field_meta])*
|
||||||
|
$vis $field_ident: $field_ty
|
||||||
|
),*
|
||||||
|
}
|
||||||
|
)*
|
||||||
|
|
||||||
|
$(
|
||||||
|
impl From<[<$variant_ident Event>]> for $ident {
|
||||||
|
fn from(value: [<$variant_ident Event>]) -> Self {
|
||||||
|
Self::$variant_ident(Box::new(value))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)*
|
||||||
|
|
||||||
|
/// Provides a way to report events to the aggregator.
|
||||||
|
///
|
||||||
|
/// Under the hood, this is a wrapper around an [`UnboundedSender`] which abstracts away
|
||||||
|
/// the fact that channels are used and that implements high-level methods for reporting
|
||||||
|
/// various events to the aggregator.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct [< $ident Reporter >]($vis tokio::sync::mpsc::UnboundedSender<$ident>);
|
||||||
|
|
||||||
|
impl From<tokio::sync::mpsc::UnboundedSender<$ident>> for [< $ident Reporter >] {
|
||||||
|
fn from(value: tokio::sync::mpsc::UnboundedSender<$ident>) -> Self {
|
||||||
|
Self(value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl [< $ident Reporter >] {
|
||||||
|
pub fn test_specific_reporter(
|
||||||
|
&self,
|
||||||
|
test_specifier: impl Into<std::sync::Arc<crate::common::TestSpecifier>>
|
||||||
|
) -> [< $ident TestSpecificReporter >] {
|
||||||
|
[< $ident TestSpecificReporter >] {
|
||||||
|
reporter: self.clone(),
|
||||||
|
test_specifier: test_specifier.into(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
|
||||||
|
self.0.send(event.into()).map_err(Into::into)
|
||||||
|
}
|
||||||
|
|
||||||
|
$(
|
||||||
|
pub fn [< report_ $variant_ident:snake _event >](&self, $($field_ident: impl Into<$field_ty>),*) -> anyhow::Result<()> {
|
||||||
|
self.report([< $variant_ident Event >] {
|
||||||
|
$($field_ident: $field_ident.into()),*
|
||||||
|
})
|
||||||
|
}
|
||||||
|
)*
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A reporter that's tied to a specific test case.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct [< $ident TestSpecificReporter >] {
|
||||||
|
$vis reporter: [< $ident Reporter >],
|
||||||
|
$vis test_specifier: std::sync::Arc<crate::common::TestSpecifier>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl [< $ident TestSpecificReporter >] {
|
||||||
|
pub fn execution_specific_reporter(
|
||||||
|
&self,
|
||||||
|
node_id: impl Into<usize>,
|
||||||
|
platform_identifier: impl Into<PlatformIdentifier>
|
||||||
|
) -> [< $ident ExecutionSpecificReporter >] {
|
||||||
|
[< $ident ExecutionSpecificReporter >] {
|
||||||
|
reporter: self.reporter.clone(),
|
||||||
|
execution_specifier: Arc::new($crate::common::ExecutionSpecifier {
|
||||||
|
test_specifier: self.test_specifier.clone(),
|
||||||
|
node_id: node_id.into(),
|
||||||
|
platform_identifier: platform_identifier.into(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
|
||||||
|
self.reporter.report(event)
|
||||||
|
}
|
||||||
|
|
||||||
|
$(
|
||||||
|
__report_gen_for_variant! { $ident, $variant_ident; $( $field_ident : $field_ty ),* }
|
||||||
|
)*
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A reporter that's tied to a specific execution of the test case such as execution on
|
||||||
|
/// a specific node from a specific platform.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct [< $ident ExecutionSpecificReporter >] {
|
||||||
|
$vis reporter: [< $ident Reporter >],
|
||||||
|
$vis execution_specifier: std::sync::Arc<$crate::common::ExecutionSpecifier>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl [< $ident ExecutionSpecificReporter >] {
|
||||||
|
fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
|
||||||
|
self.reporter.report(event)
|
||||||
|
}
|
||||||
|
|
||||||
|
$(
|
||||||
|
__report_gen_for_variant_exec! { $ident, $variant_ident; $( $field_ident : $field_ty ),* }
|
||||||
|
)*
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A reporter that's tied to a specific step execution
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct [< $ident StepExecutionSpecificReporter >] {
|
||||||
|
$vis reporter: [< $ident Reporter >],
|
||||||
|
$vis step_specifier: std::sync::Arc<$crate::common::StepExecutionSpecifier>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl [< $ident StepExecutionSpecificReporter >] {
|
||||||
|
fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
|
||||||
|
self.reporter.report(event)
|
||||||
|
}
|
||||||
|
|
||||||
|
$(
|
||||||
|
__report_gen_for_variant_step! { $ident, $variant_ident; $( $field_ident : $field_ty ),* }
|
||||||
|
)*
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
define_event! {
|
||||||
|
/// An event type that's sent by the test runners/drivers to the report aggregator.
|
||||||
|
pub(crate) enum RunnerEvent {
|
||||||
|
/// An event emitted by the reporter when it wishes to listen to events emitted by the
|
||||||
|
/// aggregator.
|
||||||
|
SubscribeToEvents {
|
||||||
|
/// The channel that the aggregator is to send the receive side of the channel on.
|
||||||
|
tx: oneshot::Sender<broadcast::Receiver<ReporterEvent>>
|
||||||
|
},
|
||||||
|
/// An event emitted by runners when they've discovered a metadata file.
|
||||||
|
MetadataFileDiscovery {
|
||||||
|
/// The path of the metadata file discovered.
|
||||||
|
path: MetadataFilePath,
|
||||||
|
/// The content of the metadata file.
|
||||||
|
metadata: Metadata
|
||||||
|
},
|
||||||
|
/// An event emitted by the runners when they discover a test case.
|
||||||
|
TestCaseDiscovery {
|
||||||
|
/// A specifier for the test that was discovered.
|
||||||
|
test_specifier: Arc<TestSpecifier>,
|
||||||
|
},
|
||||||
|
/// An event emitted by the runners when a test case is ignored.
|
||||||
|
TestIgnored {
|
||||||
|
/// A specifier for the test that's been ignored.
|
||||||
|
test_specifier: Arc<TestSpecifier>,
|
||||||
|
/// A reason for the test to be ignored.
|
||||||
|
reason: String,
|
||||||
|
/// Additional fields that describe more information on why the test was ignored.
|
||||||
|
additional_fields: IndexMap<String, serde_json::Value>
|
||||||
|
},
|
||||||
|
/// An event emitted by the runners when a test case has succeeded.
|
||||||
|
TestSucceeded {
|
||||||
|
/// A specifier for the test that succeeded.
|
||||||
|
test_specifier: Arc<TestSpecifier>,
|
||||||
|
/// The number of steps of the case that were executed by the driver.
|
||||||
|
steps_executed: usize,
|
||||||
|
},
|
||||||
|
/// An event emitted by the runners when a test case has failed.
|
||||||
|
TestFailed {
|
||||||
|
/// A specifier for the test that succeeded.
|
||||||
|
test_specifier: Arc<TestSpecifier>,
|
||||||
|
/// A reason for the failure of the test.
|
||||||
|
reason: String,
|
||||||
|
},
|
||||||
|
/// An event emitted when the test case is assigned a platform node.
|
||||||
|
NodeAssigned {
|
||||||
|
/// A specifier for the test that the assignment is for.
|
||||||
|
test_specifier: Arc<TestSpecifier>,
|
||||||
|
/// The ID of the node that this case is being executed on.
|
||||||
|
id: usize,
|
||||||
|
/// The identifier of the platform used.
|
||||||
|
platform_identifier: PlatformIdentifier,
|
||||||
|
/// The connection string of the node.
|
||||||
|
connection_string: String,
|
||||||
|
},
|
||||||
|
/// An event emitted by the runners when the compilation of the contracts has succeeded
|
||||||
|
/// on the pre-link contracts.
|
||||||
|
PreLinkContractsCompilationSucceeded {
|
||||||
|
/// A specifier for the execution that's taking place.
|
||||||
|
execution_specifier: Arc<ExecutionSpecifier>,
|
||||||
|
/// The version of the compiler used to compile the contracts.
|
||||||
|
compiler_version: Version,
|
||||||
|
/// The path of the compiler used to compile the contracts.
|
||||||
|
compiler_path: PathBuf,
|
||||||
|
/// A flag of whether the contract bytecode and ABI were cached or if they were compiled
|
||||||
|
/// anew.
|
||||||
|
is_cached: bool,
|
||||||
|
/// The input provided to the compiler - this is optional and not provided if the
|
||||||
|
/// contracts were obtained from the cache.
|
||||||
|
compiler_input: Option<CompilerInput>,
|
||||||
|
/// The output of the compiler.
|
||||||
|
compiler_output: CompilerOutput
|
||||||
|
},
|
||||||
|
/// An event emitted by the runners when the compilation of the contracts has succeeded
|
||||||
|
/// on the post-link contracts.
|
||||||
|
PostLinkContractsCompilationSucceeded {
|
||||||
|
/// A specifier for the execution that's taking place.
|
||||||
|
execution_specifier: Arc<ExecutionSpecifier>,
|
||||||
|
/// The version of the compiler used to compile the contracts.
|
||||||
|
compiler_version: Version,
|
||||||
|
/// The path of the compiler used to compile the contracts.
|
||||||
|
compiler_path: PathBuf,
|
||||||
|
/// A flag of whether the contract bytecode and ABI were cached or if they were compiled
|
||||||
|
/// anew.
|
||||||
|
is_cached: bool,
|
||||||
|
/// The input provided to the compiler - this is optional and not provided if the
|
||||||
|
/// contracts were obtained from the cache.
|
||||||
|
compiler_input: Option<CompilerInput>,
|
||||||
|
/// The output of the compiler.
|
||||||
|
compiler_output: CompilerOutput
|
||||||
|
},
|
||||||
|
/// An event emitted by the runners when the compilation of the pre-link contract has
|
||||||
|
/// failed.
|
||||||
|
PreLinkContractsCompilationFailed {
|
||||||
|
/// A specifier for the execution that's taking place.
|
||||||
|
execution_specifier: Arc<ExecutionSpecifier>,
|
||||||
|
/// The version of the compiler used to compile the contracts.
|
||||||
|
compiler_version: Option<Version>,
|
||||||
|
/// The path of the compiler used to compile the contracts.
|
||||||
|
compiler_path: Option<PathBuf>,
|
||||||
|
/// The input provided to the compiler - this is optional and not provided if the
|
||||||
|
/// contracts were obtained from the cache.
|
||||||
|
compiler_input: Option<CompilerInput>,
|
||||||
|
/// The failure reason.
|
||||||
|
reason: String,
|
||||||
|
},
|
||||||
|
/// An event emitted by the runners when the compilation of the post-link contract has
|
||||||
|
/// failed.
|
||||||
|
PostLinkContractsCompilationFailed {
|
||||||
|
/// A specifier for the execution that's taking place.
|
||||||
|
execution_specifier: Arc<ExecutionSpecifier>,
|
||||||
|
/// The version of the compiler used to compile the contracts.
|
||||||
|
compiler_version: Option<Version>,
|
||||||
|
/// The path of the compiler used to compile the contracts.
|
||||||
|
compiler_path: Option<PathBuf>,
|
||||||
|
/// The input provided to the compiler - this is optional and not provided if the
|
||||||
|
/// contracts were obtained from the cache.
|
||||||
|
compiler_input: Option<CompilerInput>,
|
||||||
|
/// The failure reason.
|
||||||
|
reason: String,
|
||||||
|
},
|
||||||
|
/// An event emitted by the runners when a library has been deployed.
|
||||||
|
LibrariesDeployed {
|
||||||
|
/// A specifier for the execution that's taking place.
|
||||||
|
execution_specifier: Arc<ExecutionSpecifier>,
|
||||||
|
/// The addresses of the libraries that were deployed.
|
||||||
|
libraries: BTreeMap<ContractInstance, Address>
|
||||||
|
},
|
||||||
|
/// An event emitted by the runners when they've deployed a new contract.
|
||||||
|
ContractDeployed {
|
||||||
|
/// A specifier for the execution that's taking place.
|
||||||
|
execution_specifier: Arc<ExecutionSpecifier>,
|
||||||
|
/// The instance name of the contract.
|
||||||
|
contract_instance: ContractInstance,
|
||||||
|
/// The address of the contract.
|
||||||
|
address: Address
|
||||||
|
},
|
||||||
|
/// Reports the completion of the run.
|
||||||
|
Completion {},
|
||||||
|
|
||||||
|
/* Benchmarks Events */
|
||||||
|
/// An event emitted with information on a transaction that was submitted for a certain step
|
||||||
|
/// of the execution.
|
||||||
|
StepTransactionInformation {
|
||||||
|
/// A specifier for the execution that's taking place.
|
||||||
|
execution_specifier: Arc<ExecutionSpecifier>,
|
||||||
|
/// The path of the step that this transaction belongs to.
|
||||||
|
step_path: StepPath,
|
||||||
|
/// Information about the transaction
|
||||||
|
transaction_information: TransactionInformation
|
||||||
|
},
|
||||||
|
ContractInformation {
|
||||||
|
/// A specifier for the execution that's taking place.
|
||||||
|
execution_specifier: Arc<ExecutionSpecifier>,
|
||||||
|
/// The path of the solidity source code that contains the contract.
|
||||||
|
source_code_path: PathBuf,
|
||||||
|
/// The name of the contract
|
||||||
|
contract_name: String,
|
||||||
|
/// The size of the contract
|
||||||
|
contract_size: usize
|
||||||
|
},
|
||||||
|
BlockMined {
|
||||||
|
/// A specifier for the execution that's taking place.
|
||||||
|
execution_specifier: Arc<ExecutionSpecifier>,
|
||||||
|
/// Information on the mined block,
|
||||||
|
mined_block_information: MinedBlockInformation
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An extension to the [`Reporter`] implemented by the macro.
|
||||||
|
impl RunnerEventReporter {
|
||||||
|
pub async fn subscribe(&self) -> anyhow::Result<broadcast::Receiver<ReporterEvent>> {
|
||||||
|
let (tx, rx) = oneshot::channel::<broadcast::Receiver<ReporterEvent>>();
|
||||||
|
self.report_subscribe_to_events_event(tx)
|
||||||
|
.context("Failed to send subscribe request to reporter task")?;
|
||||||
|
rx.await.map_err(Into::into)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub type Reporter = RunnerEventReporter;
|
||||||
|
pub type TestSpecificReporter = RunnerEventTestSpecificReporter;
|
||||||
|
pub type ExecutionSpecificReporter = RunnerEventExecutionSpecificReporter;
|
||||||
@@ -9,10 +9,16 @@ repository.workspace = true
|
|||||||
rust-version.workspace = true
|
rust-version.workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
revive-dt-common = { workspace = true }
|
||||||
|
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
hex = { workspace = true }
|
hex = { workspace = true }
|
||||||
tracing = { workspace = true }
|
tracing = { workspace = true }
|
||||||
|
tokio = { workspace = true }
|
||||||
reqwest = { workspace = true }
|
reqwest = { workspace = true }
|
||||||
semver = { workspace = true }
|
semver = { workspace = true }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
sha2 = { workspace = true }
|
sha2 = { workspace = true }
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
|||||||
@@ -6,54 +6,79 @@ use std::{
|
|||||||
io::{BufWriter, Write},
|
io::{BufWriter, Write},
|
||||||
os::unix::fs::PermissionsExt,
|
os::unix::fs::PermissionsExt,
|
||||||
path::{Path, PathBuf},
|
path::{Path, PathBuf},
|
||||||
sync::{LazyLock, Mutex},
|
sync::LazyLock,
|
||||||
};
|
};
|
||||||
|
|
||||||
use crate::download::GHDownloader;
|
use semver::Version;
|
||||||
|
use tokio::sync::Mutex;
|
||||||
|
|
||||||
|
use crate::download::SolcDownloader;
|
||||||
|
use anyhow::Context as _;
|
||||||
|
|
||||||
pub const SOLC_CACHE_DIRECTORY: &str = "solc";
|
pub const SOLC_CACHE_DIRECTORY: &str = "solc";
|
||||||
pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);
|
pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);
|
||||||
|
|
||||||
pub(crate) fn get_or_download(
|
pub(crate) async fn get_or_download(
|
||||||
working_directory: &Path,
|
working_directory: &Path,
|
||||||
downloader: &GHDownloader,
|
downloader: &SolcDownloader,
|
||||||
) -> anyhow::Result<PathBuf> {
|
) -> anyhow::Result<(Version, PathBuf)> {
|
||||||
let target_directory = working_directory
|
let target_directory = working_directory
|
||||||
.join(SOLC_CACHE_DIRECTORY)
|
.join(SOLC_CACHE_DIRECTORY)
|
||||||
.join(downloader.version.to_string());
|
.join(downloader.version.to_string());
|
||||||
let target_file = target_directory.join(downloader.target);
|
let target_file = target_directory.join(downloader.target);
|
||||||
|
|
||||||
let mut cache = SOLC_CACHER.lock().unwrap();
|
let mut cache = SOLC_CACHER.lock().await;
|
||||||
if cache.contains(&target_file) {
|
if cache.contains(&target_file) {
|
||||||
tracing::debug!("using cached solc: {}", target_file.display());
|
tracing::debug!("using cached solc: {}", target_file.display());
|
||||||
return Ok(target_file);
|
return Ok((downloader.version.clone(), target_file));
|
||||||
}
|
}
|
||||||
|
|
||||||
create_dir_all(target_directory)?;
|
create_dir_all(&target_directory).with_context(|| {
|
||||||
download_to_file(&target_file, downloader)?;
|
format!(
|
||||||
|
"Failed to create solc cache directory: {}",
|
||||||
|
target_directory.display()
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
download_to_file(&target_file, downloader)
|
||||||
|
.await
|
||||||
|
.with_context(|| {
|
||||||
|
format!(
|
||||||
|
"Failed to write downloaded solc to {}",
|
||||||
|
target_file.display()
|
||||||
|
)
|
||||||
|
})?;
|
||||||
cache.insert(target_file.clone());
|
cache.insert(target_file.clone());
|
||||||
|
|
||||||
Ok(target_file)
|
Ok((downloader.version.clone(), target_file))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn download_to_file(path: &Path, downloader: &GHDownloader) -> anyhow::Result<()> {
|
async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::Result<()> {
|
||||||
tracing::info!("caching file: {}", path.display());
|
|
||||||
|
|
||||||
let Ok(file) = File::create_new(path) else {
|
let Ok(file) = File::create_new(path) else {
|
||||||
tracing::debug!("cache file already exists: {}", path.display());
|
|
||||||
return Ok(());
|
return Ok(());
|
||||||
};
|
};
|
||||||
|
|
||||||
#[cfg(unix)]
|
#[cfg(unix)]
|
||||||
{
|
{
|
||||||
let mut permissions = file.metadata()?.permissions();
|
let mut permissions = file
|
||||||
|
.metadata()
|
||||||
|
.with_context(|| format!("Failed to read metadata for {}", path.display()))?
|
||||||
|
.permissions();
|
||||||
permissions.set_mode(permissions.mode() | 0o111);
|
permissions.set_mode(permissions.mode() | 0o111);
|
||||||
file.set_permissions(permissions)?;
|
file.set_permissions(permissions).with_context(|| {
|
||||||
|
format!("Failed to set executable permissions on {}", path.display())
|
||||||
|
})?;
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut file = BufWriter::new(file);
|
let mut file = BufWriter::new(file);
|
||||||
file.write_all(&downloader.download()?)?;
|
file.write_all(
|
||||||
file.flush()?;
|
&downloader
|
||||||
|
.download()
|
||||||
|
.await
|
||||||
|
.context("Failed to download solc binary bytes")?,
|
||||||
|
)
|
||||||
|
.with_context(|| format!("Failed to write solc binary to {}", path.display()))?;
|
||||||
|
file.flush()
|
||||||
|
.with_context(|| format!("Failed to flush file {}", path.display()))?;
|
||||||
drop(file);
|
drop(file);
|
||||||
|
|
||||||
#[cfg(target_os = "macos")]
|
#[cfg(target_os = "macos")]
|
||||||
@@ -64,8 +89,20 @@ fn download_to_file(path: &Path, downloader: &GHDownloader) -> anyhow::Result<()
|
|||||||
.stderr(std::process::Stdio::null())
|
.stderr(std::process::Stdio::null())
|
||||||
.stdout(std::process::Stdio::null())
|
.stdout(std::process::Stdio::null())
|
||||||
.stdout(std::process::Stdio::null())
|
.stdout(std::process::Stdio::null())
|
||||||
.spawn()?
|
.spawn()
|
||||||
.wait()?;
|
.with_context(|| {
|
||||||
|
format!(
|
||||||
|
"Failed to spawn xattr to remove quarantine attribute on {}",
|
||||||
|
path.display()
|
||||||
|
)
|
||||||
|
})?
|
||||||
|
.wait()
|
||||||
|
.with_context(|| {
|
||||||
|
format!(
|
||||||
|
"Failed waiting for xattr operation to complete on {}",
|
||||||
|
path.display()
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user