Mirror of https://github.com/pezkuwichain/revive-differential-tests.git, synced 2026-04-23 00:17:57 +00:00
Compare commits
29 Commits
| SHA1 |
|---|
| 1659164310 |
| 0a68800856 |
| 8303d789cd |
| 40bf44fe58 |
| ba8ad03290 |
| 3dd99f3ac8 |
| 6618463c68 |
| dffb80ac0a |
| 43a1114337 |
| 3a07ea042b |
| 9e2aa972db |
| 86f2173e8b |
| 6e658aec49 |
| 1aba74ec3e |
| 180bd64bc5 |
| 967cbac349 |
| a8d84c8360 |
| c83a755416 |
| 0711216539 |
| b40c17c0af |
| 8ae994f9de |
| 3f3cbfa934 |
| c676114fe1 |
| 92885351ed |
| e16f8ebf59 |
| d482808eb2 |
| 1f84ce6f61 |
| 765569a8b6 |
| 6e64f678ee |
+168 -143 (GitHub Actions CI workflow)

```diff
@@ -18,95 +18,134 @@ env:
   POLKADOT_VERSION: polkadot-stable2506-2
 
 jobs:
-  machete:
-    name: Check for Unneeded Dependencies
-    runs-on: ubuntu-24.04
-    env:
-      SCCACHE_GHA_ENABLED: "true"
-      RUSTC_WRAPPER: "sccache"
-    steps:
-      - name: Checkout This Repository
-        uses: actions/checkout@v4
-        with:
-          submodules: recursive
-      - name: Run Sccache
-        uses: mozilla-actions/sccache-action@v0.0.9
-      - name: Install the Rust Toolchain
-        uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Install the Cargo Make Binary
-        uses: davidB/rust-cargo-make@v1
-      - name: Run Cargo Machete
-        run: cargo make machete
-  check-fmt:
-    name: Check Formatting
-    runs-on: ubuntu-24.04
-    env:
-      SCCACHE_GHA_ENABLED: "true"
-      RUSTC_WRAPPER: "sccache"
-    steps:
-      - name: Checkout This Repository
-        uses: actions/checkout@v4
-        with:
-          submodules: recursive
-      - name: Run Sccache
-        uses: mozilla-actions/sccache-action@v0.0.9
-      - name: Install the Rust Toolchain
-        uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Install the Cargo Make Binary
-        uses: davidB/rust-cargo-make@v1
-      - name: Run Cargo Formatter
-        run: cargo make fmt-check
-  check-clippy:
-    name: Check Clippy Lints
-    runs-on: ubuntu-24.04
-    env:
-      SCCACHE_GHA_ENABLED: "true"
-      RUSTC_WRAPPER: "sccache"
-    steps:
-      - name: Checkout This Repository
-        uses: actions/checkout@v4
-        with:
-          submodules: recursive
-      - name: Run Sccache
-        uses: mozilla-actions/sccache-action@v0.0.9
-      - name: Install the Rust Toolchain
-        uses: actions-rust-lang/setup-rust-toolchain@v1
-      - name: Install the Cargo Make Binary
-        uses: davidB/rust-cargo-make@v1
-      - name: Run Cargo Clippy
-        run: cargo make clippy
-  test:
-    name: Unit Tests
+  cache-polkadot:
+    name: Build and cache Polkadot binaries on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
-    needs: cache-polkadot
     strategy:
       matrix:
         os: [ubuntu-24.04, macos-14]
-    env:
-      SCCACHE_GHA_ENABLED: "true"
-      RUSTC_WRAPPER: "sccache"
-      POLKADOT_SDK_COMMIT_HASH: "30cda2aad8612a10ff729d494acd9d5353294d63"
     steps:
-      - name: Checkout This Repository
+      - name: Checkout repo and submodules
         uses: actions/checkout@v4
         with:
           submodules: recursive
-      - name: Run Sccache
-        uses: mozilla-actions/sccache-action@v0.0.9
-      - name: Install the Rust Toolchain
-        uses: actions-rust-lang/setup-rust-toolchain@v1
-        with:
-          target: "wasm32-unknown-unknown"
-          components: "rust-src,rust-std"
-      - name: Install the Cargo Make Binary
-        uses: davidB/rust-cargo-make@v1
-      - name: Caching Step
-        uses: actions/cache@v4
+      - name: Install dependencies (Linux)
+        if: matrix.os == 'ubuntu-24.04'
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y protobuf-compiler clang libclang-dev
+          rustup target add wasm32-unknown-unknown
+          rustup component add rust-src
+
+      - name: Install dependencies (macOS)
+        if: matrix.os == 'macos-14'
+        run: |
+          brew install protobuf
+          rustup target add wasm32-unknown-unknown
+          rustup component add rust-src
+
+      - name: Cache binaries
+        id: cache
+        uses: actions/cache@v3
         with:
           path: |
+            ~/.cargo/bin/substrate-node
             ~/.cargo/bin/eth-rpc
-            ~/.cargo/bin/revive-dev-node
-          key: polkadot-binaries-${{ env.POLKADOT_SDK_COMMIT_HASH }}-${{ matrix.os }}
+          key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}
+
+      - name: Build substrate-node
+        if: steps.cache.outputs.cache-hit != 'true'
+        run: |
+          cd polkadot-sdk
+          cargo install --locked --force --profile=production --path substrate/bin/node/cli --bin substrate-node --features cli
+
+      - name: Build eth-rpc
+        if: steps.cache.outputs.cache-hit != 'true'
+        run: |
+          cd polkadot-sdk
+          cargo install --path substrate/frame/revive/rpc --bin eth-rpc
+
+      - name: Cache downloaded Polkadot binaries
+        id: cache-polkadot
+        uses: actions/cache@v3
+        with:
+          path: |
+            ~/polkadot-cache/polkadot
+            ~/polkadot-cache/polkadot-execute-worker
+            ~/polkadot-cache/polkadot-prepare-worker
+            ~/polkadot-cache/polkadot-parachain
+          key: polkadot-downloaded-${{ matrix.os }}-${{ env.POLKADOT_VERSION }}
+
+      - name: Download Polkadot binaries on macOS
+        if: matrix.os == 'macos-14' && steps.cache-polkadot.outputs.cache-hit != 'true'
+        run: |
+          mkdir -p ~/polkadot-cache
+          curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-aarch64-apple-darwin -o ~/polkadot-cache/polkadot
+          curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-execute-worker-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-execute-worker
+          curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-prepare-worker-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-prepare-worker
+          curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-parachain-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-parachain
+          chmod +x ~/polkadot-cache/*
+
+      - name: Download Polkadot binaries on Ubuntu
+        if: matrix.os == 'ubuntu-24.04' && steps.cache-polkadot.outputs.cache-hit != 'true'
+        run: |
+          mkdir -p ~/polkadot-cache
+          curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot -o ~/polkadot-cache/polkadot
+          curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-execute-worker -o ~/polkadot-cache/polkadot-execute-worker
+          curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-prepare-worker -o ~/polkadot-cache/polkadot-prepare-worker
+          curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-parachain -o ~/polkadot-cache/polkadot-parachain
+          chmod +x ~/polkadot-cache/*
+
+  ci:
+    name: CI on ${{ matrix.os }}
+    needs: cache-polkadot
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-24.04, macos-14]
+
+    steps:
+      - name: Checkout repo
+        uses: actions/checkout@v4
+
+      - name: Restore binaries from cache
+        uses: actions/cache@v3
+        with:
+          path: |
+            ~/.cargo/bin/substrate-node
+            ~/.cargo/bin/eth-rpc
+          key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}
+
+      - name: Restore downloaded Polkadot binaries from cache
+        uses: actions/cache@v3
+        with:
+          path: |
+            ~/polkadot-cache/polkadot
+            ~/polkadot-cache/polkadot-execute-worker
+            ~/polkadot-cache/polkadot-prepare-worker
+            ~/polkadot-cache/polkadot-parachain
+          key: polkadot-downloaded-${{ matrix.os }}-${{ env.POLKADOT_VERSION }}
+
+      - name: Install Polkadot binaries
+        run: |
+          sudo cp ~/polkadot-cache/polkadot /usr/local/bin/
+          sudo cp ~/polkadot-cache/polkadot-execute-worker /usr/local/bin/
+          sudo cp ~/polkadot-cache/polkadot-prepare-worker /usr/local/bin/
+          sudo cp ~/polkadot-cache/polkadot-parachain /usr/local/bin/
+          sudo chmod +x /usr/local/bin/polkadot*
+
+      - name: Setup Rust toolchain
+        uses: actions-rust-lang/setup-rust-toolchain@v1
+        with:
+          rustflags: ""
+
+      - name: Add wasm32 target and formatting
+        run: |
+          rustup target add wasm32-unknown-unknown
+          rustup component add rust-src rustfmt clippy
+
       - name: Install Geth on Ubuntu
         if: matrix.os == 'ubuntu-24.04'
         run: |
@@ -139,6 +178,7 @@ jobs:
           curl -sL https://github.com/paritytech/revive/releases/download/v0.3.0/resolc-x86_64-unknown-linux-musl -o resolc
           chmod +x resolc
           sudo mv resolc /usr/local/bin
+
       - name: Install Geth on macOS
         if: matrix.os == 'macos-14'
         run: |
@@ -150,79 +190,64 @@ jobs:
           curl -sL https://github.com/paritytech/revive/releases/download/v0.3.0/resolc-universal-apple-darwin -o resolc
           chmod +x resolc
           sudo mv resolc /usr/local/bin
 
       - name: Install Kurtosis on macOS
         if: matrix.os == 'macos-14'
         run: brew install kurtosis-tech/tap/kurtosis-cli
 
       - name: Install Kurtosis on Ubuntu
         if: matrix.os == 'ubuntu-24.04'
         run: |
           echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
           sudo apt update
           sudo apt install kurtosis-cli
-      - name: Run Tests
-        run: cargo make test
-  cache-polkadot:
-    name: Build and Cache Polkadot Binaries on ${{ matrix.os }}
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        os: [ubuntu-24.04, macos-14]
-    env:
-      SCCACHE_GHA_ENABLED: "true"
-      RUSTC_WRAPPER: "sccache"
-      RUSTFLAGS: "-Awarnings"
-      POLKADOT_SDK_COMMIT_HASH: "30cda2aad8612a10ff729d494acd9d5353294d63"
-    steps:
-      - name: Caching Step
-        id: cache-step
-        uses: actions/cache@v4
-        with:
-          path: |
-            ~/.cargo/bin/eth-rpc
-            ~/.cargo/bin/revive-dev-node
-          key: polkadot-binaries-${{ env.POLKADOT_SDK_COMMIT_HASH }}-${{ matrix.os }}
-      - name: Checkout the Polkadot SDK Repository
-        uses: actions/checkout@v4
-        if: steps.cache-step.outputs.cache-hit != 'true'
-        with:
-          repository: paritytech/polkadot-sdk
-          ref: ${{ env.POLKADOT_SDK_COMMIT_HASH }}
-          submodules: recursive
-      - name: Run Sccache
-        uses: mozilla-actions/sccache-action@v0.0.9
-        if: steps.cache-step.outputs.cache-hit != 'true'
-      - name: Install the Rust Toolchain
-        uses: actions-rust-lang/setup-rust-toolchain@v1
-        if: steps.cache-step.outputs.cache-hit != 'true'
-        with:
-          target: "wasm32-unknown-unknown"
-          components: "rust-src"
-          toolchain: "1.90.0"
-
-      - name: Install dependencies (Linux)
-        if: matrix.os == 'ubuntu-24.04' && steps.cache-step.outputs.cache-hit != 'true'
+      - name: Machete
+        uses: bnjbvr/cargo-machete@v0.7.1
+
+      - name: Format
+        run: make format
+
+      - name: Clippy
+        run: make clippy
+
+      - name: Check substrate-node version
+        run: substrate-node --version
+
+      - name: Check eth-rpc version
+        run: eth-rpc --version
+
+      - name: Check resolc version
+        run: resolc --version
+
+      - name: Check polkadot version
+        run: polkadot --version
+
+      - name: Check polkadot-parachain version
+        run: polkadot-parachain --version
+
+      - name: Check polkadot-execute-worker version
+        run: polkadot-execute-worker --version
+
+      - name: Check polkadot-prepare-worker version
+        run: polkadot-prepare-worker --version
+
+      - name: Test Formatting
+        run: make format
+
+      - name: Test Clippy
+        run: make clippy
+
+      - name: Test Machete
+        run: make machete
+
+      - name: Unit Tests
+        if: matrix.os == 'ubuntu-24.04'
+        run: cargo test --workspace -- --nocapture
+
+      # We can't install docker in the MacOS image used in CI and therefore we need to skip the
+      # Kurtosis and lighthouse related tests when running the CI on MacOS.
+      - name: Unit Tests
+        if: matrix.os == 'macos-14'
         run: |
-          sudo apt-get update
-          sudo apt-get install -y protobuf-compiler clang libclang-dev
-      - name: Install dependencies (macOS)
-        if: matrix.os == 'macos-14' && steps.cache-step.outputs.cache-hit != 'true'
-        run: |
-          brew install protobuf llvm
-          LLVM_PREFIX="$(brew --prefix llvm)"
-          echo "LDFLAGS=-L${LLVM_PREFIX}/lib" >> "$GITHUB_ENV"
-          echo "CPPFLAGS=-I${LLVM_PREFIX}/include" >> "$GITHUB_ENV"
-          echo "CMAKE_PREFIX_PATH=${LLVM_PREFIX}" >> "$GITHUB_ENV"
-          echo "LIBCLANG_PATH=${LLVM_PREFIX}/lib" >> "$GITHUB_ENV"
-          echo "DYLD_FALLBACK_LIBRARY_PATH=${LLVM_PREFIX}/lib" >> "$GITHUB_ENV"
-          echo "${LLVM_PREFIX}/bin" >> "$GITHUB_PATH"
-      - name: Build Polkadot Dependencies
-        if: steps.cache-step.outputs.cache-hit != 'true'
-        run: |
-          cargo build \
-            --locked \
-            --profile production \
-            --package revive-dev-node \
-            --package pallet-revive-eth-rpc;
-          mv ./target/production/revive-dev-node ~/.cargo/bin
-          mv ./target/production/eth-rpc ~/.cargo/bin
-          chmod +x ~/.cargo/bin/*
+          cargo test --workspace -- --nocapture --skip lighthouse_geth::tests::
```
+1 -3 (.gitignore)

```diff
@@ -3,15 +3,13 @@
 .DS_Store
 node_modules
 /*.json
-*.sh
 
 # We do not want to commit any log files that we produce from running the code locally so this is
 # added to the .gitignore file.
 *.log
 
 profile.json.gz
+resolc-compiler-tests
 workdir
 
 !/schema.json
-!/dev-genesis.json
-!/scripts/*
```
+3 -3 (.gitmodules)

```diff
@@ -1,3 +1,3 @@
-[submodule "resolc-compiler-tests"]
-	path = resolc-compiler-tests
-	url = https://github.com/paritytech/resolc-compiler-tests
+[submodule "polkadot-sdk"]
+	path = polkadot-sdk
+	url = https://github.com/paritytech/polkadot-sdk.git
```
rustfmt configuration (new file)

```diff
@@ -0,0 +1,25 @@
+# Basic
+edition = "2024"
+hard_tabs = true
+max_width = 100
+use_small_heuristics = "Max"
+# Imports
+imports_granularity = "Crate"
+reorder_imports = true
+# Consistency
+newline_style = "Unix"
+# Misc
+chain_width = 80
+spaces_around_ranges = false
+binop_separator = "Back"
+reorder_impl_items = false
+match_arm_leading_pipes = "Preserve"
+match_arm_blocks = false
+match_block_trailing_comma = true
+trailing_comma = "Vertical"
+trailing_semicolon = false
+use_field_init_shorthand = true
+# Format comments
+comment_width = 100
+wrap_comments = true
+
```
Generated +41 -232 (Cargo.lock)

```diff
@@ -782,15 +782,6 @@ dependencies = [
  "libc",
 ]
 
-[[package]]
-name = "ansi_term"
-version = "0.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
-dependencies = [
- "winapi",
-]
-
 [[package]]
 name = "anstream"
 version = "0.6.18"
@@ -1920,7 +1911,6 @@ dependencies = [
  "anstyle",
  "clap_lex",
  "strsim",
- "terminal_size",
 ]
 
 [[package]]
@@ -2338,7 +2328,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976"
 dependencies = [
  "data-encoding",
- "syn 2.0.101",
+ "syn 1.0.109",
 ]
 
 [[package]]
@@ -2937,22 +2927,6 @@ dependencies = [
  "sp-crypto-hashing",
 ]
 
-[[package]]
-name = "frame-decode"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c470df86cf28818dd3cd2fc4667b80dbefe2236c722c3dc1d09e7c6c82d6dfcd"
-dependencies = [
- "frame-metadata",
- "parity-scale-codec",
- "scale-decode",
- "scale-encode",
- "scale-info",
- "scale-type-resolver",
- "sp-crypto-hashing",
- "thiserror 2.0.12",
-]
-
 [[package]]
 name = "frame-metadata"
 version = "23.0.0"
@@ -4552,6 +4526,27 @@ dependencies = [
  "windows-sys 0.59.0",
 ]
 
+[[package]]
+name = "ml-test-runner"
+version = "0.1.0"
+dependencies = [
+ "alloy",
+ "anyhow",
+ "clap",
+ "revive-dt-common",
+ "revive-dt-compiler",
+ "revive-dt-config",
+ "revive-dt-core",
+ "revive-dt-format",
+ "revive-dt-node",
+ "revive-dt-node-interaction",
+ "revive-dt-report",
+ "temp-dir",
+ "tokio",
+ "tracing",
+ "tracing-subscriber",
+]
+
 [[package]]
 name = "moka"
 version = "0.12.10"
@@ -5590,7 +5585,6 @@ dependencies = [
  "clap",
  "moka",
  "once_cell",
- "regex",
  "schemars 1.0.4",
  "semver 1.0.26",
  "serde",
@@ -5629,7 +5623,6 @@ dependencies = [
  "semver 1.0.26",
  "serde",
  "serde_json",
- "serde_with",
  "strum",
  "temp-dir",
 ]
@@ -5639,7 +5632,6 @@ name = "revive-dt-core"
 version = "0.1.0"
 dependencies = [
  "alloy",
- "ansi_term",
  "anyhow",
  "bson",
  "cacache",
@@ -5657,7 +5649,6 @@ dependencies = [
  "semver 1.0.26",
  "serde",
  "serde_json",
- "subxt 0.44.0",
  "tokio",
  "tracing",
  "tracing-appender",
@@ -5671,7 +5662,7 @@ dependencies = [
  "alloy",
  "anyhow",
  "futures",
- "itertools 0.14.0",
+ "regex",
  "revive-common",
  "revive-dt-common",
  "schemars 1.0.4",
@@ -5694,14 +5685,12 @@ dependencies = [
  "revive-dt-config",
  "revive-dt-format",
  "revive-dt-node-interaction",
- "revive-dt-report",
  "serde",
  "serde_json",
  "serde_with",
  "serde_yaml_ng",
  "sp-core",
  "sp-runtime",
- "subxt 0.44.0",
  "temp-dir",
  "tokio",
  "tower 0.5.2",
@@ -5718,7 +5707,6 @@ dependencies = [
  "futures",
  "revive-common",
  "revive-dt-format",
- "revive-dt-report",
 ]
 
 [[package]]
@@ -5728,7 +5716,6 @@ dependencies = [
 "alloy",
 "anyhow",
 "indexmap 2.10.0",
-"itertools 0.14.0",
 "paste",
 "revive-dt-common",
 "revive-dt-compiler",
@@ -5976,7 +5963,7 @@ dependencies = [
  "security-framework 3.3.0",
  "security-framework-sys",
  "webpki-root-certs 0.26.11",
- "windows-sys 0.59.0",
+ "windows-sys 0.52.0",
 ]
 
 [[package]]
@@ -7373,48 +7360,11 @@ dependencies = [
  "serde",
  "serde_json",
  "sp-crypto-hashing",
- "subxt-core 0.43.0",
- "subxt-lightclient 0.43.0",
- "subxt-macro 0.43.0",
- "subxt-metadata 0.43.0",
- "subxt-rpcs 0.43.0",
- "thiserror 2.0.12",
- "tokio",
- "tokio-util",
- "tracing",
- "url",
- "wasm-bindgen-futures",
- "web-time",
-]
-
-[[package]]
-name = "subxt"
-version = "0.44.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ddbf938ac1d86a361a84709a71cdbae5d87f370770b563651d1ec052eed9d0b4"
-dependencies = [
- "async-trait",
- "derive-where",
- "either",
- "frame-metadata",
- "futures",
- "hex",
- "jsonrpsee",
- "parity-scale-codec",
- "primitive-types 0.13.1",
- "scale-bits",
- "scale-decode",
- "scale-encode",
- "scale-info",
- "scale-value",
- "serde",
- "serde_json",
- "sp-crypto-hashing",
- "subxt-core 0.44.0",
- "subxt-lightclient 0.44.0",
- "subxt-macro 0.44.0",
- "subxt-metadata 0.44.0",
- "subxt-rpcs 0.44.0",
+ "subxt-core",
+ "subxt-lightclient",
+ "subxt-macro",
+ "subxt-metadata",
+ "subxt-rpcs",
  "thiserror 2.0.12",
  "tokio",
  "tokio-util",
@@ -7436,24 +7386,7 @@ dependencies = [
  "quote",
  "scale-info",
  "scale-typegen",
- "subxt-metadata 0.43.0",
- "syn 2.0.101",
- "thiserror 2.0.12",
-]
-
-[[package]]
-name = "subxt-codegen"
-version = "0.44.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c250ad8cd102d40ae47977b03295a2ff791375f30ddc7474d399fb56efb793b"
-dependencies = [
- "heck",
- "parity-scale-codec",
- "proc-macro2",
- "quote",
- "scale-info",
- "scale-typegen",
- "subxt-metadata 0.44.0",
+ "subxt-metadata",
  "syn 2.0.101",
  "thiserror 2.0.12",
 ]
@@ -7467,7 +7400,7 @@ dependencies = [
  "base58",
  "blake2",
  "derive-where",
- "frame-decode 0.8.3",
+ "frame-decode",
  "frame-metadata",
  "hashbrown 0.14.5",
  "hex",
@@ -7483,37 +7416,7 @@ dependencies = [
  "serde",
  "serde_json",
  "sp-crypto-hashing",
- "subxt-metadata 0.43.0",
- "thiserror 2.0.12",
- "tracing",
-]
-
-[[package]]
-name = "subxt-core"
-version = "0.44.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5705c5b420294524e41349bf23c6b11aa474ce731de7317f4153390e1927f702"
-dependencies = [
- "base58",
- "blake2",
- "derive-where",
- "frame-decode 0.9.0",
- "frame-metadata",
- "hashbrown 0.14.5",
- "hex",
- "impl-serde",
- "keccak-hash",
- "parity-scale-codec",
- "primitive-types 0.13.1",
- "scale-bits",
- "scale-decode",
- "scale-encode",
- "scale-info",
- "scale-value",
- "serde",
- "serde_json",
- "sp-crypto-hashing",
- "subxt-metadata 0.44.0",
+ "subxt-metadata",
  "thiserror 2.0.12",
  "tracing",
 ]
@@ -7535,23 +7438,6 @@ dependencies = [
  "tracing",
 ]
 
-[[package]]
-name = "subxt-lightclient"
-version = "0.44.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64e02732a6c9ae46bc282c1a741b3d3e494021b3e87e7e92cfb3620116d92911"
-dependencies = [
- "futures",
- "futures-util",
- "serde",
- "serde_json",
- "smoldot-light",
- "thiserror 2.0.12",
- "tokio",
- "tokio-stream",
- "tracing",
-]
-
 [[package]]
 name = "subxt-macro"
 version = "0.43.0"
@@ -7563,26 +7449,9 @@ dependencies = [
  "proc-macro-error2",
  "quote",
  "scale-typegen",
- "subxt-codegen 0.43.0",
- "subxt-metadata 0.43.0",
- "subxt-utils-fetchmetadata 0.43.0",
- "syn 2.0.101",
-]
-
-[[package]]
-name = "subxt-macro"
-version = "0.44.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "501bf358698f5ab02a6199a1fcd3f1b482e2f5b6eb5d185411e6a74a175ec8e8"
-dependencies = [
- "darling 0.20.11",
- "parity-scale-codec",
- "proc-macro-error2",
- "quote",
- "scale-typegen",
- "subxt-codegen 0.44.0",
- "subxt-metadata 0.44.0",
- "subxt-utils-fetchmetadata 0.44.0",
+ "subxt-codegen",
+ "subxt-metadata",
+ "subxt-utils-fetchmetadata",
  "syn 2.0.101",
 ]
 
@@ -7592,22 +7461,7 @@ version = "0.43.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2c134068711c0c46906abc0e6e4911204420331530738e18ca903a5469364d9f"
 dependencies = [
- "frame-decode 0.8.3",
- "frame-metadata",
- "hashbrown 0.14.5",
- "parity-scale-codec",
- "scale-info",
- "sp-crypto-hashing",
- "thiserror 2.0.12",
-]
-
-[[package]]
-name = "subxt-metadata"
-version = "0.44.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01fb7c0bfafad78dda7084c6a2444444744af3bbf7b2502399198b9b4c20eddf"
-dependencies = [
- "frame-decode 0.9.0",
+ "frame-decode",
  "frame-metadata",
  "hashbrown 0.14.5",
  "parity-scale-codec",
@@ -7632,32 +7486,8 @@ dependencies = [
  "primitive-types 0.13.1",
  "serde",
  "serde_json",
- "subxt-core 0.43.0",
- "subxt-lightclient 0.43.0",
- "thiserror 2.0.12",
- "tokio-util",
- "tracing",
- "url",
-]
-
-[[package]]
-name = "subxt-rpcs"
-version = "0.44.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab68a9c20ecedb0cb7d62d64f884e6add91bb70485783bf40aa8eac5c389c6e0"
-dependencies = [
- "derive-where",
- "frame-metadata",
- "futures",
- "hex",
- "impl-serde",
- "jsonrpsee",
- "parity-scale-codec",
- "primitive-types 0.13.1",
- "serde",
- "serde_json",
- "subxt-core 0.44.0",
- "subxt-lightclient 0.44.0",
+ "subxt-core",
+ "subxt-lightclient",
  "thiserror 2.0.12",
  "tokio-util",
  "tracing",
@@ -7687,7 +7517,7 @@ dependencies = [
  "serde_json",
  "sha2 0.10.9",
  "sp-crypto-hashing",
- "subxt-core 0.43.0",
+ "subxt-core",
  "thiserror 2.0.12",
  "zeroize",
 ]
@@ -7703,17 +7533,6 @@ dependencies = [
  "thiserror 2.0.12",
 ]
 
-[[package]]
-name = "subxt-utils-fetchmetadata"
-version = "0.44.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e450f6812a653c5a3e63a079aa3b60a3f4c362722753c3222286eaa1800f9002"
-dependencies = [
- "hex",
- "parity-scale-codec",
- "thiserror 2.0.12",
-]
-
 [[package]]
 name = "syn"
 version = "1.0.109"
@@ -7840,16 +7659,6 @@ dependencies = [
  "winapi-util",
 ]
 
-[[package]]
-name = "terminal_size"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed"
-dependencies = [
- "rustix",
- "windows-sys 0.59.0",
-]
-
 [[package]]
 name = "thiserror"
 version = "1.0.69"
@@ -9517,7 +9326,7 @@ dependencies = [
  "serde_json",
  "sha2 0.10.9",
  "sp-core",
- "subxt 0.43.0",
+ "subxt",
  "subxt-signer",
  "thiserror 1.0.69",
  "tokio",
@@ -9577,7 +9386,7 @@ dependencies = [
  "async-trait",
  "futures",
  "lazy_static",
- "subxt 0.43.0",
+ "subxt",
  "subxt-signer",
  "tokio",
  "zombienet-configuration",
```
+2 -5 (workspace Cargo.toml)

```diff
@@ -22,11 +22,10 @@ revive-dt-node-pool = { version = "0.1.0", path = "crates/node-pool" }
 revive-dt-report = { version = "0.1.0", path = "crates/report" }
 revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }
 
-ansi_term = "0.12.1"
 anyhow = "1.0"
 bson = { version = "2.15.0" }
 cacache = { version = "13.1.0" }
-clap = { version = "4", features = ["derive", "wrap_help"] }
+clap = { version = "4", features = ["derive"] }
 dashmap = { version = "6.1.0" }
 foundry-compilers-artifacts = { version = "0.18.0" }
 futures = { version = "0.3.31" }
@@ -50,7 +49,6 @@ sha2 = { version = "0.10.9" }
 sp-core = "36.1.0"
 sp-runtime = "41.1.0"
 strum = { version = "0.27.2", features = ["derive"] }
-subxt = { version = "0.44.0" }
 temp-dir = { version = "0.1.16" }
 tempfile = "3.3"
 thiserror = "2"
@@ -69,14 +67,13 @@ tracing-subscriber = { version = "0.3.19", default-features = false, features =
   "env-filter",
 ] }
 indexmap = { version = "2.10.0", default-features = false }
-itertools = { version = "0.14.0" }
 
 # revive compiler
 revive-solc-json-interface = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
 revive-common = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
 revive-differential = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
 
-zombienet-sdk = { git = "https://github.com/paritytech/zombienet-sdk.git", rev = "891f6554354ce466abd496366dbf8b4f82141241" }
+zombienet-sdk = { git = "https://github.com/paritytech/zombienet-sdk.git", rev ="891f6554354ce466abd496366dbf8b4f82141241" }
 
 [workspace.dependencies.alloy]
 version = "1.0.37"
```
Makefile (new file)

```diff
@@ -0,0 +1,15 @@
+.PHONY: format clippy test machete
+
+format:
+	cargo +nightly fmt --all -- --check
+
+clippy:
+	cargo clippy --all-features --workspace -- --deny warnings
+
+machete:
+	cargo install cargo-machete
+	cargo machete crates
+
+test: format clippy machete
+	cargo test --workspace -- --nocapture
+
```
Makefile.toml (cargo-make configuration, deleted)

```diff
@@ -1,21 +0,0 @@
-[config]
-default_to_workspace = false
-
-[tasks.machete]
-command = "cargo"
-args = ["machete", "crates"]
-install_crate = "cargo-machete"
-
-[tasks.fmt-check]
-command = "cargo"
-args = ["fmt", "--all", "--", "--check"]
-install_crate = "rustfmt"
-
-[tasks.clippy]
-command = "cargo"
-args = ["clippy", "--all-features", "--workspace", "--", "--deny", "warnings"]
-install_crate = "clippy"
-
-[tasks.test]
-command = "cargo"
-args = ["test", "--workspace", "--", "--nocapture"]
```
README

````diff
@@ -9,7 +9,7 @@
 This project compiles and executes declarative smart-contract tests against multiple platforms, then compares behavior (status, return data, events, and state diffs). Today it supports:
 
 - Geth (EVM reference implementation)
-- Revive Dev Node (Substrate-based PolkaVM + `eth-rpc` proxy)
+- Revive Kitchensink (Substrate-based PolkaVM + `eth-rpc` proxy)
 
 Use it to:
 
@@ -39,9 +39,9 @@ This repository contains none of the tests and only contains the testing framewo
 This section describes the required dependencies that this framework requires to run. Compiling this framework is pretty straightforward and no additional dependencies beyond what's specified in the `Cargo.toml` file should be required.
 
 - Stable Rust
-- Geth - When doing differential testing against the PVM we submit transactions to a Geth node and to Revive Dev Node to compare them.
-- Revive Dev Node - When doing differential testing against the PVM we submit transactions to a Geth node and to Revive Dev Node to compare them.
-- ETH-RPC - All communication with Revive Dev Node is done through the ETH RPC.
+- Geth - When doing differential testing against the PVM we submit transactions to a Geth node and to Kitchensink to compare them.
+- Kitchensink - When doing differential testing against the PVM we submit transactions to a Geth node and to Kitchensink to compare them.
+- ETH-RPC - All communication with Kitchensink is done through the ETH RPC.
 - Solc - This is actually a transitive dependency, while this tool doesn't require solc as it downloads the versions that it requires, resolc requires that Solc is installed and available in the path.
 - Resolc - This is required to compile the contracts to PolkaVM bytecode.
 - Kurtosis - The Kurtosis CLI tool is required for the production Ethereum mainnet-like node configuration with Geth as the execution layer and lighthouse as the consensus layer. Kurtosis also requires docker to be installed since it runs everything inside of docker containers.
@@ -52,69 +52,192 @@ All of the above need to be installed and available in the path in order for the
 
 This tool is being updated quite frequently. Therefore, it's recommended that you don't install the tool and then run it, but rather that you run it from the root of the directory using `cargo run --release`. The help command of the tool gives you all of the information you need to know about each of the options and flags that the tool offers.
 
+```bash
+$ cargo run --release -- execute-tests --help
+Error: Executes tests in the MatterLabs format differentially on multiple targets concurrently
+
+Usage: retester execute-tests [OPTIONS]
+
+Options:
+  -w, --working-directory <WORKING_DIRECTORY>
+          The working directory that the program will use for all of the temporary artifacts needed at runtime.
+
+          If not specified, then a temporary directory will be created and used by the program for all temporary artifacts.
+
+          [default: ]
+
+  -p, --platform <PLATFORMS>
+          The set of platforms that the differential tests should run on
+
+          [default: geth-evm-solc,revive-dev-node-polkavm-resolc]
+
+          Possible values:
+          - geth-evm-solc: The Go-ethereum reference full node EVM implementation with the solc compiler
+          - kitchensink-polkavm-resolc: The kitchensink node with the PolkaVM backend with the resolc compiler
+          - kitchensink-revm-solc: The kitchensink node with the REVM backend with the solc compiler
+          - revive-dev-node-polkavm-resolc: The revive dev node with the PolkaVM backend with the resolc compiler
+          - revive-dev-node-revm-solc: The revive dev node with the REVM backend with the solc compiler
+
+  -c, --corpus <CORPUS>
+          A list of test corpus JSON files to be tested
+
+  -h, --help
+          Print help (see a summary with '-h')
+
+Solc Configuration:
+      --solc.version <VERSION>
+          Specifies the default version of the Solc compiler that should be used if there is no override specified by one of the test cases
+
+          [default: 0.8.29]
+
+Resolc Configuration:
+      --resolc.path <resolc.path>
+          Specifies the path of the resolc compiler to be used by the tool.
+
+          If this is not specified, then the tool assumes that it should use the resolc binary that's provided in the user's $PATH.
+
+          [default: resolc]
+
+Geth Configuration:
+      --geth.path <geth.path>
+          Specifies the path of the geth node to be used by the tool.
+
+          If this is not specified, then the tool assumes that it should use the geth binary that's provided in the user's $PATH.
+
+          [default: geth]
+
+      --geth.start-timeout-ms <geth.start-timeout-ms>
+          The amount of time to wait upon startup before considering that the node timed out
+
+          [default: 5000]
+
+Kitchensink Configuration:
+      --kitchensink.path <kitchensink.path>
+          Specifies the path of the kitchensink node to be used by the tool.
+
+          If this is not specified, then the tool assumes that it should use the kitchensink binary that's provided in the user's $PATH.
+
+          [default: substrate-node]
+
+      --kitchensink.start-timeout-ms <kitchensink.start-timeout-ms>
+          The amount of time to wait upon startup before considering that the node timed out
+
+          [default: 5000]
+
+      --kitchensink.dont-use-dev-node
+          This configures the tool to use Kitchensink instead of using the revive-dev-node
+
+Revive Dev Node Configuration:
+      --revive-dev-node.path <revive-dev-node.path>
+          Specifies the path of the revive dev node to be used by the tool.
+
+          If this is not specified, then the tool assumes that it should use the revive dev node binary that's provided in the user's $PATH.
+
+          [default: revive-dev-node]
+
+      --revive-dev-node.start-timeout-ms <revive-dev-node.start-timeout-ms>
+          The amount of time to wait upon startup before considering that the node timed out
+
+          [default: 5000]
+
+Eth RPC Configuration:
+      --eth-rpc.path <eth-rpc.path>
+          Specifies the path of the ETH RPC to be used by the tool.
+
+          If this is not specified, then the tool assumes that it should use the ETH RPC binary that's provided in the user's $PATH.
+
+          [default: eth-rpc]
+
+      --eth-rpc.start-timeout-ms <eth-rpc.start-timeout-ms>
+          The amount of time to wait upon startup before considering that the node timed out
+
+          [default: 5000]
+
+Genesis Configuration:
+      --genesis.path <genesis.path>
+          Specifies the path of the genesis file to use for the nodes that are started.
+
+          This is expected to be the path of a JSON geth genesis file.
+
+Wallet Configuration:
+      --wallet.default-private-key <DEFAULT_KEY>
+          The private key of the default signer
+
+          [default: 0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d]
+
+      --wallet.additional-keys <ADDITIONAL_KEYS>
+          This argument controls which private keys the nodes should have access to and be added to its wallet signers. With a value of N, private keys (0, N] will be added to the signer set of the node
+
+          [default: 100000]
+
+Concurrency Configuration:
+      --concurrency.number-of-nodes <NUMBER_OF_NODES>
+          Determines the amount of nodes that will be spawned for each chain
+
+          [default: 5]
+
+      --concurrency.number-of-threads <NUMBER_OF_THREADS>
+          Determines the amount of tokio worker threads that will will be used
+
+          [default: 16]
+
+      --concurrency.number-of-concurrent-tasks <NUMBER_CONCURRENT_TASKS>
+          Determines the amount of concurrent tasks that will be spawned to run tests.
+
+          Defaults to 10 x the number of nodes.
+
+      --concurrency.ignore-concurrency-limit
+          Determines if the concurrency limit should be ignored or not
+
+Compilation Configuration:
+      --compilation.invalidate-cache
+          Controls if the compilation cache should be invalidated or not
+
+Report Configuration:
+      --report.include-compiler-input
+          Controls if the compiler input is included in the final report
+
+      --report.include-compiler-output
+          Controls if the compiler output is included in the final report
+```
+
+To run tests with this tool you need a corpus JSON file that defines the tests included in the corpus. The simplest corpus file looks like the following:
+
+```json
+{
+    "name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
+    "path": "resolc-compiler-tests/fixtures/solidity"
+}
+```
+
 > [!NOTE]
 > Note that the tests can be found in the [`resolc-compiler-tests`](https://github.com/paritytech/resolc-compiler-tests) repository.
 
+The above corpus file instructs the tool to look for all of the test cases contained within all of the metadata files of the specified directory.
+
 The simplest command to run this tool is the following:
 
 ```bash
-RUST_LOG="info" cargo run --release -- test \
-    --test ./resolc-compiler-tests/fixtures/solidity \
+RUST_LOG="info" cargo run --release -- execute-tests \
     --platform geth-evm-solc \
+    --corpus corp.json \
     --working-directory workdir \
+    --concurrency.number-of-nodes 5 \
+    --concurrency.ignore-concurrency-limit \
     > logs.log \
     2> output.log
 ```
 
-The above command will run the tool executing every one of the tests discovered in the path provided to the tool. All of the logs from the execution will be persisted in the `logs.log` file and all of the output of the tool will be persisted to the `output.log` file. If all that you're looking for is to run the tool and check which tests succeeded and failed, then the `output.log` file is what you need to be looking at. However, if you're contributing the to the tool then the `logs.log` file will be very valuable.
+The above command will run the tool executing every one of the tests discovered in the path specified in the corpus file. All of the logs from the execution will be persisted in the `logs.log` file and all of the output of the tool will be persisted to the `output.log` file. If all that you're looking for is to run the tool and check which tests succeeded and failed, then the `output.log` file is what you need to be looking at. However, if you're contributing the to the tool then the `logs.log` file will be very valuable.
 
-<details>
-<summary>User Managed Nodes</summary>
-
-This section describes how the user can make use of nodes that they manage rather than allowing the tool to spawn and manage the nodes on the user's behalf.
-
-> ⚠️ This is an advanced feature of the tool and could lead test successes or failures to not be reproducible. Please use this feature with caution and only if you understand the implications of running your own node instead of having the framework manage your nodes. ⚠️
-
-If you're an advanced user and you'd like to manage your own nodes instead of having the tool initialize, spawn, and manage them, then you can choose to run your own nodes and then provide them to the tool to make use of just like the following:
-
-```bash
-#!/usr/bin/env bash
-set -euo pipefail
-
-PLATFORM="revive-dev-node-revm-solc"
-
-retester export-genesis "$PLATFORM" > chainspec.json
-
-# Start revive-dev-node in a detached tmux session
-tmux new-session -d -s revive-dev-node \
-    'RUST_LOG="error,evm=debug,sc_rpc_server=info,runtime::revive=debug" revive-dev-node \
-    --dev \
-    --chain chainspec.json \
-    --force-authoring \
-    --rpc-methods Unsafe \
-    --rpc-cors all \
-    --rpc-max-connections 4294967295 \
-    --pool-limit 4294967295 \
-    --pool-kbytes 4294967295'
-sleep 5
-
-# Start eth-rpc in a detached tmux session
-tmux new-session -d -s eth-rpc \
-    'RUST_LOG="info,eth-rpc=debug" eth-rpc \
-    --dev \
-    --node-rpc-url ws://127.0.0.1:9944 \
-    --rpc-max-connections 4294967295'
-sleep 5
-
-# Run the tests (logs to files as before)
-RUST_LOG="info" retester test \
-    --platform "$PLATFORM" \
-    --corpus ./revive-differential-tests/fixtures/solidity \
-    --working-directory ./workdir \
-    --concurrency.number-of-nodes 1 \
-    --concurrency.number-of-concurrent-tasks 5 \
-    --revive-dev-node.existing-rpc-url "http://localhost:8545" \
-    > logs.log
+If you only want to run a subset of tests, then you can specify that in your corpus file. The following is an example:
+
+```json
+{
+    "name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
+    "paths": [
+        "path/to/a/single/metadata/file/I/want/to/run.json",
+        "path/to/a/directory/to/find/all/metadata/files/within"
+    ]
+}
 ```
-
-</details>
````
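The corpus schema shown in the README diff above (a `name` plus either a single `path` or a list of `paths`) maps naturally onto a small serde type. The following is a hypothetical sketch of deserializing such a file; only the three field names come from the README examples, and the type and function names are illustrative, not the tool's actual internals:

```rust
// Hypothetical sketch of deserializing the corpus files shown in the README
// diff above. Only the field names (`name`, `path`, `paths`) come from the
// README examples; everything else here is assumed for illustration.
use std::path::PathBuf;

use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum CorpusPaths {
    // {"name": ..., "path": "dir/with/metadata/files"}
    Single { path: PathBuf },
    // {"name": ..., "paths": ["file.json", "some/dir"]}
    Many { paths: Vec<PathBuf> },
}

#[derive(Debug, Deserialize)]
struct Corpus {
    name: String,
    #[serde(flatten)]
    paths: CorpusPaths,
}

fn load_corpus(file: &std::path::Path) -> anyhow::Result<Corpus> {
    let contents = std::fs::read_to_string(file)?;
    Ok(serde_json::from_str(&contents)?)
}
```

The untagged enum lets one struct accept either JSON layout, which matches the README's two examples without requiring two separate loaders.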
Binary file not shown.
Crate Cargo.toml

```diff
@@ -14,7 +14,6 @@ anyhow = { workspace = true }
 clap = { workspace = true }
 moka = { workspace = true, features = ["sync"] }
 once_cell = { workspace = true }
-regex = { workspace = true }
 semver = { workspace = true }
 serde = { workspace = true }
 schemars = { workspace = true }
```
Cached file-system module (Rust)

```diff
@@ -1,49 +1,48 @@
 //! This module implements a cached file system allowing for results to be stored in-memory rather
 //! rather being queried from the file system again.
 
-use std::fs;
-use std::io::{Error, Result};
-use std::path::{Path, PathBuf};
+use std::{
+    fs,
+    io::{Error, Result},
+    path::{Path, PathBuf},
+};
 
 use moka::sync::Cache;
 use once_cell::sync::Lazy;
 
 pub fn read(path: impl AsRef<Path>) -> Result<Vec<u8>> {
     static READ_CACHE: Lazy<Cache<PathBuf, Vec<u8>>> = Lazy::new(|| Cache::new(10_000));
 
     let path = path.as_ref().canonicalize()?;
     match READ_CACHE.get(path.as_path()) {
         Some(content) => Ok(content),
         None => {
             let content = fs::read(path.as_path())?;
             READ_CACHE.insert(path, content.clone());
             Ok(content)
-        }
+        },
     }
 }
 
 pub fn read_to_string(path: impl AsRef<Path>) -> Result<String> {
     let content = read(path)?;
     String::from_utf8(content).map_err(|_| {
-        Error::new(
-            std::io::ErrorKind::InvalidData,
-            "The contents of the file are not valid UTF8",
-        )
+        Error::new(std::io::ErrorKind::InvalidData, "The contents of the file are not valid UTF8")
     })
 }
 
 pub fn read_dir(path: impl AsRef<Path>) -> Result<Box<dyn Iterator<Item = Result<PathBuf>>>> {
     static READ_DIR_CACHE: Lazy<Cache<PathBuf, Vec<PathBuf>>> = Lazy::new(|| Cache::new(10_000));
 
     let path = path.as_ref().canonicalize()?;
     match READ_DIR_CACHE.get(path.as_path()) {
         Some(entries) => Ok(Box::new(entries.into_iter().map(Ok)) as Box<_>),
         None => {
             let entries = fs::read_dir(path.as_path())?
                 .flat_map(|maybe_entry| maybe_entry.map(|entry| entry.path()))
                 .collect();
             READ_DIR_CACHE.insert(path.clone(), entries);
             Ok(read_dir(path).unwrap())
-        }
+        },
     }
 }
```
|||||||
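A brief usage sketch of the cached helpers, assuming the module lives at `crate::cached_fs` as the call sites elsewhere in this diff suggest:

```rust
use std::path::Path;

fn load_twice(path: &Path) -> std::io::Result<()> {
    // The first call reads from disk and fills the cache; the second is
    // served from memory, since both calls canonicalize to the same key.
    let first = crate::cached_fs::read_to_string(path)?;
    let second = crate::cached_fs::read_to_string(path)?;
    assert_eq!(first, second);
    Ok(())
}
```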
@@ -1,6 +1,6 @@
 use std::{
     fs::{read_dir, remove_dir_all, remove_file},
     path::Path,
 };
 
 use anyhow::{Context, Result};
@@ -8,24 +8,21 @@ use anyhow::{Context, Result};
 /// This method clears the passed directory of all of the files and directories contained within
 /// without deleting the directory.
 pub fn clear_directory(path: impl AsRef<Path>) -> Result<()> {
     for entry in read_dir(path.as_ref())
         .with_context(|| format!("Failed to read directory: {}", path.as_ref().display()))?
     {
         let entry = entry.with_context(|| {
-            format!(
-                "Failed to read an entry in directory: {}",
-                path.as_ref().display()
-            )
+            format!("Failed to read an entry in directory: {}", path.as_ref().display())
         })?;
         let entry_path = entry.path();
 
         if entry_path.is_file() {
             remove_file(&entry_path)
                 .with_context(|| format!("Failed to remove file: {}", entry_path.display()))?
         } else {
             remove_dir_all(&entry_path)
                 .with_context(|| format!("Failed to remove directory: {}", entry_path.display()))?
         }
     }
     Ok(())
 }
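A minimal usage sketch; the `./workdir` path is illustrative, not from this diff:

```rust
use std::path::Path;

fn reset_workdir() -> anyhow::Result<()> {
    // Empties the working directory between runs while keeping the
    // directory itself, so later steps can assume it exists.
    clear_directory(Path::new("./workdir"))
}
```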
@@ -1,5 +1,4 @@
-use std::ops::ControlFlow;
-use std::time::Duration;
+use std::{ops::ControlFlow, time::Duration};
 
 use anyhow::{Context as _, Result, anyhow};
 
@@ -18,55 +17,51 @@ const EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION: Duration = Duration::from_secs(60);
 /// [`Break`]: ControlFlow::Break
 /// [`Continue`]: ControlFlow::Continue
 pub async fn poll<F, O>(
     polling_duration: Duration,
     polling_wait_behavior: PollingWaitBehavior,
     mut future: impl FnMut() -> F,
 ) -> Result<O>
 where
     F: Future<Output = Result<ControlFlow<O, ()>>>,
 {
     let mut retries = 0;
     let mut total_wait_duration = Duration::ZERO;
     let max_allowed_wait_duration = polling_duration;
 
     loop {
         if total_wait_duration >= max_allowed_wait_duration {
             break Err(anyhow!(
                 "Polling failed after {} retries and a total of {:?} of wait time",
                 retries,
                 total_wait_duration
             ));
         }
 
-        match future()
-            .await
-            .context("Polled future returned an error during polling loop")?
-        {
+        match future().await.context("Polled future returned an error during polling loop")? {
             ControlFlow::Continue(()) => {
                 let next_wait_duration = match polling_wait_behavior {
                     PollingWaitBehavior::Constant(duration) => duration,
-                    PollingWaitBehavior::ExponentialBackoff => {
-                        Duration::from_secs(2u64.pow(retries))
-                            .min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION)
-                    }
+                    PollingWaitBehavior::ExponentialBackoff =>
+                        Duration::from_secs(2u64.pow(retries))
+                            .min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION),
                 };
                 let next_wait_duration =
                     next_wait_duration.min(max_allowed_wait_duration - total_wait_duration);
                 total_wait_duration += next_wait_duration;
                 retries += 1;
 
                 tokio::time::sleep(next_wait_duration).await;
-            }
+            },
             ControlFlow::Break(output) => {
                 break Ok(output);
-            }
+            },
         }
     }
 }
 
 #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
 pub enum PollingWaitBehavior {
     Constant(Duration),
     #[default]
     ExponentialBackoff,
 }
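A minimal caller sketch for `poll`; the `check_ready` probe is a hypothetical stand-in, not part of this diff:

```rust
use std::{ops::ControlFlow, time::Duration};
use anyhow::Result;

// `check_ready` stands in for any readiness probe; here it is a stub.
async fn check_ready() -> Result<Option<u64>> {
    Ok(Some(42))
}

async fn wait_until_ready() -> Result<u64> {
    // Give up after 30 seconds in total; waits grow 1s, 2s, 4s, ... capped at 60s.
    poll(Duration::from_secs(30), PollingWaitBehavior::ExponentialBackoff, || async {
        match check_ready().await? {
            // Break resolves the poll with the final value.
            Some(block_number) => Ok(ControlFlow::Break(block_number)),
            // Continue schedules another attempt after the backoff wait.
            None => Ok(ControlFlow::Continue(())),
        }
    })
    .await
}
```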
@@ -1,21 +1,21 @@
 /// An iterator that could be either of two iterators.
 #[derive(Clone, Debug)]
 pub enum EitherIter<A, B> {
     A(A),
     B(B),
 }
 
 impl<A, B, T> Iterator for EitherIter<A, B>
 where
     A: Iterator<Item = T>,
     B: Iterator<Item = T>,
 {
     type Item = T;
 
     fn next(&mut self) -> Option<Self::Item> {
         match self {
             EitherIter::A(iter) => iter.next(),
             EitherIter::B(iter) => iter.next(),
         }
     }
 }
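A small sketch of the pattern this type enables: returning one of two distinct iterator types behind a single concrete type (the function itself is illustrative):

```rust
fn numbers(evens: bool) -> impl Iterator<Item = u32> {
    // The two arms produce different concrete iterator types; EitherIter
    // wraps them into one type so both can be returned from the function.
    if evens {
        EitherIter::A((0..10).filter(|n| n % 2 == 0))
    } else {
        EitherIter::B((0..10).map(|n| 2 * n + 1))
    }
}
```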
@@ -1,91 +1,90 @@
 use std::{
     borrow::Cow,
     collections::HashSet,
     path::{Path, PathBuf},
 };
 
 /// An iterator that finds files of a certain extension in the provided directory. You can think of
 /// this a glob pattern similar to: `${path}/**/*.md`
 pub struct FilesWithExtensionIterator {
     /// The set of allowed extensions that that match the requirement and that should be returned
     /// when found.
     allowed_extensions: HashSet<Cow<'static, str>>,
 
     /// The set of directories to visit next. This iterator does BFS and so these directories will
     /// only be visited if we can't find any files in our state.
     directories_to_search: Vec<PathBuf>,
 
     /// The set of files matching the allowed extensions that were found. If there are entries in
     /// this vector then they will be returned when the [`Iterator::next`] method is called. If not
     /// then we visit one of the next directories to visit.
     files_matching_allowed_extensions: Vec<PathBuf>,
 
     /// This option controls if the the cached file system should be used or not. This could be
     /// better for certain cases where the entries in the directories do not change and therefore
     /// caching can be used.
     use_cached_fs: bool,
 }
 
 impl FilesWithExtensionIterator {
     pub fn new(root_directory: impl AsRef<Path>) -> Self {
         Self {
             allowed_extensions: Default::default(),
             directories_to_search: vec![root_directory.as_ref().to_path_buf()],
             files_matching_allowed_extensions: Default::default(),
             use_cached_fs: Default::default(),
         }
     }
 
     pub fn with_allowed_extension(
         mut self,
         allowed_extension: impl Into<Cow<'static, str>>,
     ) -> Self {
         self.allowed_extensions.insert(allowed_extension.into());
         self
     }
 
     pub fn with_use_cached_fs(mut self, use_cached_fs: bool) -> Self {
         self.use_cached_fs = use_cached_fs;
         self
     }
 }
 
 impl Iterator for FilesWithExtensionIterator {
     type Item = PathBuf;
 
     fn next(&mut self) -> Option<Self::Item> {
         if let Some(file_path) = self.files_matching_allowed_extensions.pop() {
             return Some(file_path);
         };
 
         let directory_to_search = self.directories_to_search.pop()?;
 
         let iterator = if self.use_cached_fs {
             let Ok(dir_entries) = crate::cached_fs::read_dir(directory_to_search.as_path()) else {
                 return self.next();
             };
             Box::new(dir_entries) as Box<dyn Iterator<Item = std::io::Result<PathBuf>>>
         } else {
             let Ok(dir_entries) = std::fs::read_dir(directory_to_search) else {
                 return self.next();
             };
             Box::new(dir_entries.map(|maybe_entry| maybe_entry.map(|entry| entry.path()))) as Box<_>
         };
 
         for entry_path in iterator.flatten() {
             if entry_path.is_dir() {
                 self.directories_to_search.push(entry_path)
-            } else if entry_path.is_file()
-                && entry_path.extension().is_some_and(|ext| {
-                    self.allowed_extensions
-                        .iter()
-                        .any(|allowed| ext.eq_ignore_ascii_case(allowed.as_ref()))
-                })
-            {
-                self.files_matching_allowed_extensions.push(entry_path)
+            } else if entry_path.is_file() &&
+                entry_path.extension().is_some_and(|ext| {
+                    self.allowed_extensions
+                        .iter()
+                        .any(|allowed| ext.eq_ignore_ascii_case(allowed.as_ref()))
+                }) {
+                self.files_matching_allowed_extensions.push(entry_path)
             }
         }
 
         self.next()
     }
 }
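A usage sketch of the builder above; the corpus path is illustrative:

```rust
use std::path::PathBuf;

fn collect_solidity_fixtures() -> Vec<PathBuf> {
    // Walks the corpus recursively and keeps only *.sol files, matching
    // extensions case-insensitively; the cached fs is reasonable here
    // because the corpus does not change while tests run.
    FilesWithExtensionIterator::new("./fixtures/solidity")
        .with_allowed_extension("sol")
        .with_use_cached_fs(true)
        .collect()
}
```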
@@ -1,23 +1,23 @@
 #[macro_export]
 macro_rules! impl_for_wrapper {
     (Display, $ident: ident) => {
         #[automatically_derived]
         impl std::fmt::Display for $ident {
             fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                 std::fmt::Display::fmt(&self.0, f)
             }
         }
     };
     (FromStr, $ident: ident) => {
         #[automatically_derived]
         impl std::str::FromStr for $ident {
             type Err = anyhow::Error;
 
             fn from_str(s: &str) -> anyhow::Result<Self> {
                 s.parse().map(Self).map_err(Into::into)
             }
         }
     };
 }
 
 /// Defines wrappers around types.
@@ -135,6 +135,6 @@ macro_rules! define_wrapper_type {
     };
 }
 
-/// Technically not needed but this allows for the macro to be found in the `macros` module of the
-/// crate in addition to being found in the root of the crate.
+/// Technically not needed but this allows for the macro to be found in the `macros` module of
+/// the crate in addition to being found in the root of the crate.
 pub use {define_wrapper_type, impl_for_wrapper};
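A sketch of what the two macro arms expand against; the `CompilerVersion` wrapper is hypothetical:

```rust
use semver::Version;

// A hypothetical newtype; the macro generates Display and FromStr impls
// that delegate to the wrapped semver::Version.
pub struct CompilerVersion(pub Version);

impl_for_wrapper!(Display, CompilerVersion);
impl_for_wrapper!(FromStr, CompilerVersion);
```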
@@ -7,122 +7,128 @@ use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
 /// could be thought of like the target triple from Rust and LLVM where it specifies the platform
 /// completely starting with the node, the vm, and finally the compiler used for this combination.
 #[derive(
     Clone,
     Copy,
     Debug,
     PartialEq,
     Eq,
     PartialOrd,
     Ord,
     Hash,
     Serialize,
     Deserialize,
     ValueEnum,
     EnumString,
     Display,
     AsRefStr,
     IntoStaticStr,
     JsonSchema,
 )]
 #[serde(rename_all = "kebab-case")]
 #[strum(serialize_all = "kebab-case")]
 pub enum PlatformIdentifier {
     /// The Go-ethereum reference full node EVM implementation with the solc compiler.
     GethEvmSolc,
     /// The Lighthouse Go-ethereum reference full node EVM implementation with the solc compiler.
     LighthouseGethEvmSolc,
+    /// The kitchensink node with the PolkaVM backend with the resolc compiler.
+    KitchensinkPolkavmResolc,
+    /// The kitchensink node with the REVM backend with the solc compiler.
+    KitchensinkRevmSolc,
     /// The revive dev node with the PolkaVM backend with the resolc compiler.
     ReviveDevNodePolkavmResolc,
     /// The revive dev node with the REVM backend with the solc compiler.
     ReviveDevNodeRevmSolc,
     /// A zombienet based Substrate/Polkadot node with the PolkaVM backend with the resolc compiler.
     ZombienetPolkavmResolc,
     /// A zombienet based Substrate/Polkadot node with the REVM backend with the solc compiler.
     ZombienetRevmSolc,
 }
 
 /// An enum of the platform identifiers of all of the platforms supported by this framework.
 #[derive(
     Clone,
     Copy,
     Debug,
     PartialEq,
     Eq,
     PartialOrd,
     Ord,
     Hash,
     Serialize,
     Deserialize,
     ValueEnum,
     EnumString,
     Display,
     AsRefStr,
     IntoStaticStr,
     JsonSchema,
 )]
 pub enum CompilerIdentifier {
     /// The solc compiler.
     Solc,
     /// The resolc compiler.
     Resolc,
 }
 
 /// An enum representing the identifiers of the supported nodes.
 #[derive(
     Clone,
     Copy,
     Debug,
     PartialEq,
     Eq,
     PartialOrd,
     Ord,
     Hash,
     Serialize,
     Deserialize,
     ValueEnum,
     EnumString,
     Display,
     AsRefStr,
     IntoStaticStr,
     JsonSchema,
 )]
 pub enum NodeIdentifier {
     /// The go-ethereum node implementation.
     Geth,
     /// The go-ethereum node implementation.
     LighthouseGeth,
+    /// The Kitchensink node implementation.
+    Kitchensink,
     /// The revive dev node implementation.
     ReviveDevNode,
     /// A zombienet spawned nodes
     Zombienet,
 }
 
 /// An enum representing the identifiers of the supported VMs.
 #[derive(
     Clone,
     Copy,
     Debug,
     PartialEq,
     Eq,
     PartialOrd,
     Ord,
     Hash,
     Serialize,
     Deserialize,
     ValueEnum,
     EnumString,
     Display,
     AsRefStr,
     IntoStaticStr,
     JsonSchema,
 )]
 #[serde(rename_all = "lowercase")]
 #[strum(serialize_all = "lowercase")]
 pub enum VmIdentifier {
     /// The ethereum virtual machine.
     Evm,
     /// The EraVM virtual machine.
     EraVM,
     /// Polkadot's PolaVM Risc-v based virtual machine.
     PolkaVM,
 }
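Given the kebab-case `serde`/`strum` attributes above, a round-trip sketch for one of the new variants:

```rust
use std::str::FromStr;

fn kebab_case_round_trip() {
    // EnumString parses the kebab-case form; Display writes it back out.
    let platform = PlatformIdentifier::from_str("kitchensink-polkavm-resolc").unwrap();
    assert_eq!(platform, PlatformIdentifier::KitchensinkPolkavmResolc);
    assert_eq!(platform.to_string(), "kitchensink-polkavm-resolc");
}
```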
@@ -1,13 +1,11 @@
 mod identifiers;
 mod mode;
-mod parsed_test_specifier;
 mod private_key_allocator;
 mod round_robin_pool;
 mod version_or_requirement;
 
 pub use identifiers::*;
 pub use mode::*;
-pub use parsed_test_specifier::*;
 pub use private_key_allocator::*;
 pub use round_robin_pool::*;
 pub use version_or_requirement::*;
+118
-387
@@ -1,14 +1,7 @@
-use crate::iterators::EitherIter;
 use crate::types::VersionOrRequirement;
-use anyhow::{Context as _, bail};
-use regex::Regex;
-use schemars::JsonSchema;
 use semver::Version;
 use serde::{Deserialize, Serialize};
-use std::collections::HashSet;
-use std::fmt::Display;
-use std::str::FromStr;
-use std::sync::LazyLock;
+use std::{fmt::Display, str::FromStr, sync::LazyLock};
 
 /// This represents a mode that a given test should be run with, if possible.
 ///
@@ -18,421 +11,159 @@ use std::sync::LazyLock;
 /// Use [`ParsedMode::to_test_modes()`] to do this.
 #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
 pub struct Mode {
     pub pipeline: ModePipeline,
     pub optimize_setting: ModeOptimizerSetting,
     pub version: Option<semver::VersionReq>,
 }
 
 impl Display for Mode {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         self.pipeline.fmt(f)?;
         f.write_str(" ")?;
         self.optimize_setting.fmt(f)?;
 
         if let Some(version) = &self.version {
             f.write_str(" ")?;
             version.fmt(f)?;
         }
 
         Ok(())
     }
-}
-
-impl FromStr for Mode {
-    type Err = anyhow::Error;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        let parsed_mode = ParsedMode::from_str(s)?;
-        let mut iter = parsed_mode.to_modes();
-        let (Some(mode), None) = (iter.next(), iter.next()) else {
-            bail!("Failed to parse the mode")
-        };
-        Ok(mode)
-    }
 }
 
 impl Mode {
     /// Return all of the available mode combinations.
     pub fn all() -> impl Iterator<Item = &'static Mode> {
         static ALL_MODES: LazyLock<Vec<Mode>> = LazyLock::new(|| {
             ModePipeline::test_cases()
                 .flat_map(|pipeline| {
                     ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode {
                         pipeline,
                         optimize_setting,
                         version: None,
                     })
                 })
                 .collect::<Vec<_>>()
         });
         ALL_MODES.iter()
     }
 
     /// Resolves the [`Mode`]'s solidity version requirement into a [`VersionOrRequirement`] if
     /// the requirement is present on the object. Otherwise, the passed default version is used.
     pub fn compiler_version_to_use(&self, default: Version) -> VersionOrRequirement {
         match self.version {
             Some(ref requirement) => requirement.clone().into(),
             None => default.into(),
         }
     }
 }
 
 /// What do we want the compiler to do?
 #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
 pub enum ModePipeline {
     /// Compile Solidity code via Yul IR
     ViaYulIR,
     /// Compile Solidity direct to assembly
     ViaEVMAssembly,
 }
 
 impl FromStr for ModePipeline {
     type Err = anyhow::Error;
     fn from_str(s: &str) -> Result<Self, Self::Err> {
         match s {
             // via Yul IR
             "Y" => Ok(ModePipeline::ViaYulIR),
             // Don't go via Yul IR
             "E" => Ok(ModePipeline::ViaEVMAssembly),
             // Anything else that we see isn't a mode at all
-            _ => Err(anyhow::anyhow!(
-                "Unsupported pipeline '{s}': expected 'Y' or 'E'"
-            )),
+            _ => Err(anyhow::anyhow!("Unsupported pipeline '{s}': expected 'Y' or 'E'")),
         }
     }
 }
 
 impl Display for ModePipeline {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
             ModePipeline::ViaYulIR => f.write_str("Y"),
             ModePipeline::ViaEVMAssembly => f.write_str("E"),
         }
     }
 }
 
 impl ModePipeline {
     /// Should we go via Yul IR?
     pub fn via_yul_ir(&self) -> bool {
         matches!(self, ModePipeline::ViaYulIR)
     }
 
     /// An iterator over the available pipelines that we'd like to test,
     /// when an explicit pipeline was not specified.
     pub fn test_cases() -> impl Iterator<Item = ModePipeline> + Clone {
         [ModePipeline::ViaYulIR, ModePipeline::ViaEVMAssembly].into_iter()
     }
 }
 
 #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
 pub enum ModeOptimizerSetting {
     /// 0 / -: Don't apply any optimizations
     M0,
     /// 1: Apply less than default optimizations
     M1,
     /// 2: Apply the default optimizations
     M2,
     /// 3 / +: Apply aggressive optimizations
     M3,
     /// s: Optimize for size
     Ms,
     /// z: Aggressively optimize for size
     Mz,
 }
 
 impl FromStr for ModeOptimizerSetting {
     type Err = anyhow::Error;
     fn from_str(s: &str) -> Result<Self, Self::Err> {
         match s {
             "M0" => Ok(ModeOptimizerSetting::M0),
             "M1" => Ok(ModeOptimizerSetting::M1),
             "M2" => Ok(ModeOptimizerSetting::M2),
             "M3" => Ok(ModeOptimizerSetting::M3),
             "Ms" => Ok(ModeOptimizerSetting::Ms),
             "Mz" => Ok(ModeOptimizerSetting::Mz),
             _ => Err(anyhow::anyhow!(
                 "Unsupported optimizer setting '{s}': expected 'M0', 'M1', 'M2', 'M3', 'Ms' or 'Mz'"
             )),
         }
     }
 }
 
 impl Display for ModeOptimizerSetting {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
             ModeOptimizerSetting::M0 => f.write_str("M0"),
             ModeOptimizerSetting::M1 => f.write_str("M1"),
             ModeOptimizerSetting::M2 => f.write_str("M2"),
             ModeOptimizerSetting::M3 => f.write_str("M3"),
             ModeOptimizerSetting::Ms => f.write_str("Ms"),
             ModeOptimizerSetting::Mz => f.write_str("Mz"),
         }
     }
 }
 
 impl ModeOptimizerSetting {
     /// An iterator over the available optimizer settings that we'd like to test,
     /// when an explicit optimizer setting was not specified.
     pub fn test_cases() -> impl Iterator<Item = ModeOptimizerSetting> + Clone {
         [
             // No optimizations:
             ModeOptimizerSetting::M0,
             // Aggressive optimizations:
             ModeOptimizerSetting::M3,
         ]
         .into_iter()
     }
 
     /// Are any optimizations enabled?
     pub fn optimizations_enabled(&self) -> bool {
         !matches!(self, ModeOptimizerSetting::M0)
     }
-}
-
-/// This represents a mode that has been parsed from test metadata.
-///
-/// Mode strings can take the following form (in pseudo-regex):
-///
-/// ```text
-/// [YEILV][+-]? (M[0123sz])? <semver>?
-/// ```
-///
-/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
-#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
-#[serde(try_from = "String", into = "String")]
-pub struct ParsedMode {
-    pub pipeline: Option<ModePipeline>,
-    pub optimize_flag: Option<bool>,
-    pub optimize_setting: Option<ModeOptimizerSetting>,
-    pub version: Option<semver::VersionReq>,
-}
-
-impl FromStr for ParsedMode {
-    type Err = anyhow::Error;
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        static REGEX: LazyLock<Regex> = LazyLock::new(|| {
-            Regex::new(r"(?x)
-                ^
-                (?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use eg Y, E+, E-
-                \s*
-                (?P<optimize_setting>M[a-zA-Z0-9])? # Optimize setting eg M0, Ms, Mz
-                \s*
-                (?P<version>[>=<^]*\d+(?:\.\d+)*)? # Optional semver version eg >=0.8.0, 0.7, <0.8
-                $
-            ").unwrap()
-        });
-
-        let Some(caps) = REGEX.captures(s) else {
-            anyhow::bail!("Cannot parse mode '{s}' from string");
-        };
-
-        let pipeline = match caps.name("pipeline") {
-            Some(m) => Some(
-                ModePipeline::from_str(m.as_str())
-                    .context("Failed to parse mode pipeline from string")?,
-            ),
-            None => None,
-        };
-
-        let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
-
-        let optimize_setting = match caps.name("optimize_setting") {
-            Some(m) => Some(
-                ModeOptimizerSetting::from_str(m.as_str())
-                    .context("Failed to parse optimizer setting from string")?,
-            ),
-            None => None,
-        };
-
-        let version = match caps.name("version") {
-            Some(m) => Some(
-                semver::VersionReq::parse(m.as_str())
-                    .map_err(|e| {
-                        anyhow::anyhow!(
-                            "Cannot parse the version requirement '{}': {e}",
-                            m.as_str()
-                        )
-                    })
-                    .context("Failed to parse semver requirement from mode string")?,
-            ),
-            None => None,
-        };
-
-        Ok(ParsedMode {
-            pipeline,
-            optimize_flag,
-            optimize_setting,
-            version,
-        })
-    }
-}
-
-impl Display for ParsedMode {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let mut has_written = false;
-
-        if let Some(pipeline) = self.pipeline {
-            pipeline.fmt(f)?;
-            if let Some(optimize_flag) = self.optimize_flag {
-                f.write_str(if optimize_flag { "+" } else { "-" })?;
-            }
-            has_written = true;
-        }
-
-        if let Some(optimize_setting) = self.optimize_setting {
-            if has_written {
-                f.write_str(" ")?;
-            }
-            optimize_setting.fmt(f)?;
-            has_written = true;
-        }
-
-        if let Some(version) = &self.version {
-            if has_written {
-                f.write_str(" ")?;
-            }
-            version.fmt(f)?;
-        }
-
-        Ok(())
-    }
-}
-
-impl From<ParsedMode> for String {
-    fn from(parsed_mode: ParsedMode) -> Self {
-        parsed_mode.to_string()
-    }
-}
-
-impl TryFrom<String> for ParsedMode {
-    type Error = anyhow::Error;
-    fn try_from(value: String) -> Result<Self, Self::Error> {
-        ParsedMode::from_str(&value)
-    }
-}
-
-impl ParsedMode {
-    /// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
-    pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
-        let pipeline_iter = self.pipeline.as_ref().map_or_else(
-            || EitherIter::A(ModePipeline::test_cases()),
-            |p| EitherIter::B(std::iter::once(*p)),
-        );
-
-        let optimize_flag_setting = self.optimize_flag.map(|flag| {
-            if flag {
-                ModeOptimizerSetting::M3
-            } else {
-                ModeOptimizerSetting::M0
-            }
-        });
-
-        let optimize_flag_iter = match optimize_flag_setting {
-            Some(setting) => EitherIter::A(std::iter::once(setting)),
-            None => EitherIter::B(ModeOptimizerSetting::test_cases()),
-        };
-
-        let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
-            || EitherIter::A(optimize_flag_iter),
-            |s| EitherIter::B(std::iter::once(*s)),
-        );
-
-        pipeline_iter.flat_map(move |pipeline| {
-            optimize_settings_iter
-                .clone()
-                .map(move |optimize_setting| Mode {
-                    pipeline,
-                    optimize_setting,
-                    version: self.version.clone(),
-                })
-        })
-    }
-
-    /// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
-    /// This avoids any duplicate entries.
-    pub fn many_to_modes<'a>(
-        parsed: impl Iterator<Item = &'a ParsedMode>,
-    ) -> impl Iterator<Item = Mode> {
-        let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
-        modes.into_iter()
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_parsed_mode_from_str() {
-        let strings = vec![
-            ("Mz", "Mz"),
-            ("Y", "Y"),
-            ("Y+", "Y+"),
-            ("Y-", "Y-"),
-            ("E", "E"),
-            ("E+", "E+"),
-            ("E-", "E-"),
-            ("Y M0", "Y M0"),
-            ("Y M1", "Y M1"),
-            ("Y M2", "Y M2"),
-            ("Y M3", "Y M3"),
-            ("Y Ms", "Y Ms"),
-            ("Y Mz", "Y Mz"),
-            ("E M0", "E M0"),
-            ("E M1", "E M1"),
-            ("E M2", "E M2"),
-            ("E M3", "E M3"),
-            ("E Ms", "E Ms"),
-            ("E Mz", "E Mz"),
-            // When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
-            ("Y 0.8.0", "Y ^0.8.0"),
-            ("E+ 0.8.0", "E+ ^0.8.0"),
-            ("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
-            ("E Mz <0.7.0", "E Mz <0.7.0"),
-            // We can parse +- _and_ M1/M2 but the latter takes priority.
-            ("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
-            ("E- M2 0.7.0", "E- M2 ^0.7.0"),
-            // We don't see this in the wild but it is parsed.
-            ("<=0.8", "<=0.8"),
-        ];
-
-        for (actual, expected) in strings {
-            let parsed = ParsedMode::from_str(actual)
-                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
-            assert_eq!(
-                expected,
-                parsed.to_string(),
-                "Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
-            );
-        }
-    }
-
-    #[test]
-    fn test_parsed_mode_to_test_modes() {
-        let strings = vec![
-            ("Mz", vec!["Y Mz", "E Mz"]),
-            ("Y", vec!["Y M0", "Y M3"]),
-            ("E", vec!["E M0", "E M3"]),
-            ("Y+", vec!["Y M3"]),
-            ("Y-", vec!["Y M0"]),
-            ("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
-            (
-                "<=0.8",
-                vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
-            ),
-        ];
-
-        for (actual, expected) in strings {
-            let parsed = ParsedMode::from_str(actual)
-                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
-            let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
-            let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();
-
-            assert_eq!(
-                expected_set, actual_set,
-                "Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
-            );
-        }
-    }
 }
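A sketch of how the remaining `Mode` API can be driven; the 0.8.29 default version is illustrative:

```rust
use semver::Version;

fn enumerate_modes() {
    // Iterates the pipeline x optimizer matrix (Y/E crossed with M0/M3 per
    // the test_cases iterators above) and resolves each mode's compiler
    // version against a default.
    for mode in Mode::all() {
        let version = mode.compiler_version_to_use(Version::new(0, 8, 29));
        println!("{mode} -> {version:?}");
    }
}
```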
@@ -1,133 +0,0 @@
-use std::{fmt::Display, path::PathBuf, str::FromStr};
-
-use anyhow::{Context as _, bail};
-
-use crate::types::Mode;
-
-#[derive(Clone, Debug, PartialEq, Eq, Hash)]
-pub enum ParsedTestSpecifier {
-    /// All of the test cases in the file should be ran across all of the specified modes
-    FileOrDirectory {
-        /// The path of the metadata file containing the test cases.
-        metadata_or_directory_file_path: PathBuf,
-    },
-    /// Only a specific case within the metadata file should be ran across all of the modes in the
-    /// file.
-    Case {
-        /// The path of the metadata file containing the test cases.
-        metadata_file_path: PathBuf,
-
-        /// The index of the specific case to run.
-        case_idx: usize,
-    },
-    /// A specific case and a specific mode should be ran. This is the most specific out of all of
-    /// the specifier types.
-    CaseWithMode {
-        /// The path of the metadata file containing the test cases.
-        metadata_file_path: PathBuf,
-
-        /// The index of the specific case to run.
-        case_idx: usize,
-
-        /// The parsed mode that the test should be run in.
-        mode: Mode,
-    },
-}
-
-impl Display for ParsedTestSpecifier {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        match self {
-            ParsedTestSpecifier::FileOrDirectory {
-                metadata_or_directory_file_path,
-            } => {
-                write!(f, "{}", metadata_or_directory_file_path.display())
-            }
-            ParsedTestSpecifier::Case {
-                metadata_file_path,
-                case_idx,
-            } => {
-                write!(f, "{}::{}", metadata_file_path.display(), case_idx)
-            }
-            ParsedTestSpecifier::CaseWithMode {
-                metadata_file_path,
-                case_idx,
-                mode,
-            } => {
-                write!(
-                    f,
-                    "{}::{}::{}",
-                    metadata_file_path.display(),
-                    case_idx,
-                    mode
-                )
-            }
-        }
-    }
-}
-
-impl FromStr for ParsedTestSpecifier {
-    type Err = anyhow::Error;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        let mut split_iter = s.split("::");
-
-        let Some(path_string) = split_iter.next() else {
-            bail!("Could not find the path in the test specifier")
-        };
-        let path = PathBuf::from(path_string)
-            .canonicalize()
-            .context("Failed to canonicalize the path of the test")?;
-
-        let Some(case_idx_string) = split_iter.next() else {
-            return Ok(Self::FileOrDirectory {
-                metadata_or_directory_file_path: path,
-            });
-        };
-        let case_idx = usize::from_str(case_idx_string)
-            .context("Failed to parse the case idx of the test specifier from string")?;
-
-        // At this point the provided path must be a file.
-        if !path.is_file() {
-            bail!(
-                "Test specifier with a path and case idx must point to a file and not a directory"
-            )
-        }
-
-        let Some(mode_string) = split_iter.next() else {
-            return Ok(Self::Case {
-                metadata_file_path: path,
-                case_idx,
-            });
-        };
-        let mode = Mode::from_str(mode_string)
-            .context("Failed to parse the mode string in the parsed test specifier")?;
-
-        Ok(Self::CaseWithMode {
-            metadata_file_path: path,
-            case_idx,
-            mode,
-        })
-    }
-}
-
-impl From<ParsedTestSpecifier> for String {
-    fn from(value: ParsedTestSpecifier) -> Self {
-        value.to_string()
-    }
-}
-
-impl TryFrom<String> for ParsedTestSpecifier {
-    type Error = anyhow::Error;
-
-    fn try_from(value: String) -> Result<Self, Self::Error> {
-        value.parse()
-    }
-}
-
-impl TryFrom<&str> for ParsedTestSpecifier {
-    type Error = anyhow::Error;
-
-    fn try_from(value: &str) -> Result<Self, Self::Error> {
-        value.parse()
-    }
-}
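For reference, the specifier grammar removed above was `path[::case_idx[::mode]]`; a sketch of the strings it accepted (the fixture paths are hypothetical, and since the parser canonicalizes paths they must exist on disk):

```rust
use std::str::FromStr;

fn parse_specifiers() -> anyhow::Result<()> {
    // Path only: run every case in the file across all modes.
    let _file = ParsedTestSpecifier::from_str("tests/erc20.json")?;
    // Path plus case index: run one case across all modes.
    let _case = ParsedTestSpecifier::from_str("tests/erc20.json::2")?;
    // Path, case index, and mode: the most specific form.
    let _exact = ParsedTestSpecifier::from_str("tests/erc20.json::2::Y M3")?;
    Ok(())
}
```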
@@ -1,36 +1,32 @@
-use alloy::primitives::U256;
-use alloy::signers::local::PrivateKeySigner;
+use alloy::{primitives::U256, signers::local::PrivateKeySigner};
 use anyhow::{Context, Result, bail};
 
 /// This is a sequential private key allocator. When instantiated, it allocated private keys in
 /// sequentially and in order until the maximum private key specified is reached.
 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct PrivateKeyAllocator {
     /// The next private key to be returned by the allocator when requested.
     next_private_key: U256,
 
     /// The highest private key (exclusive) that can be returned by this allocator.
     highest_private_key_inclusive: U256,
 }
 
 impl PrivateKeyAllocator {
     /// Creates a new instance of the private key allocator.
     pub fn new(highest_private_key_inclusive: U256) -> Self {
-        Self {
-            next_private_key: U256::ONE,
-            highest_private_key_inclusive,
-        }
+        Self { next_private_key: U256::ONE, highest_private_key_inclusive }
     }
 
     /// Allocates a new private key and errors out if the maximum private key has been reached.
     pub fn allocate(&mut self) -> Result<PrivateKeySigner> {
         if self.next_private_key > self.highest_private_key_inclusive {
             bail!("Attempted to allocate a private key but failed since all have been allocated");
         };
         let private_key =
             PrivateKeySigner::from_slice(self.next_private_key.to_be_bytes::<32>().as_slice())
                 .context("Failed to convert the private key digits into a private key")?;
         self.next_private_key += U256::ONE;
         Ok(private_key)
     }
 }
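A usage sketch for the allocator; the bound of 3 is illustrative:

```rust
use alloy::primitives::U256;

fn allocate_signers() -> anyhow::Result<()> {
    // Hands out keys 0x01, 0x02, 0x03 in order; a fourth request fails.
    let mut allocator = PrivateKeyAllocator::new(U256::from(3));
    for _ in 0..3 {
        let signer = allocator.allocate()?;
        println!("funded account: {}", signer.address());
    }
    assert!(allocator.allocate().is_err());
    Ok(())
}
```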
@@ -1,24 +1,21 @@
 use std::sync::atomic::{AtomicUsize, Ordering};
 
 pub struct RoundRobinPool<T> {
     next_index: AtomicUsize,
     items: Vec<T>,
 }
 
 impl<T> RoundRobinPool<T> {
     pub fn new(items: Vec<T>) -> Self {
-        Self {
-            next_index: Default::default(),
-            items,
-        }
+        Self { next_index: Default::default(), items }
     }
 
     pub fn round_robin(&self) -> &T {
         let current = self.next_index.fetch_add(1, Ordering::SeqCst) % self.items.len();
         self.items.get(current).unwrap()
     }
 
     pub fn iter(&self) -> impl Iterator<Item = &T> {
         self.items.iter()
     }
 }
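A usage sketch showing the rotation order:

```rust
fn rotate_endpoints() {
    // Requests rotate through the pool in insertion order and wrap around.
    let pool = RoundRobinPool::new(vec!["node-1", "node-2", "node-3"]);
    assert_eq!(*pool.round_robin(), "node-1");
    assert_eq!(*pool.round_robin(), "node-2");
    assert_eq!(*pool.round_robin(), "node-3");
    assert_eq!(*pool.round_robin(), "node-1");
}
```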
@@ -2,40 +2,40 @@ use semver::{Version, VersionReq};
 
 #[derive(Clone, Debug)]
 pub enum VersionOrRequirement {
     Version(Version),
     Requirement(VersionReq),
 }
 
 impl From<Version> for VersionOrRequirement {
     fn from(value: Version) -> Self {
         Self::Version(value)
     }
 }
 
 impl From<VersionReq> for VersionOrRequirement {
     fn from(value: VersionReq) -> Self {
         Self::Requirement(value)
     }
 }
 
 impl TryFrom<VersionOrRequirement> for Version {
     type Error = anyhow::Error;
 
     fn try_from(value: VersionOrRequirement) -> Result<Self, Self::Error> {
         let VersionOrRequirement::Version(version) = value else {
             anyhow::bail!("Version or requirement was not a version");
         };
         Ok(version)
     }
 }
 
 impl TryFrom<VersionOrRequirement> for VersionReq {
     type Error = anyhow::Error;
 
     fn try_from(value: VersionOrRequirement) -> Result<Self, Self::Error> {
         let VersionOrRequirement::Requirement(requirement) = value else {
             anyhow::bail!("Version or requirement was not a requirement");
         };
         Ok(requirement)
     }
 }
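A conversion sketch for the enum above:

```rust
use semver::{Version, VersionReq};

fn convert() -> anyhow::Result<()> {
    let pinned: VersionOrRequirement = Version::new(0, 8, 29).into();
    let ranged: VersionOrRequirement = VersionReq::parse(">=0.8.0")?.into();
    // TryFrom only succeeds when the variant matches.
    let _version: Version = pinned.try_into()?;
    let _requirement: VersionReq = ranged.try_into()?;
    Ok(())
}
```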
+111
-112
@@ -4,14 +4,13 @@
|
|||||||
//! - Polkadot revive Wasm compiler
|
//! - Polkadot revive Wasm compiler
|
||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
collections::HashMap,
|
collections::HashMap,
|
||||||
hash::Hash,
|
hash::Hash,
|
||||||
path::{Path, PathBuf},
|
path::{Path, PathBuf},
|
||||||
pin::Pin,
|
pin::Pin,
|
||||||
};
|
};
|
||||||
|
|
||||||
use alloy::json_abi::JsonAbi;
|
use alloy::{json_abi::JsonAbi, primitives::Address};
|
||||||
use alloy::primitives::Address;
|
|
||||||
use anyhow::{Context as _, Result};
|
use anyhow::{Context as _, Result};
|
||||||
use semver::Version;
|
use semver::Version;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
@@ -28,149 +27,149 @@ pub mod solc;

/// A common interface for all supported Solidity compilers.
pub trait SolidityCompiler {
    /// Returns the version of the compiler.
    fn version(&self) -> &Version;

    /// Returns the path of the compiler executable.
    fn path(&self) -> &Path;

    /// The low-level compiler interface.
    fn build(
        &self,
        input: CompilerInput,
    ) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>>;

    /// Does the compiler support the provided mode and version settings?
    fn supports_mode(
        &self,
        optimizer_setting: ModeOptimizerSetting,
        pipeline: ModePipeline,
    ) -> bool;
}

/// The generic compilation input configuration.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct CompilerInput {
    pub pipeline: Option<ModePipeline>,
    pub optimization: Option<ModeOptimizerSetting>,
    pub evm_version: Option<EVMVersion>,
    pub allow_paths: Vec<PathBuf>,
    pub base_path: Option<PathBuf>,
    pub sources: HashMap<PathBuf, String>,
    pub libraries: HashMap<PathBuf, HashMap<String, Address>>,
    pub revert_string_handling: Option<RevertString>,
}

/// The generic compilation output configuration.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct CompilerOutput {
    /// The compiled contracts. The bytecode of each contract is kept as a string in case linking
    /// is required and the compiled source has placeholders.
    pub contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
}
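Note the shape of `contracts`: keyed first by the (canonicalized) source path, then by contract name, with the bytecode kept as a `String` so unlinked placeholders survive. A hedged lookup sketch (the helper and paths are illustrative, not part of the crate):

use std::path::PathBuf;

fn get_bytecode(output: &CompilerOutput, source: &str, name: &str) -> Option<String> {
    // The output map is keyed by canonicalized paths, so canonicalize the
    // query path the same way before looking it up.
    let path = PathBuf::from(source).canonicalize().ok()?;
    output
        .contracts
        .get(&path)
        .and_then(|contracts| contracts.get(name))
        // The first tuple field is the (possibly still unlinked) bytecode string.
        .map(|(bytecode, _abi)| bytecode.clone())
}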
/// A generic builder style interface for configuring the supported compiler options.
#[derive(Default)]
pub struct Compiler {
    input: CompilerInput,
}

impl Compiler {
    pub fn new() -> Self {
        Self {
            input: CompilerInput {
                pipeline: Default::default(),
                optimization: Default::default(),
                evm_version: Default::default(),
                allow_paths: Default::default(),
                base_path: Default::default(),
                sources: Default::default(),
                libraries: Default::default(),
                revert_string_handling: Default::default(),
            },
        }
    }

    pub fn with_optimization(mut self, value: impl Into<Option<ModeOptimizerSetting>>) -> Self {
        self.input.optimization = value.into();
        self
    }

    pub fn with_pipeline(mut self, value: impl Into<Option<ModePipeline>>) -> Self {
        self.input.pipeline = value.into();
        self
    }

    pub fn with_evm_version(mut self, version: impl Into<Option<EVMVersion>>) -> Self {
        self.input.evm_version = version.into();
        self
    }

    pub fn with_allow_path(mut self, path: impl AsRef<Path>) -> Self {
        self.input.allow_paths.push(path.as_ref().into());
        self
    }

    pub fn with_base_path(mut self, path: impl Into<Option<PathBuf>>) -> Self {
        self.input.base_path = path.into();
        self
    }

    pub fn with_source(mut self, path: impl AsRef<Path>) -> Result<Self> {
        self.input.sources.insert(
            path.as_ref().to_path_buf(),
            read_to_string(path.as_ref()).context("Failed to read the contract source")?,
        );
        Ok(self)
    }

    pub fn with_library(
        mut self,
        path: impl AsRef<Path>,
        name: impl AsRef<str>,
        address: Address,
    ) -> Self {
        self.input
            .libraries
            .entry(path.as_ref().to_path_buf())
            .or_default()
            .insert(name.as_ref().into(), address);
        self
    }

    pub fn with_revert_string_handling(
        mut self,
        revert_string_handling: impl Into<Option<RevertString>>,
    ) -> Self {
        self.input.revert_string_handling = revert_string_handling.into();
        self
    }

    pub fn then(self, callback: impl FnOnce(Self) -> Self) -> Self {
        callback(self)
    }

    pub fn try_then<E>(self, callback: impl FnOnce(Self) -> Result<Self, E>) -> Result<Self, E> {
        callback(self)
    }

    pub async fn try_build(self, compiler: &dyn SolidityCompiler) -> Result<CompilerOutput> {
        compiler.build(self.input).await
    }

    pub fn input(&self) -> &CompilerInput {
        &self.input
    }
}

/// Defines how the compiler should handle revert strings.
#[derive(
    Clone, Debug, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize,
)]
pub enum RevertString {
    #[default]
    Default,
    Debug,
    Strip,
    VerboseDebug,
}
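The `then`/`try_then` combinators exist so a builder chain can branch without being broken apart. A sketch of the intended usage; the source path is illustrative, and the `M3` optimizer level is an assumption for the example (only `M0` appears in the code above):

async fn build_example(
    compiler: &dyn SolidityCompiler,
    optimize: bool,
) -> Result<CompilerOutput> {
    Compiler::new()
        .with_pipeline(ModePipeline::ViaYulIR)
        .with_revert_string_handling(RevertString::Debug)
        // `then` threads an infallible, possibly conditional, configuration step.
        .then(|builder| {
            if optimize {
                // Hypothetical optimizer level; substitute a real ModeOptimizerSetting.
                builder.with_optimization(ModeOptimizerSetting::M3)
            } else {
                builder
            }
        })
        // `try_then` is the fallible variant; `with_source` reads the file from disk.
        .try_then(|builder| builder.with_source("./contract.sol"))?
        .try_build(compiler)
        .await
}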
@@ -2,24 +2,24 @@
//! compiling contracts to PolkaVM (PVM) bytecode.

use std::{
    path::PathBuf,
    pin::Pin,
    process::Stdio,
    sync::{Arc, LazyLock},
};

use dashmap::DashMap;
use revive_dt_common::types::VersionOrRequirement;
use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration};
use revive_solc_json_interface::{
    SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings,
    SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection,
    SolcStandardJsonOutput,
};
use tracing::{Span, field::display};

use crate::{
    CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler, solc::Solc,
};

use alloy::json_abi::JsonAbi;
@@ -33,55 +33,52 @@ pub struct Resolc(Arc<ResolcInner>);

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct ResolcInner {
    /// The internal solc compiler that the resolc compiler uses as a compiler frontend.
    solc: Solc,
    /// Path to the `resolc` executable.
    resolc_path: PathBuf,
}

impl Resolc {
    pub async fn new(
        context: impl AsRef<SolcConfiguration>
            + AsRef<ResolcConfiguration>
            + AsRef<WorkingDirectoryConfiguration>,
        version: impl Into<Option<VersionOrRequirement>>,
    ) -> Result<Self> {
        /// This is a cache of all of the resolc compiler objects. Since we do not currently
        /// support multiple resolc compiler versions, the cache is keyed just by the solc
        /// compiler and its version, mapping to the resolc compiler.
        static COMPILERS_CACHE: LazyLock<DashMap<Solc, Resolc>> = LazyLock::new(Default::default);

        let resolc_configuration = AsRef::<ResolcConfiguration>::as_ref(&context);

        let solc = Solc::new(&context, version)
            .await
            .context("Failed to create the solc compiler frontend for resolc")?;

        Ok(COMPILERS_CACHE
            .entry(solc.clone())
            .or_insert_with(|| {
-                Self(Arc::new(ResolcInner {
-                    solc,
-                    resolc_path: resolc_configuration.path.clone(),
-                }))
+                Self(Arc::new(ResolcInner { solc, resolc_path: resolc_configuration.path.clone() }))
            })
            .clone())
    }
}
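The `static COMPILERS_CACHE` above is a process-wide memoization of compiler handles: `LazyLock` defers construction of the map until first use, and `DashMap::entry(...).or_insert_with(...)` runs the expensive constructor at most once per key while callers on other keys proceed in parallel. The same pattern in isolation (the key and handle types are illustrative):

use std::sync::{Arc, LazyLock};
use dashmap::DashMap;

#[derive(Clone)]
struct Handle(Arc<str>); // stand-in for a cached compiler object

fn get_or_create(key: (String, String)) -> Handle {
    static CACHE: LazyLock<DashMap<(String, String), Handle>> =
        LazyLock::new(Default::default);

    CACHE
        .entry(key)
        // Only evaluated when the key is absent, so construction happens at
        // most once per (path, version) pair for the life of the process.
        .or_insert_with(|| Handle(Arc::from("expensive handle")))
        .clone()
}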
impl SolidityCompiler for Resolc {
    fn version(&self) -> &Version {
        // We currently return the solc compiler version since we do not support multiple resolc
        // compiler versions.
        SolidityCompiler::version(&self.0.solc)
    }

    fn path(&self) -> &std::path::Path {
        &self.0.resolc_path
    }

    #[tracing::instrument(level = "debug", ret)]
    #[tracing::instrument(
        level = "error",
        skip_all,
        fields(
@@ -91,221 +88,216 @@ impl SolidityCompiler for Resolc {
        ),
        err(Debug)
    )]
    fn build(
        &self,
        CompilerInput {
            pipeline,
            optimization,
            evm_version,
            allow_paths,
            base_path,
            sources,
            libraries,
            // TODO: this is currently not being handled since there is no way to pass it into
            // resolc. So, we need to go back to this later once it's supported.
            revert_string_handling: _,
        }: CompilerInput,
    ) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>> {
        Box::pin(async move {
            if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) {
                anyhow::bail!(
                    "Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}"
                );
            }

            let input = SolcStandardJsonInput {
                language: SolcStandardJsonInputLanguage::Solidity,
                sources: sources
                    .into_iter()
                    .map(|(path, source)| (path.display().to_string(), source.into()))
                    .collect(),
                settings: SolcStandardJsonInputSettings {
                    evm_version,
                    libraries: Some(
                        libraries
                            .into_iter()
                            .map(|(source_code, libraries_map)| {
                                (
                                    source_code.display().to_string(),
                                    libraries_map
                                        .into_iter()
                                        .map(|(library_ident, library_address)| {
                                            (library_ident, library_address.to_string())
                                        })
                                        .collect(),
                                )
                            })
                            .collect(),
                    ),
                    remappings: None,
                    output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()),
                    via_ir: Some(true),
                    optimizer: SolcStandardJsonInputSettingsOptimizer::new(
-                        optimization
-                            .unwrap_or(ModeOptimizerSetting::M0)
-                            .optimizations_enabled(),
+                        optimization.unwrap_or(ModeOptimizerSetting::M0).optimizations_enabled(),
                        None,
                        &Version::new(0, 0, 0),
                        false,
                    ),
                    metadata: None,
                    polkavm: None,
                },
            };
            Span::current().record("json_in", display(serde_json::to_string(&input).unwrap()));

            let path = &self.0.resolc_path;
            let mut command = AsyncCommand::new(path);
            command
                .stdin(Stdio::piped())
                .stdout(Stdio::piped())
                .stderr(Stdio::piped())
                .arg("--solc")
                .arg(self.0.solc.path())
                .arg("--standard-json");

            if let Some(ref base_path) = base_path {
                command.arg("--base-path").arg(base_path);
            }
            if !allow_paths.is_empty() {
                command.arg("--allow-paths").arg(
                    allow_paths
                        .iter()
                        .map(|path| path.display().to_string())
                        .collect::<Vec<_>>()
                        .join(","),
                );
            }
            let mut child = command
                .spawn()
                .with_context(|| format!("Failed to spawn resolc at {}", path.display()))?;

            let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped");
            let serialized_input = serde_json::to_vec(&input)
                .context("Failed to serialize Standard JSON input for resolc")?;
            stdin_pipe
                .write_all(&serialized_input)
                .await
                .context("Failed to write Standard JSON to resolc stdin")?;

            let output = child
                .wait_with_output()
                .await
                .context("Failed while waiting for resolc process to finish")?;
            let stdout = output.stdout;
            let stderr = output.stderr;

            if !output.status.success() {
                let json_in = serde_json::to_string_pretty(&input)
                    .context("Failed to pretty-print Standard JSON input for logging")?;
                let message = String::from_utf8_lossy(&stderr);
                tracing::error!(
                    status = %output.status,
                    message = %message,
                    json_input = json_in,
                    "Compilation using resolc failed"
                );
                anyhow::bail!("Compilation failed with an error: {message}");
            }

            let parsed = serde_json::from_slice::<SolcStandardJsonOutput>(&stdout)
                .map_err(|e| {
                    anyhow::anyhow!(
                        "failed to parse resolc JSON output: {e}\nstderr: {}",
                        String::from_utf8_lossy(&stderr)
                    )
                })
                .context("Failed to parse resolc standard JSON output")?;

            tracing::debug!(
                output = %serde_json::to_string(&parsed).unwrap(),
                "Compiled successfully"
            );

            // Detecting if the compiler output contained errors and reporting them through logs and
            // errors instead of returning the compiler output that might contain errors.
            for error in parsed.errors.iter().flatten() {
                if error.severity == "error" {
                    tracing::error!(
                        ?error,
                        ?input,
                        output = %serde_json::to_string(&parsed).unwrap(),
                        "Encountered an error in the compilation"
                    );
                    anyhow::bail!("Encountered an error in the compilation: {error}")
                }
            }

            let Some(contracts) = parsed.contracts else {
                anyhow::bail!("Unexpected error - resolc output doesn't have a contracts section");
            };

            let mut compiler_output = CompilerOutput::default();
            for (source_path, contracts) in contracts.into_iter() {
                let src_for_msg = source_path.clone();
                let source_path = PathBuf::from(source_path)
                    .canonicalize()
                    .with_context(|| format!("Failed to canonicalize path {src_for_msg}"))?;

                let map = compiler_output.contracts.entry(source_path).or_default();
                for (contract_name, contract_information) in contracts.into_iter() {
                    let bytecode = contract_information
                        .evm
                        .and_then(|evm| evm.bytecode.clone())
                        .context("Unexpected - Contract compiled with resolc has no bytecode")?;
                    let abi = {
                        let metadata = contract_information
                            .metadata
                            .as_ref()
                            .context("No metadata found for the contract")?;
                        let solc_metadata_str = match metadata {
-                            serde_json::Value::String(solc_metadata_str) => {
-                                solc_metadata_str.as_str()
-                            }
+                            serde_json::Value::String(solc_metadata_str) =>
+                                solc_metadata_str.as_str(),
                            serde_json::Value::Object(metadata_object) => {
                                let solc_metadata_value = metadata_object
                                    .get("solc_metadata")
                                    .context("Contract doesn't have a 'solc_metadata' field")?;
                                solc_metadata_value
                                    .as_str()
                                    .context("The 'solc_metadata' field is not a string")?
-                            }
-                            serde_json::Value::Null
-                            | serde_json::Value::Bool(_)
-                            | serde_json::Value::Number(_)
-                            | serde_json::Value::Array(_) => {
+                            },
+                            serde_json::Value::Null |
+                            serde_json::Value::Bool(_) |
+                            serde_json::Value::Number(_) |
+                            serde_json::Value::Array(_) => {
                                anyhow::bail!("Unsupported type of metadata {metadata:?}")
-                            }
+                            },
                        };
-                        let solc_metadata = serde_json::from_str::<serde_json::Value>(
-                            solc_metadata_str,
-                        )
-                        .context(
-                            "Failed to deserialize the solc_metadata as a serde_json generic value",
-                        )?;
+                        let solc_metadata =
+                            serde_json::from_str::<serde_json::Value>(solc_metadata_str).context(
+                                "Failed to deserialize the solc_metadata as a serde_json generic value",
+                            )?;
                        let output_value = solc_metadata
                            .get("output")
                            .context("solc_metadata doesn't have an output field")?;
                        let abi_value = output_value
                            .get("abi")
                            .context("solc_metadata output doesn't contain an abi field")?;
                        serde_json::from_value::<JsonAbi>(abi_value.clone())
                            .context("ABI found in solc_metadata output is not valid ABI")?
                    };
                    map.insert(contract_name, (bytecode.object, abi));
                }
            }

            Ok(compiler_output)
        })
    }

    fn supports_mode(
        &self,
        optimize_setting: ModeOptimizerSetting,
        pipeline: ModePipeline,
    ) -> bool {
-        pipeline == ModePipeline::ViaYulIR
-            && SolidityCompiler::supports_mode(&self.0.solc, optimize_setting, pipeline)
+        pipeline == ModePipeline::ViaYulIR &&
+            SolidityCompiler::supports_mode(&self.0.solc, optimize_setting, pipeline)
    }
}
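A hedged sketch of the calling convention implied by the comments above: consult `supports_mode` before dispatching a build, so a compiler is never handed a pipeline it cannot honour (the wrapper function is illustrative, not part of the crate):

async fn compile_if_supported(
    compiler: &dyn SolidityCompiler,
    optimizer: ModeOptimizerSetting,
    pipeline: ModePipeline,
    input: CompilerInput,
) -> Result<CompilerOutput> {
    anyhow::ensure!(
        compiler.supports_mode(optimizer, pipeline),
        "compiler {} does not support the requested optimizer/pipeline combination",
        compiler.version()
    );
    compiler.build(input).await
}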
+234
-242
@@ -2,10 +2,10 @@
//! compiling contracts to EVM bytecode.

use std::{
    path::PathBuf,
    pin::Pin,
    process::Stdio,
    sync::{Arc, LazyLock},
};

use dashmap::DashMap;
@@ -18,11 +18,10 @@ use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, S

use anyhow::{Context as _, Result};
use foundry_compilers_artifacts::{
    output_selection::{
        BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
    },
-    solc::CompilerOutput as SolcOutput,
-    solc::*,
+    solc::{CompilerOutput as SolcOutput, *},
};
use semver::Version;
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
@@ -32,268 +31,261 @@ pub struct Solc(Arc<SolcInner>);

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct SolcInner {
    /// The path of the solidity compiler executable that this object uses.
    solc_path: PathBuf,
    /// The version of the solidity compiler executable that this object uses.
    solc_version: Version,
}

impl Solc {
    pub async fn new(
        context: impl AsRef<SolcConfiguration> + AsRef<WorkingDirectoryConfiguration>,
        version: impl Into<Option<VersionOrRequirement>>,
    ) -> Result<Self> {
        // This is a cache for the compiler objects so that whenever the same compiler version is
        // requested the same object is returned. We do this as we do not want to keep cloning the
        // compiler around.
        static COMPILERS_CACHE: LazyLock<DashMap<(PathBuf, Version), Solc>> =
            LazyLock::new(Default::default);

        let working_directory_configuration =
            AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
        let solc_configuration = AsRef::<SolcConfiguration>::as_ref(&context);

        // We attempt to download the solc binary. Note the following: this call does the version
        // resolution for us. Therefore, even if the download didn't proceed, this function will
        // resolve the version requirement into a canonical version of the compiler. It's then up
        // to us to either use the provided path or not.
-        let version = version
-            .into()
-            .unwrap_or_else(|| solc_configuration.version.clone().into());
+        let version = version.into().unwrap_or_else(|| solc_configuration.version.clone().into());
        let (version, path) =
            download_solc(working_directory_configuration.as_path(), version, false)
                .await
                .context("Failed to download/get path to solc binary")?;

        Ok(COMPILERS_CACHE
            .entry((path.clone(), version.clone()))
            .or_insert_with(|| {
                info!(
                    solc_path = %path.display(),
                    solc_version = %version,
                    "Created a new solc compiler object"
                );
-                Self(Arc::new(SolcInner {
-                    solc_path: path,
-                    solc_version: version,
-                }))
+                Self(Arc::new(SolcInner { solc_path: path, solc_version: version }))
            })
            .clone())
    }
}

impl SolidityCompiler for Solc {
    fn version(&self) -> &Version {
        &self.0.solc_version
    }

    fn path(&self) -> &std::path::Path {
        &self.0.solc_path
    }

    #[tracing::instrument(level = "debug", ret)]
    #[tracing::instrument(
        level = "error",
        skip_all,
        fields(json_in = tracing::field::Empty),
        err(Debug)
    )]
    fn build(
        &self,
        CompilerInput {
            pipeline,
            optimization,
            evm_version,
            allow_paths,
            base_path,
            sources,
            libraries,
            revert_string_handling,
        }: CompilerInput,
    ) -> Pin<Box<dyn Future<Output = Result<CompilerOutput>> + '_>> {
        Box::pin(async move {
            // Be careful to entirely omit the viaIR field if the compiler does not support it,
            // as it will error if you provide fields it does not know about. Because
            // `supports_mode` is called prior to instantiating a compiler, we should never
            // ask for something which is invalid.
            let via_ir = match (pipeline, self.compiler_supports_yul()) {
                (pipeline, true) => pipeline.map(|p| p.via_yul_ir()),
                (_pipeline, false) => None,
            };

            let input = SolcInput {
                language: SolcLanguage::Solidity,
                sources: Sources(
                    sources
                        .into_iter()
                        .map(|(source_path, source_code)| (source_path, Source::new(source_code)))
                        .collect(),
                ),
                settings: Settings {
                    optimizer: Optimizer {
                        enabled: optimization.map(|o| o.optimizations_enabled()),
                        details: Some(Default::default()),
                        ..Default::default()
                    },
                    output_selection: OutputSelection::common_output_selection(
                        [
                            ContractOutputSelection::Abi,
                            ContractOutputSelection::Evm(EvmOutputSelection::ByteCode(
                                BytecodeOutputSelection::Object,
                            )),
                        ]
                        .into_iter()
                        .map(|item| item.to_string()),
                    ),
                    evm_version: evm_version.map(|version| version.to_string().parse().unwrap()),
                    via_ir,
                    libraries: Libraries {
                        libs: libraries
                            .into_iter()
                            .map(|(file_path, libraries)| {
                                (
                                    file_path,
                                    libraries
                                        .into_iter()
                                        .map(|(library_name, library_address)| {
                                            (library_name, library_address.to_string())
                                        })
                                        .collect(),
                                )
                            })
                            .collect(),
                    },
                    debug: revert_string_handling.map(|revert_string_handling| DebuggingSettings {
                        revert_strings: match revert_string_handling {
                            crate::RevertString::Default => Some(RevertStrings::Default),
                            crate::RevertString::Debug => Some(RevertStrings::Debug),
                            crate::RevertString::Strip => Some(RevertStrings::Strip),
                            crate::RevertString::VerboseDebug => Some(RevertStrings::VerboseDebug),
                        },
                        debug_info: Default::default(),
                    }),
                    ..Default::default()
                },
            };

            Span::current().record("json_in", display(serde_json::to_string(&input).unwrap()));

            let path = &self.0.solc_path;
            let mut command = AsyncCommand::new(path);
            command
                .stdin(Stdio::piped())
                .stdout(Stdio::piped())
                .stderr(Stdio::null())
                .arg("--standard-json");

            if let Some(ref base_path) = base_path {
                command.arg("--base-path").arg(base_path);
            }
            if !allow_paths.is_empty() {
                command.arg("--allow-paths").arg(
                    allow_paths
                        .iter()
                        .map(|path| path.display().to_string())
                        .collect::<Vec<_>>()
                        .join(","),
                );
            }
            let mut child = command
                .spawn()
                .with_context(|| format!("Failed to spawn solc at {}", path.display()))?;

            let stdin = child.stdin.as_mut().expect("should be piped");
            let serialized_input = serde_json::to_vec(&input)
                .context("Failed to serialize Standard JSON input for solc")?;
            stdin
                .write_all(&serialized_input)
                .await
                .context("Failed to write Standard JSON to solc stdin")?;
            let output = child
                .wait_with_output()
                .await
                .context("Failed while waiting for solc process to finish")?;

            if !output.status.success() {
                let json_in = serde_json::to_string_pretty(&input)
                    .context("Failed to pretty-print Standard JSON input for logging")?;
                tracing::error!(
                    status = %output.status,
                    json_input = json_in,
                    "Compilation using solc failed"
                );
                anyhow::bail!("Compilation failed");
            }

            let parsed = serde_json::from_slice::<SolcOutput>(&output.stdout)
                .map_err(|e| {
                    anyhow::anyhow!(
                        "failed to parse solc JSON output: {e}\nstdout: {}",
                        String::from_utf8_lossy(&output.stdout)
                    )
                })
                .context("Failed to parse solc standard JSON output")?;

            // Detecting if the compiler output contained errors and reporting them through logs and
            // errors instead of returning the compiler output that might contain errors.
            for error in parsed.errors.iter() {
                if error.severity == Severity::Error {
                    tracing::error!(?error, ?input, "Encountered an error in the compilation");
                    anyhow::bail!("Encountered an error in the compilation: {error}")
                }
            }

            tracing::debug!(
                output = %String::from_utf8_lossy(&output.stdout).to_string(),
                "Compiled successfully"
            );

            let mut compiler_output = CompilerOutput::default();
            for (contract_path, contracts) in parsed.contracts {
                let map = compiler_output
                    .contracts
                    .entry(contract_path.canonicalize().with_context(|| {
-                        format!(
-                            "Failed to canonicalize contract path {}",
-                            contract_path.display()
-                        )
+                        format!("Failed to canonicalize contract path {}", contract_path.display())
                    })?)
                    .or_default();
                for (contract_name, contract_info) in contracts.into_iter() {
                    let source_code = contract_info
                        .evm
                        .and_then(|evm| evm.bytecode)
                        .map(|bytecode| match bytecode.object {
                            BytecodeObject::Bytecode(bytecode) => bytecode.to_string(),
                            BytecodeObject::Unlinked(unlinked) => unlinked,
                        })
                        .context("Unexpected - contract compiled with solc has no source code")?;
                    let abi = contract_info
                        .abi
                        .context("Unexpected - contract compiled with solc has no ABI")?;
                    map.insert(contract_name, (source_code, abi));
                }
            }

            Ok(compiler_output)
        })
    }

    fn supports_mode(
        &self,
        _optimize_setting: ModeOptimizerSetting,
        pipeline: ModePipeline,
    ) -> bool {
-        // solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support mode E
-        // (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler is new enough.
-        pipeline == ModePipeline::ViaEVMAssembly
-            || (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul())
+        // solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support
+        // mode E (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler
+        // is new enough.
+        pipeline == ModePipeline::ViaEVMAssembly ||
+            (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul())
    }
}

impl Solc {
    fn compiler_supports_yul(&self) -> bool {
        const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13);
        SolidityCompiler::version(self) >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR
    }
}
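The cut-off in `compiler_supports_yul` is a plain `semver` comparison; isolated, it looks like this, with 0.8.13 being the first solc release that accepts `--via-ir`, per the comment in `supports_mode`:

use semver::Version;

fn supports_via_ir(solc_version: &Version) -> bool {
    // `Version::new` is const in semver 1.x, so the threshold can be a const.
    const MIN: Version = Version::new(0, 8, 13);
    *solc_version >= MIN
}

// supports_via_ir(&Version::new(0, 8, 12)) == false
// supports_via_ir(&Version::new(0, 8, 30)) == true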
@@ -7,10 +7,7 @@ pragma solidity >=0.6.9;
import "./callable.sol";

contract Main {
-    function main(
-        uint[1] calldata p1,
-        Callable callable
-    ) public pure returns (uint) {
+    function main(uint[1] calldata p1, Callable callable) public returns(uint) {
        return callable.f(p1);
    }
}
@@ -7,82 +7,82 @@ use semver::Version;

#[tokio::test]
async fn contracts_can_be_compiled_with_solc() {
    // Arrange
    let args = TestExecutionContext::default();
    let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
        .await
        .unwrap();

    // Act
    let output = Compiler::new()
        .with_source("./tests/assets/array_one_element/callable.sol")
        .unwrap()
        .with_source("./tests/assets/array_one_element/main.sol")
        .unwrap()
        .try_build(&solc)
        .await;

    // Assert
    let output = output.expect("Failed to compile");
    assert_eq!(output.contracts.len(), 2);

    let main_file_contracts = output
        .contracts
        .get(
            &PathBuf::from("./tests/assets/array_one_element/main.sol")
                .canonicalize()
                .unwrap(),
        )
        .unwrap();
    let callable_file_contracts = output
        .contracts
        .get(
            &PathBuf::from("./tests/assets/array_one_element/callable.sol")
                .canonicalize()
                .unwrap(),
        )
        .unwrap();
    assert!(main_file_contracts.contains_key("Main"));
    assert!(callable_file_contracts.contains_key("Callable"));
}

#[tokio::test]
async fn contracts_can_be_compiled_with_resolc() {
    // Arrange
    let args = TestExecutionContext::default();
    let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30)))
        .await
        .unwrap();

    // Act
    let output = Compiler::new()
        .with_source("./tests/assets/array_one_element/callable.sol")
        .unwrap()
        .with_source("./tests/assets/array_one_element/main.sol")
        .unwrap()
        .try_build(&resolc)
        .await;

    // Assert
    let output = output.expect("Failed to compile");
    assert_eq!(output.contracts.len(), 2);

    let main_file_contracts = output
        .contracts
        .get(
            &PathBuf::from("./tests/assets/array_one_element/main.sol")
                .canonicalize()
                .unwrap(),
        )
        .unwrap();
    let callable_file_contracts = output
        .contracts
        .get(
            &PathBuf::from("./tests/assets/array_one_element/callable.sol")
                .canonicalize()
                .unwrap(),
        )
        .unwrap();
    assert!(main_file_contracts.contains_key("Main"));
    assert!(callable_file_contracts.contains_key("Callable"));
}
@@ -18,7 +18,6 @@ semver = { workspace = true }
temp-dir = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
-serde_with = { workspace = true }
strum = { workspace = true }

[lints]
+613
-838
File diff suppressed because it is too large.
@@ -21,7 +21,6 @@ revive-dt-node = { workspace = true }
revive-dt-node-interaction = { workspace = true }
revive-dt-report = { workspace = true }

-ansi_term = { workspace = true }
alloy = { workspace = true }
anyhow = { workspace = true }
bson = { workspace = true }
@@ -37,7 +36,6 @@ schemars = { workspace = true }
semver = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
-subxt = { workspace = true }

[lints]
workspace = true
File diff suppressed because it is too large.
@@ -6,189 +6,165 @@ use anyhow::Context as _;
 use futures::{FutureExt, StreamExt};
 use revive_dt_common::types::PrivateKeyAllocator;
 use revive_dt_core::Platform;
-use revive_dt_format::{
-    corpus::Corpus,
-    steps::{Step, StepIdx, StepPath},
-};
+use revive_dt_format::steps::{Step, StepIdx, StepPath};
 use tokio::sync::Mutex;
-use tracing::{Instrument, error, info, info_span, instrument, warn};
+use tracing::{error, info, info_span, instrument, warn};
 
 use revive_dt_config::{BenchmarkingContext, Context};
 use revive_dt_report::Reporter;
 
 use crate::{
     differential_benchmarks::{Driver, Watcher, WatcherEvent},
-    helpers::{CachedCompiler, NodePool, create_test_definitions_stream},
+    helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream},
 };
 
 /// Handles the differential testing executing it according to the information defined in the
 /// context
 #[instrument(level = "info", err(Debug), skip_all)]
 pub async fn handle_differential_benchmarks(
     mut context: BenchmarkingContext,
     reporter: Reporter,
 ) -> anyhow::Result<()> {
     // A bit of a hack but we need to override the number of nodes specified through the CLI since
     // benchmarks can only be run on a single node. Perhaps in the future we'd have a cleaner way to
     // do this. But, for the time being, we need to override the cli arguments.
     if context.concurrency_configuration.number_of_nodes != 1 {
         warn!(
             specified_number_of_nodes = context.concurrency_configuration.number_of_nodes,
             updated_number_of_nodes = 1,
             "Invalid number of nodes specified through the CLI. Benchmarks can only be run on a single node. Updated the arguments."
         );
         context.concurrency_configuration.number_of_nodes = 1;
     };
     let full_context = Context::Benchmark(Box::new(context.clone()));
 
     // Discover all of the metadata files that are defined in the context.
-    let corpus = context
-        .corpus_configuration
-        .test_specifiers
-        .clone()
-        .into_iter()
-        .try_fold(Corpus::default(), Corpus::with_test_specifier)
-        .context("Failed to parse the test corpus")?;
-    info!(
-        len = corpus.metadata_file_count(),
-        "Discovered metadata files"
-    );
+    let metadata_files = collect_metadata_files(&context)
+        .context("Failed to collect metadata files for differential testing")?;
+    info!(len = metadata_files.len(), "Discovered metadata files");
 
     // Discover the list of platforms that the tests should run on based on the context.
     let platforms = context
         .platforms
         .iter()
         .copied()
         .map(Into::<&dyn Platform>::into)
         .collect::<Vec<_>>();
 
     // Starting the nodes of the various platforms specified in the context. Note that we use the
     // node pool since it contains all of the code needed to spawn nodes from A to Z and therefore
     // it's the preferred way for us to start nodes even when we're starting just a single node. The
     // added overhead from it is quite small (performance wise) since it's involved only when we're
     // creating the test definitions, but it might have other maintenance overhead as it obscures
     // the fact that only a single node is spawned.
     let platforms_and_nodes = {
         let mut map = BTreeMap::new();
 
         for platform in platforms.iter() {
             let platform_identifier = platform.platform_identifier();
 
             let node_pool = NodePool::new(full_context.clone(), *platform)
                 .await
                 .inspect_err(|err| {
                     error!(
                         ?err,
                         %platform_identifier,
                         "Failed to initialize the node pool for the platform."
                     )
                 })
                 .context("Failed to initialize the node pool")?;
 
             map.insert(platform_identifier, (*platform, node_pool));
         }
 
         map
     };
     info!("Spawned the platform nodes");
 
     // Preparing test definitions for the execution.
     let test_definitions = create_test_definitions_stream(
         &full_context,
-        &corpus,
+        metadata_files.iter(),
         &platforms_and_nodes,
-        None,
         reporter.clone(),
     )
     .await
     .collect::<Vec<_>>()
     .await;
     info!(len = test_definitions.len(), "Created test definitions");
 
     // Creating the objects that will be shared between the various runs. The cached compiler is the
     // only one at the current moment of time that's safe to share between runs.
     let cached_compiler = CachedCompiler::new(
-        context
-            .working_directory
-            .as_path()
-            .join("compilation_cache"),
-        context
-            .compilation_configuration
-            .invalidate_compilation_cache,
+        context.working_directory.as_path().join("compilation_cache"),
+        context.compilation_configuration.invalidate_compilation_cache,
     )
     .await
     .map(Arc::new)
     .context("Failed to initialize cached compiler")?;
 
     // Note: we do not want to run all of the workloads concurrently on all platforms. Rather, we'd
     // like to run all of the workloads for one platform, and then the next sequentially as we'd
     // like for the effect of concurrency to be minimized when we're doing the benchmarking.
     for platform in platforms.iter() {
         let platform_identifier = platform.platform_identifier();
 
         let span = info_span!("Benchmarking for the platform", %platform_identifier);
         let _guard = span.enter();
 
         for test_definition in test_definitions.iter() {
             let platform_information = &test_definition.platforms[&platform_identifier];
 
             let span = info_span!(
                 "Executing workload",
                 metadata_file_path = %test_definition.metadata_file_path.display(),
                 case_idx = %test_definition.case_idx,
                 mode = %test_definition.mode,
             );
             let _guard = span.enter();
 
             // Initializing all of the components requires to execute this particular workload.
             let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
                 context.wallet_configuration.highest_private_key_exclusive(),
             )));
             let (watcher, watcher_tx) = Watcher::new(
+                platform_identifier,
                 platform_information
                     .node
                     .subscribe_to_full_blocks_information()
                     .await
                     .context("Failed to subscribe to full blocks information from the node")?,
-                test_definition
-                    .reporter
-                    .execution_specific_reporter(0usize, platform_identifier),
             );
             let driver = Driver::new(
                 platform_information,
                 test_definition,
                 private_key_allocator,
                 cached_compiler.as_ref(),
                 watcher_tx.clone(),
                 test_definition
                     .case
                     .steps_iterator_for_benchmarks(context.default_repetition_count)
                     .enumerate()
                     .map(|(step_idx, step)| -> (StepPath, Step) {
                         (StepPath::new(vec![StepIdx::new(step_idx)]), step)
                     }),
             )
             .await
             .context("Failed to create the benchmarks driver")?;
 
             futures::future::try_join(
                 watcher.run(),
-                driver
-                    .execute_all()
-                    .instrument(info_span!("Executing Benchmarks", %platform_identifier))
-                    .inspect(|_| {
-                        info!("All transactions submitted - driver completed execution");
-                        watcher_tx
-                            .send(WatcherEvent::AllTransactionsSubmitted)
-                            .unwrap()
-                    }),
+                driver.execute_all().inspect(|_| {
+                    info!("All transactions submitted - driver completed execution");
+                    watcher_tx.send(WatcherEvent::AllTransactionsSubmitted).unwrap()
+                }),
             )
             .await
             .context("Failed to run the driver and executor")
             .inspect(|(_, steps_executed)| info!(steps_executed, "Workload Execution Succeeded"))
             .inspect_err(|err| error!(?err, "Workload Execution Failed"))?;
         }
     }
 
     Ok(())
 }
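The reshaped loop above pairs each benchmark driver with a watcher via futures::future::try_join and signals completion with an explicit WatcherEvent::AllTransactionsSubmitted rather than relying on the channel closing. A condensed, runnable sketch of the same pattern with simplified types (Event and the task bodies below are illustrative, not the repository's API):

use tokio::sync::mpsc;

#[derive(Debug)]
enum Event {
    Submitted(u64),
    AllSubmitted,
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel::<Event>();

    // The driver submits its work and then sends an explicit sentinel so the
    // watcher does not have to rely on the channel being closed.
    let driver = async move {
        for tx_hash in 0..3u64 {
            tx.send(Event::Submitted(tx_hash)).unwrap();
        }
        tx.send(Event::AllSubmitted).unwrap();
    };

    // The watcher consumes events until it sees the sentinel, mirroring how
    // the watcher task drains its receiver in the diff above.
    let watcher = async move {
        while let Some(event) = rx.recv().await {
            match event {
                Event::Submitted(hash) => println!("watching tx {hash}"),
                Event::AllSubmitted => break,
            }
        }
    };

    // join! drives both futures to completion together, as try_join does above.
    tokio::join!(driver, watcher);
}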
@@ -1,8 +1,8 @@
 use std::{collections::HashMap, path::PathBuf};
 
 use alloy::{
     json_abi::JsonAbi,
     primitives::{Address, U256},
 };
 
 use revive_dt_format::metadata::{ContractIdent, ContractInstance};
@@ -10,34 +10,31 @@ use revive_dt_format::metadata::{ContractIdent, ContractInstance};
 #[derive(Clone)]
 /// The state associated with the test execution of one of the workloads.
 pub struct ExecutionState {
-    /// The compiled contracts, these contracts have been compiled and have had the libraries linked
-    /// against them and therefore they're ready to be deployed on-demand.
+    /// The compiled contracts, these contracts have been compiled and have had the libraries
+    /// linked against them and therefore they're ready to be deployed on-demand.
     pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
 
     /// A map of all of the deployed contracts and information about them.
     pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
 
-    /// This map stores the variables used for each one of the cases contained in the metadata file.
+    /// This map stores the variables used for each one of the cases contained in the metadata
+    /// file.
    pub variables: HashMap<String, U256>,
 }
 
 impl ExecutionState {
     pub fn new(
         compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
         deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
     ) -> Self {
-        Self {
-            compiled_contracts,
-            deployed_contracts,
-            variables: Default::default(),
-        }
+        Self { compiled_contracts, deployed_contracts, variables: Default::default() }
     }
 
     pub fn empty() -> Self {
         Self {
             compiled_contracts: Default::default(),
             deployed_contracts: Default::default(),
             variables: Default::default(),
         }
     }
 }
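ExecutionState is a plain lookup structure threaded through a run: deployments are recorded once and resolved by instance name in later steps. A minimal usage sketch with simplified stand-ins for the alloy types (all names here are illustrative):

use std::collections::HashMap;

// Stand-in for alloy's Address; the real struct also carries a JsonAbi.
type Address = [u8; 20];

#[derive(Default)]
struct ExecutionState {
    // contract instance name -> (contract identifier, deployed address)
    deployed_contracts: HashMap<String, (String, Address)>,
}

fn main() {
    let mut state = ExecutionState::default();

    // A deployment step records the instance once...
    state
        .deployed_contracts
        .insert("Counter".into(), ("Counter.sol:Counter".into(), [0u8; 20]));

    // ...and later steps resolve it by instance name.
    if let Some((ident, address)) = state.deployed_contracts.get("Counter") {
        println!("{ident} deployed at {address:?}");
    }
}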
@@ -1,18 +1,13 @@
-use std::{
-    collections::HashMap,
-    pin::Pin,
-    sync::Arc,
-    time::{SystemTime, UNIX_EPOCH},
-};
+use std::{collections::HashSet, pin::Pin, sync::Arc};
 
 use alloy::primitives::{BlockNumber, TxHash};
 use anyhow::Result;
 use futures::{Stream, StreamExt};
-use revive_dt_format::steps::StepPath;
-use revive_dt_report::{ExecutionSpecificReporter, MinedBlockInformation, TransactionInformation};
+use revive_dt_common::types::PlatformIdentifier;
+use revive_dt_node_interaction::MinedBlockInformation;
 use tokio::sync::{
     RwLock,
     mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
 };
 use tracing::{info, instrument};
 
@@ -20,196 +15,175 @@ use tracing::{info, instrument};
 /// and MUST NOT be re-used between workloads since it holds important internal state for a given
 /// workload and is not designed for reuse.
 pub struct Watcher {
+    /// The identifier of the platform that this watcher is for.
+    platform_identifier: PlatformIdentifier,
+
     /// The receive side of the channel that all of the drivers and various other parts of the code
     /// send events to the watcher on.
     rx: UnboundedReceiver<WatcherEvent>,
 
     /// This is a stream of the blocks that were mined by the node. This is for a single platform
     /// and a single node from that platform.
     blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
-
-    /// The reporter used to send events to the report aggregator.
-    reporter: ExecutionSpecificReporter,
 }
 
 impl Watcher {
     pub fn new(
+        platform_identifier: PlatformIdentifier,
         blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
-        reporter: ExecutionSpecificReporter,
     ) -> (Self, UnboundedSender<WatcherEvent>) {
         let (tx, rx) = unbounded_channel::<WatcherEvent>();
-        (
-            Self {
-                rx,
-                blocks_stream,
-                reporter,
-            },
-            tx,
-        )
+        (Self { platform_identifier, rx, blocks_stream }, tx)
     }
 
     #[instrument(level = "info", skip_all)]
     pub async fn run(mut self) -> Result<()> {
         // The first event that the watcher receives must be a `RepetitionStartEvent` that informs
         // the watcher of the last block number that it should ignore and what the block number is
         // for the first important block that it should look for.
         let ignore_block_before = loop {
-            let Some(WatcherEvent::RepetitionStartEvent {
-                ignore_block_before,
-            }) = self.rx.recv().await
+            let Some(WatcherEvent::RepetitionStartEvent { ignore_block_before }) =
+                self.rx.recv().await
             else {
                 continue;
             };
             break ignore_block_before;
         };
 
         // This is the set of the transaction hashes that the watcher should be looking for and
         // watch for them in the blocks. The watcher will keep watching for blocks until it sees
         // that all of the transactions that it was watching for has been seen in the mined blocks.
-        let watch_for_transaction_hashes =
-            Arc::new(RwLock::new(HashMap::<TxHash, (StepPath, SystemTime)>::new()));
+        let watch_for_transaction_hashes = Arc::new(RwLock::new(HashSet::<TxHash>::new()));
 
         // A boolean that keeps track of whether all of the transactions were submitted or if more
         // txs are expected to come through the receive side of the channel. We do not want to rely
         // on the channel closing alone for the watcher to know that all of the transactions were
         // submitted and for there to be an explicit event sent by the core orchestrator that
         // informs the watcher that no further transactions are to be expected and that it can
         // safely ignore the channel.
         let all_transactions_submitted = Arc::new(RwLock::new(false));
 
         let watcher_event_watching_task = {
             let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
             let all_transactions_submitted = all_transactions_submitted.clone();
             async move {
                 while let Some(watcher_event) = self.rx.recv().await {
                     match watcher_event {
                         // Subsequent repetition starts are ignored since certain workloads can
                         // contain nested repetitions and therefore there's no use in doing any
                         // action if the repetitions are nested.
-                        WatcherEvent::RepetitionStartEvent { .. } => {}
-                        WatcherEvent::SubmittedTransaction {
-                            transaction_hash,
-                            step_path,
-                        } => {
-                            watch_for_transaction_hashes
-                                .write()
-                                .await
-                                .insert(transaction_hash, (step_path, SystemTime::now()));
-                        }
-                        WatcherEvent::AllTransactionsSubmitted => {
-                            *all_transactions_submitted.write().await = true;
-                            self.rx.close();
-                            info!("Watcher's Events Watching Task Finished");
-                            break;
-                        }
-                    }
-                }
-            }
-        };
-        let reporter = self.reporter.clone();
-        let block_information_watching_task = {
-            let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
-            let all_transactions_submitted = all_transactions_submitted.clone();
-            let mut blocks_information_stream = self.blocks_stream;
-            async move {
-                while let Some(mut block) = blocks_information_stream.next().await {
-                    // If the block number is equal to or less than the last block before the
-                    // repetition then we ignore it and continue on to the next block.
-                    if block.ethereum_block_information.block_number <= ignore_block_before {
-                        continue;
-                    }
-                    {
-                        let watch_for_transaction_hashes =
-                            watch_for_transaction_hashes.read().await;
-                        for tx_hash in block.ethereum_block_information.transaction_hashes.iter() {
-                            let Some((step_path, _)) = watch_for_transaction_hashes.get(tx_hash)
-                            else {
-                                continue;
-                            };
-                            *block.tx_counts.entry(step_path.clone()).or_default() += 1
-                        }
-                    }
-                    reporter
-                        .report_block_mined_event(block.clone())
-                        .expect("Can't fail");
-
-                    if *all_transactions_submitted.read().await
-                        && watch_for_transaction_hashes.read().await.is_empty()
-                    {
-                        break;
-                    }
-
-                    info!(
-                        block_number = block.ethereum_block_information.block_number,
-                        block_tx_count = block.ethereum_block_information.transaction_hashes.len(),
-                        remaining_transactions = watch_for_transaction_hashes.read().await.len(),
-                        "Observed a block"
-                    );
-
-                    // Remove all of the transaction hashes observed in this block from the txs we
-                    // are currently watching for.
-                    let mut watch_for_transaction_hashes =
-                        watch_for_transaction_hashes.write().await;
-                    for tx_hash in block.ethereum_block_information.transaction_hashes.iter() {
-                        let Some((step_path, submission_time)) =
-                            watch_for_transaction_hashes.remove(tx_hash)
-                        else {
-                            continue;
-                        };
-                        let transaction_information = TransactionInformation {
-                            transaction_hash: *tx_hash,
-                            submission_timestamp: submission_time
-                                .duration_since(UNIX_EPOCH)
-                                .expect("Can't fail")
-                                .as_secs() as _,
-                            block_timestamp: block.ethereum_block_information.block_timestamp,
-                            block_number: block.ethereum_block_information.block_number,
-                        };
-                        reporter
-                            .report_step_transaction_information_event(
-                                step_path,
-                                transaction_information,
-                            )
-                            .expect("Can't fail")
-                    }
-                }
-
-                info!("Watcher's Block Watching Task Finished");
-            }
-        };
-
-        let (_, _) =
-            futures::future::join(watcher_event_watching_task, block_information_watching_task)
-                .await;
-
-        Ok(())
-    }
+                        WatcherEvent::RepetitionStartEvent { .. } => {},
+                        WatcherEvent::SubmittedTransaction { transaction_hash } => {
+                            watch_for_transaction_hashes.write().await.insert(transaction_hash);
+                        },
+                        WatcherEvent::AllTransactionsSubmitted => {
+                            *all_transactions_submitted.write().await = true;
+                            self.rx.close();
+                            info!("Watcher's Events Watching Task Finished");
+                            break;
+                        },
+                    }
+                }
+            }
+        };
+        let block_information_watching_task = {
+            let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
+            let all_transactions_submitted = all_transactions_submitted.clone();
+            let mut blocks_information_stream = self.blocks_stream;
+            async move {
+                let mut mined_blocks_information = Vec::new();
+
+                while let Some(block) = blocks_information_stream.next().await {
+                    // If the block number is equal to or less than the last block before the
+                    // repetition then we ignore it and continue on to the next block.
+                    if block.block_number <= ignore_block_before {
+                        continue;
+                    }
+
+                    if *all_transactions_submitted.read().await &&
+                        watch_for_transaction_hashes.read().await.is_empty()
+                    {
+                        break;
+                    }
+
+                    info!(
+                        remaining_transactions = watch_for_transaction_hashes.read().await.len(),
+                        block_tx_count = block.transaction_hashes.len(),
+                        "Observed a block"
+                    );
+
+                    // Remove all of the transaction hashes observed in this block from the txs we
+                    // are currently watching for.
+                    let mut watch_for_transaction_hashes =
+                        watch_for_transaction_hashes.write().await;
+                    for tx_hash in block.transaction_hashes.iter() {
+                        watch_for_transaction_hashes.remove(tx_hash);
+                    }
+
+                    mined_blocks_information.push(block);
+                }
+
+                info!("Watcher's Block Watching Task Finished");
+                mined_blocks_information
+            }
+        };
+
+        let (_, mined_blocks_information) =
+            futures::future::join(watcher_event_watching_task, block_information_watching_task)
+                .await;
+
+        // region:TEMPORARY
+        {
+            // TODO: The following core is TEMPORARY and will be removed once we have proper
+            // reporting in place and then it can be removed. This serves as as way of doing some
+            // very simple reporting for the time being.
+            use std::io::Write;
+
+            let mut stderr = std::io::stderr().lock();
+            writeln!(stderr, "Watcher information for {}", self.platform_identifier)?;
+            writeln!(stderr, "block_number,block_timestamp,mined_gas,block_gas_limit,tx_count")?;
+            for block in mined_blocks_information {
+                writeln!(
+                    stderr,
+                    "{},{},{},{},{}",
+                    block.block_number,
+                    block.block_timestamp,
+                    block.mined_gas,
+                    block.block_gas_limit,
+                    block.transaction_hashes.len()
+                )?
+            }
+        }
+        // endregion:TEMPORARY
+
+        Ok(())
+    }
 }
 
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub enum WatcherEvent {
     /// Informs the watcher that it should begin watching for the blocks mined by the platforms.
     /// Before the watcher receives this event it will not be watching for the mined blocks. The
     /// reason behind this is that we do not want the initialization transactions (e.g., contract
     /// deployments) to be included in the overall TPS and GPS measurements since these blocks will
     /// most likely only contain a single transaction since they're just being used for
     /// initialization.
     RepetitionStartEvent {
         /// This is the block number of the last block seen before the repetition started. This is
         /// used to instruct the watcher to ignore all block prior to this block when it starts
         /// streaming the blocks.
         ignore_block_before: BlockNumber,
     },
+
     /// Informs the watcher that a transaction was submitted and that the watcher should watch for a
     /// transaction with this hash in the blocks that it watches.
     SubmittedTransaction {
         /// The hash of the submitted transaction.
         transaction_hash: TxHash,
-        /// The step path of the step that the transaction belongs to.
-        step_path: StepPath,
     },
     /// Informs the watcher that all of the transactions of this benchmark have been submitted and
     /// that it can expect to receive no further transaction hashes and not even watch the channel
     /// any longer.
     AllTransactionsSubmitted,
 }
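The rewritten watcher reduces its bookkeeping to a HashSet of pending transaction hashes: stale blocks are skipped, observed hashes are removed, and the loop stops once the driver has signalled completion and the set is drained. A synchronous sketch of that termination rule (a plain iterator of blocks stands in for the node's block stream; all names are illustrative):

use std::collections::HashSet;

struct Block {
    block_number: u64,
    transaction_hashes: Vec<u64>,
}

// Returns the blocks that belong to this repetition, consuming entries from
// `pending` as their transactions are observed on-chain.
fn drain_pending(
    blocks: impl IntoIterator<Item = Block>,
    pending: &mut HashSet<u64>,
    all_submitted: bool,
    ignore_block_before: u64,
) -> Vec<Block> {
    let mut mined = Vec::new();
    for block in blocks {
        // Blocks mined before the repetition started carry no information.
        if block.block_number <= ignore_block_before {
            continue;
        }
        // Stop once nothing further can arrive and nothing is outstanding.
        if all_submitted && pending.is_empty() {
            break;
        }
        for tx_hash in &block.transaction_hashes {
            pending.remove(tx_hash);
        }
        mined.push(block);
    }
    mined
}

fn main() {
    let mut pending: HashSet<u64> = [1, 2].into();
    let blocks = vec![
        Block { block_number: 10, transaction_hashes: vec![1] },
        Block { block_number: 11, transaction_hashes: vec![2] },
        Block { block_number: 12, transaction_hashes: vec![] },
    ];
    let mined = drain_pending(blocks, &mut pending, true, 9);
    assert_eq!(mined.len(), 2); // block 12 triggers the break
}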
File diff suppressed because it is too large
@@ -1,371 +1,266 @@
 //! The main entry point into differential testing.
 
 use std::{
     collections::{BTreeMap, BTreeSet},
     io::{BufWriter, Write, stderr},
     sync::Arc,
     time::{Duration, Instant},
 };
 
-use ansi_term::{ANSIStrings, Color};
+use crate::Platform;
 use anyhow::Context as _;
 use futures::{FutureExt, StreamExt};
-use revive_dt_common::{cached_fs::read_to_string, types::PrivateKeyAllocator};
-use revive_dt_core::Platform;
-use revive_dt_format::corpus::Corpus;
+use revive_dt_common::types::PrivateKeyAllocator;
 use tokio::sync::{Mutex, RwLock, Semaphore};
 use tracing::{Instrument, error, info, info_span, instrument};
 
-use revive_dt_config::{Context, OutputFormat, TestExecutionContext};
+use revive_dt_config::{Context, TestExecutionContext};
 use revive_dt_report::{Reporter, ReporterEvent, TestCaseStatus};
 
 use crate::{
     differential_tests::Driver,
-    helpers::{CachedCompiler, NodePool, create_test_definitions_stream},
+    helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream},
 };
 
 /// Handles the differential testing executing it according to the information defined in the
 /// context
 #[instrument(level = "info", err(Debug), skip_all)]
 pub async fn handle_differential_tests(
     context: TestExecutionContext,
     reporter: Reporter,
 ) -> anyhow::Result<()> {
     let reporter_clone = reporter.clone();
 
     // Discover all of the metadata files that are defined in the context.
-    let corpus = context
-        .corpus_configuration
-        .test_specifiers
-        .clone()
-        .into_iter()
-        .try_fold(Corpus::default(), Corpus::with_test_specifier)
-        .context("Failed to parse the test corpus")?;
-    info!(
-        len = corpus.metadata_file_count(),
-        "Discovered metadata files"
-    );
+    let metadata_files = collect_metadata_files(&context)
+        .context("Failed to collect metadata files for differential testing")?;
+    info!(len = metadata_files.len(), "Discovered metadata files");
 
     // Discover the list of platforms that the tests should run on based on the context.
     let platforms = context
         .platforms
         .iter()
         .copied()
         .map(Into::<&dyn Platform>::into)
         .collect::<Vec<_>>();
 
     // Starting the nodes of the various platforms specified in the context.
     let platforms_and_nodes = {
         let mut map = BTreeMap::new();
 
         for platform in platforms.iter() {
             let platform_identifier = platform.platform_identifier();
 
             let context = Context::Test(Box::new(context.clone()));
             let node_pool = NodePool::new(context, *platform)
                 .await
                 .inspect_err(|err| {
                     error!(
                         ?err,
                         %platform_identifier,
                         "Failed to initialize the node pool for the platform."
                     )
                 })
                 .context("Failed to initialize the node pool")?;
 
             map.insert(platform_identifier, (*platform, node_pool));
         }
 
         map
     };
     info!("Spawned the platform nodes");
 
     // Preparing test definitions.
-    let only_execute_failed_tests = match context.ignore_success_configuration.path.as_ref() {
-        Some(path) => {
-            let report = read_to_string(path)
-                .context("Failed to read the report file to ignore the succeeding test cases")?;
-            Some(serde_json::from_str(&report).context("Failed to deserialize report")?)
-        }
-        None => None,
-    };
     let full_context = Context::Test(Box::new(context.clone()));
     let test_definitions = create_test_definitions_stream(
         &full_context,
-        &corpus,
+        metadata_files.iter(),
         &platforms_and_nodes,
-        only_execute_failed_tests.as_ref(),
         reporter.clone(),
     )
     .await
     .collect::<Vec<_>>()
     .await;
     info!(len = test_definitions.len(), "Created test definitions");
 
     // Creating everything else required for the driver to run.
     let cached_compiler = CachedCompiler::new(
-        context
-            .working_directory
-            .as_path()
-            .join("compilation_cache"),
-        context
-            .compilation_configuration
-            .invalidate_compilation_cache,
+        context.working_directory.as_path().join("compilation_cache"),
+        context.compilation_configuration.invalidate_compilation_cache,
    )
     .await
     .map(Arc::new)
     .context("Failed to initialize cached compiler")?;
     let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
         context.wallet_configuration.highest_private_key_exclusive(),
     )));
 
     // Creating the driver and executing all of the steps.
     let semaphore = context
         .concurrency_configuration
         .concurrency_limit()
         .map(Semaphore::new)
         .map(Arc::new);
     let running_task_list = Arc::new(RwLock::new(BTreeSet::<usize>::new()));
     let driver_task = futures::future::join_all(test_definitions.iter().enumerate().map(
         |(test_id, test_definition)| {
             let running_task_list = running_task_list.clone();
             let semaphore = semaphore.clone();
 
             let private_key_allocator = private_key_allocator.clone();
             let cached_compiler = cached_compiler.clone();
             let mode = test_definition.mode.clone();
             let span = info_span!(
                 "Executing Test Case",
                 test_id,
                 metadata_file_path = %test_definition.metadata_file_path.display(),
                 case_idx = %test_definition.case_idx,
                 mode = %mode,
             );
             async move {
                 let permit = match semaphore.as_ref() {
                     Some(semaphore) => Some(semaphore.acquire().await.expect("Can't fail")),
                     None => None,
                 };
 
                 running_task_list.write().await.insert(test_id);
                 let driver = match Driver::new_root(
                     test_definition,
                     private_key_allocator,
                     &cached_compiler,
                 )
                 .await
                 {
                     Ok(driver) => driver,
                     Err(error) => {
                         test_definition
                             .reporter
                             .report_test_failed_event(format!("{error:#}"))
                             .expect("Can't fail");
                         error!("Test Case Failed");
                         drop(permit);
                         running_task_list.write().await.remove(&test_id);
                         return;
-                    }
+                    },
                 };
                 info!("Created the driver for the test case");
 
                 match driver.execute_all().await {
                     Ok(steps_executed) => test_definition
                         .reporter
                         .report_test_succeeded_event(steps_executed)
                         .expect("Can't fail"),
                     Err(error) => {
                         test_definition
                             .reporter
                             .report_test_failed_event(format!("{error:#}"))
                             .expect("Can't fail");
                         error!("Test Case Failed");
-                    }
+                    },
                 };
                 info!("Finished the execution of the test case");
                 drop(permit);
                 running_task_list.write().await.remove(&test_id);
             }
             .instrument(span)
         },
     ))
     .inspect(|_| {
         info!("Finished executing all test cases");
-        reporter_clone
-            .report_completion_event()
-            .expect("Can't fail")
+        reporter_clone.report_completion_event().expect("Can't fail")
     });
-    let cli_reporting_task = start_cli_reporting_task(context.output_format, reporter);
+    let cli_reporting_task = start_cli_reporting_task(reporter);
 
     tokio::task::spawn(async move {
         loop {
             let remaining_tasks = running_task_list.read().await;
-            info!(
-                count = remaining_tasks.len(),
-                ?remaining_tasks,
-                "Remaining Tests"
-            );
-            drop(remaining_tasks);
-            tokio::time::sleep(Duration::from_secs(10)).await
+            info!(count = remaining_tasks.len(), ?remaining_tasks, "Remaining Tests");
+            tokio::time::sleep(Duration::from_secs(10)).await
         }
     });
 
     futures::future::join(driver_task, cli_reporting_task).await;
 
     Ok(())
 }
 
 #[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
-async fn start_cli_reporting_task(output_format: OutputFormat, reporter: Reporter) {
+async fn start_cli_reporting_task(reporter: Reporter) {
     let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
     drop(reporter);
 
     let start = Instant::now();
 
-    let mut global_success_count = 0;
-    let mut global_failure_count = 0;
-    let mut global_ignore_count = 0;
-
-    let mut buf = BufWriter::new(stderr());
-    while let Ok(event) = aggregator_events_rx.recv().await {
-        let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
-            metadata_file_path,
-            mode,
-            case_status,
-        } = event
-        else {
-            continue;
-        };
-
-        match output_format {
-            OutputFormat::Legacy => {
-                let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
-                for (case_idx, case_status) in case_status.into_iter() {
-                    let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
-                    let _ = match case_status {
-                        TestCaseStatus::Succeeded { steps_executed } => {
-                            global_success_count += 1;
-                            writeln!(
-                                buf,
-                                "{}",
-                                ANSIStrings(&[
-                                    Color::Green.bold().paint("Case Succeeded"),
-                                    Color::Green
-                                        .paint(format!(" - Steps Executed: {steps_executed}")),
-                                ])
-                            )
-                        }
-                        TestCaseStatus::Failed { reason } => {
-                            global_failure_count += 1;
-                            writeln!(
-                                buf,
-                                "{}",
-                                ANSIStrings(&[
-                                    Color::Red.bold().paint("Case Failed"),
-                                    Color::Red.paint(format!(" - Reason: {}", reason.trim())),
-                                ])
-                            )
-                        }
-                        TestCaseStatus::Ignored { reason, .. } => {
-                            global_ignore_count += 1;
-                            writeln!(
-                                buf,
-                                "{}",
-                                ANSIStrings(&[
-                                    Color::Yellow.bold().paint("Case Ignored"),
-                                    Color::Yellow.paint(format!(" - Reason: {}", reason.trim())),
-                                ])
-                            )
-                        }
-                    };
-                }
-                let _ = writeln!(buf);
-            }
-            OutputFormat::CargoTestLike => {
-                writeln!(
-                    buf,
-                    "\t{} {} - {}\n",
-                    Color::Green.paint("Running"),
-                    metadata_file_path.display(),
-                    mode
-                )
-                .unwrap();
-
-                let mut success_count = 0;
-                let mut failure_count = 0;
-                let mut ignored_count = 0;
-                writeln!(buf, "running {} tests", case_status.len()).unwrap();
-                for (case_idx, case_result) in case_status.iter() {
-                    let status = match case_result {
-                        TestCaseStatus::Succeeded { .. } => {
-                            success_count += 1;
-                            global_success_count += 1;
-                            Color::Green.paint("ok")
-                        }
-                        TestCaseStatus::Failed { reason } => {
-                            failure_count += 1;
-                            global_failure_count += 1;
-                            Color::Red.paint(format!("FAILED, {reason}"))
-                        }
-                        TestCaseStatus::Ignored { reason, .. } => {
-                            ignored_count += 1;
-                            global_ignore_count += 1;
-                            Color::Yellow.paint(format!("ignored, {reason:?}"))
-                        }
-                    };
-                    writeln!(buf, "test case_idx_{} ... {}", case_idx, status).unwrap();
-                }
-                writeln!(buf).unwrap();
-
-                let status = if failure_count > 0 {
-                    Color::Red.paint("FAILED")
-                } else {
-                    Color::Green.paint("ok")
-                };
-                writeln!(
-                    buf,
-                    "test result: {}. {} passed; {} failed; {} ignored",
-                    status, success_count, failure_count, ignored_count,
-                )
-                .unwrap();
-                writeln!(buf).unwrap();
-
-                if aggregator_events_rx.is_empty() {
-                    buf = tokio::task::spawn_blocking(move || {
-                        buf.flush().unwrap();
-                        buf
-                    })
-                    .await
-                    .unwrap();
-                }
-            }
-        }
-    }
-    info!("Aggregator Broadcast Channel Closed");
-
-    // Summary at the end.
-    match output_format {
-        OutputFormat::Legacy => {
-            writeln!(
-                buf,
-                "{} cases: {} cases succeeded, {} cases failed in {} seconds",
-                global_success_count + global_failure_count + global_ignore_count,
-                Color::Green.paint(global_success_count.to_string()),
-                Color::Red.paint(global_failure_count.to_string()),
-                start.elapsed().as_secs()
-            )
-            .unwrap();
-        }
-        OutputFormat::CargoTestLike => {
-            writeln!(
-                buf,
-                "run finished. {} passed; {} failed; {} ignored; finished in {}s",
-                global_success_count,
-                global_failure_count,
-                global_ignore_count,
-                start.elapsed().as_secs()
-            )
-            .unwrap();
-        }
-    }
+    const GREEN: &str = "\x1B[32m";
+    const RED: &str = "\x1B[31m";
+    const GREY: &str = "\x1B[90m";
+    const COLOR_RESET: &str = "\x1B[0m";
+    const BOLD: &str = "\x1B[1m";
+    const BOLD_RESET: &str = "\x1B[22m";
+
+    let mut number_of_successes = 0;
+    let mut number_of_failures = 0;
+
+    let mut buf = BufWriter::new(stderr());
+    while let Ok(event) = aggregator_events_rx.recv().await {
+        let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
+            metadata_file_path,
+            mode,
+            case_status,
+        } = event
+        else {
+            continue;
+        };
+
+        let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
+        for (case_idx, case_status) in case_status.into_iter() {
+            let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
+            let _ = match case_status {
+                TestCaseStatus::Succeeded { steps_executed } => {
+                    number_of_successes += 1;
+                    writeln!(
+                        buf,
+                        "{}{}Case Succeeded{} - Steps Executed: {}{}",
+                        GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET
+                    )
+                },
+                TestCaseStatus::Failed { reason } => {
+                    number_of_failures += 1;
+                    writeln!(
+                        buf,
+                        "{}{}Case Failed{} - Reason: {}{}",
+                        RED,
+                        BOLD,
+                        BOLD_RESET,
+                        reason.trim(),
+                        COLOR_RESET,
+                    )
+                },
+                TestCaseStatus::Ignored { reason, .. } => writeln!(
+                    buf,
+                    "{}{}Case Ignored{} - Reason: {}{}",
+                    GREY,
+                    BOLD,
+                    BOLD_RESET,
+                    reason.trim(),
+                    COLOR_RESET,
+                ),
+            };
+        }
+        let _ = writeln!(buf);
+    }
+
+    // Summary at the end.
+    let _ = writeln!(
+        buf,
+        "{} cases: {}{}{} cases succeeded, {}{}{} cases failed in {} seconds",
+        number_of_successes + number_of_failures,
+        GREEN,
+        number_of_successes,
+        COLOR_RESET,
+        RED,
+        number_of_failures,
+        COLOR_RESET,
+        start.elapsed().as_secs()
+    );
 }
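The test handler keeps its optional-semaphore pattern for bounding how many test cases run at once; when no limit is configured the permit is simply skipped. A stripped-down sketch of that pattern (the task body is illustrative; the tokio and futures crates are assumed):

use std::sync::Arc;
use tokio::sync::Semaphore;

#[tokio::main]
async fn main() {
    // None means unbounded; Some(n) caps the number of concurrent cases.
    let limit: Option<usize> = Some(2);
    let semaphore = limit.map(Semaphore::new).map(Arc::new);

    let tasks = (0..5).map(|test_id| {
        let semaphore = semaphore.clone();
        async move {
            // Acquire a permit only when a limit is configured; dropping
            // `_permit` at the end of the scope releases it.
            let _permit = match semaphore.as_ref() {
                Some(semaphore) => Some(semaphore.acquire().await.expect("semaphore closed")),
                None => None,
            };
            println!("running test case {test_id}");
        }
    });

    futures::future::join_all(tasks).await;
}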
@@ -1,8 +1,8 @@
 use std::{collections::HashMap, path::PathBuf};
 
 use alloy::{
     json_abi::JsonAbi,
     primitives::{Address, U256},
 };
 
 use revive_dt_format::metadata::{ContractIdent, ContractInstance};
@@ -10,26 +10,23 @@ use revive_dt_format::metadata::{ContractIdent, ContractInstance};
 #[derive(Clone)]
 /// The state associated with the test execution of one of the tests.
 pub struct ExecutionState {
-    /// The compiled contracts, these contracts have been compiled and have had the libraries linked
-    /// against them and therefore they're ready to be deployed on-demand.
+    /// The compiled contracts, these contracts have been compiled and have had the libraries
+    /// linked against them and therefore they're ready to be deployed on-demand.
    pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
 
     /// A map of all of the deployed contracts and information about them.
     pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
 
-    /// This map stores the variables used for each one of the cases contained in the metadata file.
+    /// This map stores the variables used for each one of the cases contained in the metadata
+    /// file.
     pub variables: HashMap<String, U256>,
 }
 
 impl ExecutionState {
     pub fn new(
         compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
         deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
     ) -> Self {
-        Self {
-            compiled_contracts,
-            deployed_contracts,
-            variables: Default::default(),
-        }
+        Self { compiled_contracts, deployed_contracts, variables: Default::default() }
     }
 }
@@ -2,16 +2,16 @@
 //! be reused between runs.
 
 use std::{
     borrow::Cow,
     collections::HashMap,
     path::{Path, PathBuf},
     sync::{Arc, LazyLock},
 };
 
+use crate::Platform;
 use futures::FutureExt;
 use revive_dt_common::{iterators::FilesWithExtensionIterator, types::CompilerIdentifier};
 use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
-use revive_dt_core::Platform;
 use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};
 
 use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
@@ -23,33 +23,30 @@ use tokio::sync::{Mutex, RwLock, Semaphore};
 use tracing::{Instrument, debug, debug_span, instrument};
 
 pub struct CachedCompiler<'a> {
     /// The cache that stores the compiled contracts.
     artifacts_cache: ArtifactsCache,
 
     /// This is a mechanism that the cached compiler uses so that if multiple compilation requests
     /// come in for the same contract we do not compile each of them; we compile it once and all
     /// other tasks that request the same compilation concurrently get the cached version.
     cache_key_lock: RwLock<HashMap<CacheKey<'a>, Arc<Mutex<()>>>>,
 }
 
 impl<'a> CachedCompiler<'a> {
     pub async fn new(path: impl AsRef<Path>, invalidate_cache: bool) -> Result<Self> {
         let mut cache = ArtifactsCache::new(path);
         if invalidate_cache {
             cache = cache
                 .with_invalidated_cache()
                 .await
                 .context("Failed to invalidate compilation cache directory")?;
         }
-        Ok(Self {
-            artifacts_cache: cache,
-            cache_key_lock: Default::default(),
-        })
+        Ok(Self { artifacts_cache: cache, cache_key_lock: Default::default() })
     }
 
     /// Compiles or gets the compilation artifacts from the cache.
     #[allow(clippy::too_many_arguments)]
     #[instrument(
         level = "debug",
         skip_all,
         fields(
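The `cache_key_lock` field above implements a per-key, double-checked lock: callers first look the mutex up under a read lock and only take the write lock to insert a missing entry. A minimal, self-contained sketch of the same pattern (the `KeyedLocks` name and `String` keys are illustrative, not from this crate; assumes tokio with the `rt` and `macros` features):

use std::{collections::HashMap, sync::Arc};
use tokio::sync::{Mutex, RwLock};

/// Per-key lock map: the first caller for a key does the work, concurrent
/// callers for the same key wait on the same mutex and then read the cache.
#[derive(Default)]
struct KeyedLocks {
    locks: RwLock<HashMap<String, Arc<Mutex<()>>>>,
}

impl KeyedLocks {
    async fn lock_for(&self, key: &str) -> Arc<Mutex<()>> {
        // Fast path: the lock for this key already exists.
        if let Some(lock) = self.locks.read().await.get(key).cloned() {
            return lock;
        }
        // Slow path: insert it under the write lock (or reuse one that
        // another task inserted between our read and our write).
        self.locks.write().await.entry(key.to_owned()).or_default().clone()
    }
}

#[tokio::main]
async fn main() {
    let locks = Arc::new(KeyedLocks::default());
    let lock = locks.lock_for("contract-A").await;
    let _guard = lock.lock().await; // held while "compiling"
    println!("compiling contract-A exactly once");
}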
@@ -59,297 +56,309 @@ impl<'a> CachedCompiler<'a> {
         ),
         err
     )]
     pub async fn compile_contracts(
         &self,
         metadata: &'a Metadata,
         metadata_file_path: &'a Path,
         mode: Cow<'a, Mode>,
         deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
         compiler: &dyn SolidityCompiler,
         platform: &dyn Platform,
         reporter: &ExecutionSpecificReporter,
     ) -> Result<CompilerOutput> {
         let cache_key = CacheKey {
             compiler_identifier: platform.compiler_identifier(),
             compiler_version: compiler.version().clone(),
             metadata_file_path,
             solc_mode: mode.clone(),
         };
 
         let compilation_callback = || {
             async move {
                 compile_contracts(
                     metadata
                         .directory()
                         .context("Failed to get metadata directory while preparing compilation")?,
                     metadata
                         .files_to_compile()
                         .context("Failed to enumerate files to compile from metadata")?,
                     &mode,
                     deployed_libraries,
                     compiler,
                     reporter,
                 )
                 .map(|compilation_result| compilation_result.map(CacheValue::new))
                 .await
             }
             .instrument(debug_span!(
                 "Running compilation for the cache key",
                 cache_key.compiler_identifier = %cache_key.compiler_identifier,
                 cache_key.compiler_version = %cache_key.compiler_version,
                 cache_key.metadata_file_path = %cache_key.metadata_file_path.display(),
                 cache_key.solc_mode = %cache_key.solc_mode,
             ))
         };
 
         let compiled_contracts = match deployed_libraries {
             // If deployed libraries have been specified then we will re-compile the contract as it
             // means that linking is required in this case.
             Some(_) => {
                 debug!("Deployed libraries defined, recompilation must take place");
                 debug!("Cache miss");
                 compilation_callback()
                     .await
                     .context("Compilation callback for deployed libraries failed")?
                     .compiler_output
-            }
+            },
             // If no deployed libraries are specified then we can follow the cached flow and attempt
             // to lookup the compilation artifacts in the cache.
             None => {
                 debug!("Deployed libraries undefined, attempting to make use of cache");
 
                 // Lock this specific cache key such that we do not get inconsistent state. We want
                 // that when multiple cases come in asking for the compilation artifacts then they
                 // don't all trigger a compilation if there's a cache miss. Hence, the lock here.
                 let read_guard = self.cache_key_lock.read().await;
                 let mutex = match read_guard.get(&cache_key).cloned() {
                     Some(value) => {
                         drop(read_guard);
                         value
-                    }
+                    },
                     None => {
                         drop(read_guard);
                         self.cache_key_lock
                             .write()
                             .await
                             .entry(cache_key.clone())
                             .or_default()
                             .clone()
-                    }
+                    },
                 };
                 let _guard = mutex.lock().await;
 
                 match self.artifacts_cache.get(&cache_key).await {
                     Some(cache_value) => {
                         if deployed_libraries.is_some() {
                             reporter
                                 .report_post_link_contracts_compilation_succeeded_event(
                                     compiler.version().clone(),
                                     compiler.path(),
                                     true,
                                     None,
                                     cache_value.compiler_output.clone(),
                                 )
                                 .expect("Can't happen");
                         } else {
                             reporter
                                 .report_pre_link_contracts_compilation_succeeded_event(
                                     compiler.version().clone(),
                                     compiler.path(),
                                     true,
                                     None,
                                     cache_value.compiler_output.clone(),
                                 )
                                 .expect("Can't happen");
                         }
                         cache_value.compiler_output
-                    }
+                    },
                     None => {
                         let compiler_output = compilation_callback()
                             .await
                             .context("Compilation callback failed (cache miss path)")?
                             .compiler_output;
                         self.artifacts_cache
                             .insert(
                                 &cache_key,
-                                &CacheValue {
-                                    compiler_output: compiler_output.clone(),
-                                },
+                                &CacheValue { compiler_output: compiler_output.clone() },
                             )
                             .await
                             .context(
                                 "Failed to write the cached value of the compilation artifacts",
                             )?;
                         compiler_output
-                    }
+                    },
                 }
-            }
+            },
         };
 
         Ok(compiled_contracts)
     }
 }
 
 async fn compile_contracts(
     metadata_directory: impl AsRef<Path>,
     mut files_to_compile: impl Iterator<Item = PathBuf>,
     mode: &Mode,
     deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
     compiler: &dyn SolidityCompiler,
     reporter: &ExecutionSpecificReporter,
 ) -> Result<CompilerOutput> {
     // Puts a limit on how many compilations we can perform at any given time, which helps with
     // some of the errors we've been seeing under high concurrency on macOS (we have not tried it
     // on Linux, so we don't know whether these issues persist there or not).
     static SPAWN_GATE: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(5));
     let _permit = SPAWN_GATE.acquire().await?;
 
     let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
         .with_allowed_extension("sol")
         .with_use_cached_fs(true)
         .collect::<Vec<_>>();
 
     let compilation = Compiler::new()
         .with_allow_path(metadata_directory)
         // Handling the modes
         .with_optimization(mode.optimize_setting)
         .with_pipeline(mode.pipeline)
         // Adding the contract sources to the compiler.
         .try_then(|compiler| {
             files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path))
         })?
         // Adding the deployed libraries to the compiler.
         .then(|compiler| {
             deployed_libraries
                 .iter()
                 .flat_map(|value| value.iter())
                 .map(|(instance, (ident, address, abi))| (instance, ident, address, abi))
                 .flat_map(|(_, ident, address, _)| {
-                    all_sources_in_dir
-                        .iter()
-                        .map(move |path| (ident, address, path))
+                    all_sources_in_dir.iter().map(move |path| (ident, address, path))
                 })
                 .fold(compiler, |compiler, (ident, address, path)| {
                     compiler.with_library(path, ident.as_str(), *address)
                 })
         });
 
     let input = compilation.input().clone();
     let output = compilation.try_build(compiler).await;
 
     match (output.as_ref(), deployed_libraries.is_some()) {
         (Ok(output), true) => {
             reporter
                 .report_post_link_contracts_compilation_succeeded_event(
                     compiler.version().clone(),
                     compiler.path(),
                     false,
                     input,
                     output.clone(),
                 )
                 .expect("Can't happen");
-        }
+        },
         (Ok(output), false) => {
             reporter
                 .report_pre_link_contracts_compilation_succeeded_event(
                     compiler.version().clone(),
                     compiler.path(),
                     false,
                     input,
                     output.clone(),
                 )
                 .expect("Can't happen");
-        }
+        },
         (Err(err), true) => {
             reporter
                 .report_post_link_contracts_compilation_failed_event(
                     compiler.version().clone(),
                     compiler.path().to_path_buf(),
                     input,
                     format!("{err:#}"),
                 )
                 .expect("Can't happen");
-        }
+        },
         (Err(err), false) => {
             reporter
                 .report_pre_link_contracts_compilation_failed_event(
                     compiler.version().clone(),
                     compiler.path().to_path_buf(),
                     input,
                     format!("{err:#}"),
                 )
                 .expect("Can't happen");
-        }
+        },
     }
 
     output
 }
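The `SPAWN_GATE` static above bounds how many compiler invocations can run at once. A minimal sketch of the same gating pattern (the permit count and task bodies are placeholders; assumes tokio and Rust 1.80+ for `std::sync::LazyLock`):

use std::sync::LazyLock;
use tokio::sync::Semaphore;

// A process-wide gate: at most 5 tasks may run the guarded section at once.
// The permit is released when `_permit` is dropped at the end of the scope.
static GATE: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(5));

async fn guarded_work(id: usize) {
    let _permit = GATE.acquire().await.expect("semaphore is never closed");
    println!("task {id} holds one of the 5 permits");
    // ... spawn the compiler process here ...
}

#[tokio::main]
async fn main() {
    let handles: Vec<_> = (0..20).map(|i| tokio::spawn(guarded_work(i))).collect();
    for handle in handles {
        handle.await.unwrap();
    }
}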
 
 struct ArtifactsCache {
     path: PathBuf,
 }
 
 impl ArtifactsCache {
     pub fn new(path: impl AsRef<Path>) -> Self {
-        Self {
-            path: path.as_ref().to_path_buf(),
-        }
+        Self { path: path.as_ref().to_path_buf() }
     }
 
     #[instrument(level = "debug", skip_all, err)]
     pub async fn with_invalidated_cache(self) -> Result<Self> {
         cacache::clear(self.path.as_path())
             .await
             .map_err(Into::<Error>::into)
             .with_context(|| format!("Failed to clear cache at {}", self.path.display()))?;
         Ok(self)
     }
 
     #[instrument(level = "debug", skip_all, err)]
     pub async fn insert(&self, key: &CacheKey<'_>, value: &CacheValue) -> Result<()> {
         let key = bson::to_vec(key).context("Failed to serialize cache key (bson)")?;
         let value = bson::to_vec(value).context("Failed to serialize cache value (bson)")?;
         cacache::write(self.path.as_path(), key.encode_hex(), value)
             .await
             .with_context(|| {
                 format!("Failed to write cache entry under {}", self.path.display())
             })?;
         Ok(())
     }
 
     pub async fn get(&self, key: &CacheKey<'_>) -> Option<CacheValue> {
         let key = bson::to_vec(key).ok()?;
-        let value = cacache::read(self.path.as_path(), key.encode_hex())
-            .await
-            .ok()?;
+        let value = cacache::read(self.path.as_path(), key.encode_hex()).await.ok()?;
         let value = bson::from_slice::<CacheValue>(&value).ok()?;
         Some(value)
     }
+
+    #[instrument(level = "debug", skip_all, err)]
+    pub async fn get_or_insert_with(
+        &self,
+        key: &CacheKey<'_>,
+        callback: impl AsyncFnOnce() -> Result<CacheValue>,
+    ) -> Result<CacheValue> {
+        match self.get(key).await {
+            Some(value) => {
+                debug!("Cache hit");
+                Ok(value)
+            },
+            None => {
+                debug!("Cache miss");
+                let value = callback().await?;
+                self.insert(key, &value).await?;
+                Ok(value)
+            },
+        }
+    }
 }
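The cache above persists BSON-serialized keys and values through `cacache`, hex-encoding the key bytes because cacache keys are strings. A rough sketch of that round trip under the same assumptions (the `Key` and `Entry` types are illustrative, and the `hex` crate stands in for alloy's `ToHexExt`):

use anyhow::Result;
use serde::{Deserialize, Serialize};

#[derive(Serialize)]
struct Key<'a> {
    metadata_file: &'a str,
    mode: &'a str,
}

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Entry {
    compiler_version: String,
    optimized: bool,
}

#[tokio::main]
async fn main() -> Result<()> {
    let dir = std::env::temp_dir().join("artifacts-cache-demo");
    let key = hex::encode(bson::to_vec(&Key { metadata_file: "a.sol", mode: "M3" })?);
    let entry = Entry { compiler_version: "0.8.26".into(), optimized: true };

    // Both key and value are BSON documents; only the key needs to become a
    // string, hence the hex encoding.
    cacache::write(&dir, &key, bson::to_vec(&entry)?).await?;

    let raw = cacache::read(&dir, &key).await?;
    let decoded: Entry = bson::from_slice(&raw)?;
    assert_eq!(decoded, entry);
    Ok(())
}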
 
 #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
 struct CacheKey<'a> {
     /// The identifier of the used compiler.
     compiler_identifier: CompilerIdentifier,
 
     /// The version of the compiler that was used to compile the artifacts.
     compiler_version: Version,
 
     /// The path of the metadata file that the compilation artifacts are for.
     metadata_file_path: &'a Path,
 
     /// The mode that the compilation artifacts were compiled with.
     solc_mode: Cow<'a, Mode>,
 }
 
 #[derive(Clone, Debug, Serialize, Deserialize)]
 struct CacheValue {
     /// The compiler output from the compilation run.
     compiler_output: CompilerOutput,
 }
 
 impl CacheValue {
     pub fn new(compiler_output: CompilerOutput) -> Self {
         Self { compiler_output }
     }
 }
@@ -0,0 +1,33 @@
+use revive_dt_config::CorpusConfiguration;
+use revive_dt_format::{corpus::Corpus, metadata::MetadataFile};
+use tracing::{info, info_span, instrument};
+
+/// Given an object that implements [`AsRef<CorpusConfiguration>`], this function finds all of the
+/// corpus files and produces a list containing all of the [`MetadataFile`]s discovered.
+#[instrument(level = "debug", name = "Collecting Corpora", skip_all)]
+pub fn collect_metadata_files(
+    context: impl AsRef<CorpusConfiguration>,
+) -> anyhow::Result<Vec<MetadataFile>> {
+    let mut metadata_files = Vec::new();
+
+    let corpus_configuration = AsRef::<CorpusConfiguration>::as_ref(&context);
+    for path in &corpus_configuration.paths {
+        let span = info_span!("Processing corpus file", path = %path.display());
+        let _guard = span.enter();
+
+        let corpus = Corpus::try_from_path(path)?;
+        info!(
+            name = corpus.name(),
+            number_of_contained_paths = corpus.path_count(),
+            "Deserialized corpus file"
+        );
+        metadata_files.extend(corpus.enumerate_tests());
+    }
+
+    // There's a possibility that there are certain paths that all lead to the same metadata files
+    // and therefore it's important that we sort them and then deduplicate them.
+    metadata_files.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
+    metadata_files.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
+
+    Ok(metadata_files)
+}
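The sort-then-dedup pass above is what makes the deduplication complete: `Vec::dedup_by` only collapses consecutive duplicates, so sorting by the same field first is required. A standalone illustration with a simplified stand-in for `MetadataFile`:

use std::path::PathBuf;

#[derive(Debug)]
struct MetadataFile {
    metadata_file_path: PathBuf,
}

fn main() {
    let mut files = vec![
        MetadataFile { metadata_file_path: PathBuf::from("b/meta.json") },
        MetadataFile { metadata_file_path: PathBuf::from("a/meta.json") },
        MetadataFile { metadata_file_path: PathBuf::from("b/meta.json") },
    ];
    // dedup_by only removes *consecutive* duplicates, so sorting first is
    // what makes it remove all of them.
    files.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
    files.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
    assert_eq!(files.len(), 2);
    println!("{files:?}");
}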
@@ -1,7 +1,9 @@
 mod cached_compiler;
+mod metadata;
 mod pool;
 mod test;
 
 pub use cached_compiler::*;
+pub use metadata::*;
 pub use pool::*;
 pub use test::*;
@@ -2,58 +2,53 @@
 
 use std::sync::atomic::{AtomicUsize, Ordering};
 
+use crate::Platform;
 use anyhow::Context as _;
 use revive_dt_config::*;
-use revive_dt_core::Platform;
 use revive_dt_node_interaction::EthereumNode;
 
 /// The node pool starts one or more [Node] which then can be accessed
 /// in a round-robin fashion.
 pub struct NodePool {
     next: AtomicUsize,
     nodes: Vec<Box<dyn EthereumNode + Send + Sync>>,
 }
 
 impl NodePool {
     /// Create a new Pool. This will start as many nodes as there are workers in `config`.
     pub async fn new(context: Context, platform: &dyn Platform) -> anyhow::Result<Self> {
         let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
         let nodes = concurrency_configuration.number_of_nodes;
 
         let mut handles = Vec::with_capacity(nodes);
         for _ in 0..nodes {
             let context = context.clone();
             handles.push(platform.new_node(context)?);
         }
 
         let mut nodes = Vec::with_capacity(nodes);
         for handle in handles {
             nodes.push(
                 handle
                     .join()
                     .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))
                     .context("Failed to join node spawn thread")?
                     .context("Node failed to spawn")?,
             );
         }
 
-        let pre_transactions_tasks = nodes
-            .iter_mut()
-            .map(|node| node.pre_transactions())
-            .collect::<Vec<_>>();
+        let pre_transactions_tasks =
+            nodes.iter_mut().map(|node| node.pre_transactions()).collect::<Vec<_>>();
         futures::future::try_join_all(pre_transactions_tasks)
             .await
             .context("Failed to run the pre-transactions task")?;
 
-        Ok(Self {
-            nodes,
-            next: Default::default(),
-        })
+        Ok(Self { nodes, next: Default::default() })
     }
 
     /// Get a handle to the next node.
     pub fn round_robbin(&self) -> &dyn EthereumNode {
         let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len();
         self.nodes.get(current).unwrap().as_ref()
     }
 }
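`round_robbin` above hands out nodes with a lock-free rotating index (the spelling follows the source identifier). A minimal sketch of the idea; `fetch_add` returns the previous counter value, so concurrent callers each land on a distinct index without taking a lock:

use std::sync::atomic::{AtomicUsize, Ordering};

struct Pool {
    next: AtomicUsize,
    nodes: Vec<String>, // stand-in for the boxed node handles
}

impl Pool {
    fn round_robbin(&self) -> &str {
        // The counter wraps on overflow, which is fine because the index is
        // always taken modulo the pool size.
        let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len();
        &self.nodes[current]
    }
}

fn main() {
    let pool = Pool {
        next: AtomicUsize::new(0),
        nodes: vec!["node-0".into(), "node-1".into(), "node-2".into()],
    };
    for _ in 0..5 {
        println!("{}", pool.round_robbin());
    }
}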
+254 -297
@@ -1,168 +1,163 @@
-use std::collections::BTreeMap;
-use std::sync::Arc;
-use std::{borrow::Cow, path::Path};
+use std::{borrow::Cow, collections::BTreeMap, path::Path, sync::Arc};
 
 use futures::{Stream, StreamExt, stream};
 use indexmap::{IndexMap, indexmap};
-use revive_dt_common::types::PlatformIdentifier;
+use revive_dt_common::{iterators::EitherIter, types::PlatformIdentifier};
 use revive_dt_config::Context;
-use revive_dt_format::corpus::Corpus;
+use revive_dt_format::mode::ParsedMode;
 use serde_json::{Value, json};
 
-use revive_dt_compiler::Mode;
-use revive_dt_compiler::SolidityCompiler;
+use revive_dt_compiler::{Mode, SolidityCompiler};
 use revive_dt_format::{
     case::{Case, CaseIdx},
     metadata::MetadataFile,
 };
 use revive_dt_node_interaction::EthereumNode;
-use revive_dt_report::{ExecutionSpecificReporter, Report, Reporter, TestCaseStatus};
-use revive_dt_report::{TestSpecificReporter, TestSpecifier};
+use revive_dt_report::{ExecutionSpecificReporter, Reporter, TestSpecificReporter, TestSpecifier};
 use tracing::{debug, error, info};
 
-use crate::Platform;
-use crate::helpers::NodePool;
+use crate::{Platform, helpers::NodePool};
 
 pub async fn create_test_definitions_stream<'a>(
     // This is only required for creating the compiler objects and is not used anywhere else in the
     // function.
     context: &Context,
-    corpus: &'a Corpus,
+    metadata_files: impl IntoIterator<Item = &'a MetadataFile>,
     platforms_and_nodes: &'a BTreeMap<PlatformIdentifier, (&dyn Platform, NodePool)>,
-    only_execute_failed_tests: Option<&Report>,
     reporter: Reporter,
 ) -> impl Stream<Item = TestDefinition<'a>> {
-    let cloned_reporter = reporter.clone();
     stream::iter(
-        corpus
-            .cases_iterator()
-            .inspect(move |(metadata_file, ..)| {
-                cloned_reporter
-                    .report_metadata_file_discovery_event(
-                        metadata_file.metadata_file_path.clone(),
-                        metadata_file.content.clone(),
-                    )
-                    .unwrap();
-            })
-            .map(move |(metadata_file, case_idx, case, mode)| {
+        metadata_files
+            .into_iter()
+            // Flatten over the cases.
+            .flat_map(|metadata_file| {
+                metadata_file
+                    .cases
+                    .iter()
+                    .enumerate()
+                    .map(move |(case_idx, case)| (metadata_file, case_idx, case))
+            })
+            // Flatten over the modes, prefer the case modes over the metadata file modes.
+            .flat_map(move |(metadata_file, case_idx, case)| {
                 let reporter = reporter.clone();
 
-                (
-                    metadata_file,
-                    case_idx,
-                    case,
-                    mode.clone(),
-                    reporter.test_specific_reporter(Arc::new(TestSpecifier {
-                        solc_mode: mode.as_ref().clone(),
-                        metadata_file_path: metadata_file.metadata_file_path.clone(),
-                        case_idx: CaseIdx::new(case_idx),
-                    })),
-                )
-            })
+                let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
+                let modes = match modes {
+                    Some(modes) => EitherIter::A(
+                        ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned),
+                    ),
+                    None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
+                };
+
+                modes.into_iter().map(move |mode| {
+                    (
+                        metadata_file,
+                        case_idx,
+                        case,
+                        mode.clone(),
+                        reporter.test_specific_reporter(Arc::new(TestSpecifier {
+                            solc_mode: mode.as_ref().clone(),
+                            metadata_file_path: metadata_file.metadata_file_path.clone(),
+                            case_idx: CaseIdx::new(case_idx),
+                        })),
+                    )
+                })
+            })
-            // Inform the reporter of each one of the test cases that were discovered which we expect to
-            // run.
+            // Inform the reporter of each one of the test cases that were discovered which we
+            // expect to run.
             .inspect(|(_, _, _, _, reporter)| {
-                reporter
-                    .report_test_case_discovery_event()
-                    .expect("Can't fail");
+                reporter.report_test_case_discovery_event().expect("Can't fail");
             }),
     )
     // Creating the Test Definition objects from all of the various objects we have and creating
     // their required dependencies (e.g., compiler).
-    .filter_map(
-        move |(metadata_file, case_idx, case, mode, reporter)| async move {
-            let mut platforms = BTreeMap::new();
-            for (platform, node_pool) in platforms_and_nodes.values() {
-                let node = node_pool.round_robbin();
-                let compiler = platform
-                    .new_compiler(context.clone(), mode.version.clone().map(Into::into))
-                    .await
-                    .inspect_err(|err| {
-                        error!(
-                            ?err,
-                            platform_identifier = %platform.platform_identifier(),
-                            "Failed to instantiate the compiler"
-                        )
-                    })
-                    .ok()?;
-
-                reporter
-                    .report_node_assigned_event(
-                        node.id(),
-                        platform.platform_identifier(),
-                        node.connection_string(),
-                    )
-                    .expect("Can't fail");
-
-                let reporter =
-                    reporter.execution_specific_reporter(node.id(), platform.platform_identifier());
-
-                platforms.insert(
-                    platform.platform_identifier(),
-                    TestPlatformInformation {
-                        platform: *platform,
-                        node,
-                        compiler,
-                        reporter,
-                    },
-                );
-            }
-
-            Some(TestDefinition {
-                /* Metadata file information */
-                metadata: metadata_file,
-                metadata_file_path: metadata_file.metadata_file_path.as_path(),
-
-                /* Mode Information */
-                mode: mode.clone(),
-
-                /* Case Information */
-                case_idx: CaseIdx::new(case_idx),
-                case,
-
-                /* Platform and Node Assignment Information */
-                platforms,
-
-                /* Reporter */
-                reporter,
-            })
-        },
-    )
+    .filter_map(move |(metadata_file, case_idx, case, mode, reporter)| async move {
+        let mut platforms = BTreeMap::new();
+        for (platform, node_pool) in platforms_and_nodes.values() {
+            let node = node_pool.round_robbin();
+            let compiler = platform
+                .new_compiler(context.clone(), mode.version.clone().map(Into::into))
+                .await
+                .inspect_err(|err| {
+                    error!(
+                        ?err,
+                        platform_identifier = %platform.platform_identifier(),
+                        "Failed to instantiate the compiler"
+                    )
+                })
+                .ok()?;
+
+            reporter
+                .report_node_assigned_event(
+                    node.id(),
+                    platform.platform_identifier(),
+                    node.connection_string(),
+                )
+                .expect("Can't fail");
+
+            let reporter =
+                reporter.execution_specific_reporter(node.id(), platform.platform_identifier());
+
+            platforms.insert(
+                platform.platform_identifier(),
+                TestPlatformInformation { platform: *platform, node, compiler, reporter },
+            );
+        }
+
+        Some(TestDefinition {
+            /* Metadata file information */
+            metadata: metadata_file,
+            metadata_file_path: metadata_file.metadata_file_path.as_path(),
+
+            /* Mode Information */
+            mode: mode.clone(),
+
+            /* Case Information */
+            case_idx: CaseIdx::new(case_idx),
+            case,
+
+            /* Platform and Node Assignment Information */
+            platforms,
+
+            /* Reporter */
+            reporter,
+        })
+    })
     // Filter out the test cases which are incompatible or that can't run in the current setup.
     .filter_map(move |test| async move {
-        match test.check_compatibility(only_execute_failed_tests) {
+        match test.check_compatibility() {
             Ok(()) => Some(test),
             Err((reason, additional_information)) => {
                 debug!(
                     metadata_file_path = %test.metadata.metadata_file_path.display(),
                     case_idx = %test.case_idx,
                     mode = %test.mode,
                     reason,
                     additional_information =
                         serde_json::to_string(&additional_information).unwrap(),
                     "Ignoring Test Case"
                 );
                 test.reporter
                     .report_test_ignored_event(
                         reason.to_string(),
                         additional_information
                             .into_iter()
                             .map(|(k, v)| (k.into(), v))
                             .collect::<IndexMap<_, _>>(),
                     )
                     .expect("Can't fail");
                 None
-            }
+            },
         }
     })
     .inspect(|test| {
         info!(
             metadata_file_path = %test.metadata_file_path.display(),
             case_idx = %test.case_idx,
             mode = %test.mode,
             "Created a test case definition"
         );
     })
 }
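The mode selection above prefers case-level modes, falls back to the metadata file's modes, and only then runs every mode; `EitherIter` just lets the two branches share one iterator type. A sketch of the same fallback using a boxed iterator in place of `EitherIter` (all names here are illustrative):

fn modes_to_run<'a>(
    case_modes: Option<&'a Vec<String>>,
    file_modes: Option<&'a Vec<String>>,
    all_modes: &'a [String],
) -> Box<dyn Iterator<Item = &'a String> + 'a> {
    // Prefer the case's own modes, then the metadata file's, then everything.
    match case_modes.or(file_modes) {
        Some(modes) => Box::new(modes.iter()),
        None => Box::new(all_modes.iter()),
    }
}

fn main() {
    let all = vec!["M0".to_string(), "M3".to_string(), "Y+M3".to_string()];
    let file_modes = Some(vec!["M3".to_string()]);
    let selected: Vec<_> = modes_to_run(None, file_modes.as_ref(), &all).collect();
    println!("{selected:?}"); // falls back to the file-level modes: ["M3"]
}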
 
 /// This is a full description of a differential test to run alongside the full metadata file, the
@@ -170,180 +165,142 @@ pub async fn create_test_definitions_stream<'a>(
 /// these platforms that they should run on, the compilers to use, and everything else needed making
 /// it a complete description.
 pub struct TestDefinition<'a> {
     /* Metadata file information */
     pub metadata: &'a MetadataFile,
     pub metadata_file_path: &'a Path,
 
     /* Mode Information */
     pub mode: Cow<'a, Mode>,
 
     /* Case Information */
     pub case_idx: CaseIdx,
     pub case: &'a Case,
 
     /* Platform and Node Assignment Information */
     pub platforms: BTreeMap<PlatformIdentifier, TestPlatformInformation<'a>>,
 
     /* Reporter */
     pub reporter: TestSpecificReporter,
 }
 
 impl<'a> TestDefinition<'a> {
     /// Checks if this test can be run with the current configuration.
-    pub fn check_compatibility(
-        &self,
-        only_execute_failed_tests: Option<&Report>,
-    ) -> TestCheckFunctionResult {
+    pub fn check_compatibility(&self) -> TestCheckFunctionResult {
         self.check_metadata_file_ignored()?;
         self.check_case_file_ignored()?;
         self.check_target_compatibility()?;
         self.check_evm_version_compatibility()?;
         self.check_compiler_compatibility()?;
-        self.check_ignore_succeeded(only_execute_failed_tests)?;
         Ok(())
     }
 
     /// Checks if the metadata file is ignored or not.
     fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult {
         if self.metadata.ignore.is_some_and(|ignore| ignore) {
             Err(("Metadata file is ignored.", indexmap! {}))
         } else {
             Ok(())
         }
     }
 
     /// Checks if the case file is ignored or not.
     fn check_case_file_ignored(&self) -> TestCheckFunctionResult {
         if self.case.ignore.is_some_and(|ignore| ignore) {
             Err(("Case is ignored.", indexmap! {}))
         } else {
             Ok(())
         }
     }
 
     /// Checks if the platforms all support the desired targets in the metadata file.
     fn check_target_compatibility(&self) -> TestCheckFunctionResult {
         let mut error_map = indexmap! {
             "test_desired_targets" => json!(self.metadata.targets.as_ref()),
         };
         let mut is_allowed = true;
         for (_, platform_information) in self.platforms.iter() {
             let is_allowed_for_platform = match self.metadata.targets.as_ref() {
                 None => true,
-                Some(required_vm_identifiers) => {
-                    required_vm_identifiers.contains(&platform_information.platform.vm_identifier())
-                }
+                Some(required_vm_identifiers) =>
+                    required_vm_identifiers.contains(&platform_information.platform.vm_identifier()),
             };
             is_allowed &= is_allowed_for_platform;
             error_map.insert(
                 platform_information.platform.platform_identifier().into(),
                 json!(is_allowed_for_platform),
             );
         }
 
         if is_allowed {
             Ok(())
         } else {
             Err((
                 "One of the platforms does not support the targets allowed by the test.",
                 error_map,
             ))
         }
     }
 
     // Checks for the compatibility of the EVM version with the platforms specified.
     fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult {
         let Some(evm_version_requirement) = self.metadata.required_evm_version else {
             return Ok(());
         };
 
         let mut error_map = indexmap! {
             "test_desired_evm_version" => json!(self.metadata.required_evm_version),
         };
         let mut is_allowed = true;
         for (_, platform_information) in self.platforms.iter() {
             let is_allowed_for_platform =
                 evm_version_requirement.matches(&platform_information.node.evm_version());
             is_allowed &= is_allowed_for_platform;
             error_map.insert(
                 platform_information.platform.platform_identifier().into(),
                 json!(is_allowed_for_platform),
             );
         }
 
         if is_allowed {
             Ok(())
         } else {
-            Err((
-                "EVM version is incompatible for the platforms specified",
-                error_map,
-            ))
+            Err(("EVM version is incompatible for the platforms specified", error_map))
         }
     }
 
     /// Checks if the platforms' compilers support the mode that the test is for.
     fn check_compiler_compatibility(&self) -> TestCheckFunctionResult {
         let mut error_map = indexmap! {
             "test_desired_evm_version" => json!(self.metadata.required_evm_version),
         };
         let mut is_allowed = true;
         for (_, platform_information) in self.platforms.iter() {
             let is_allowed_for_platform = platform_information
                 .compiler
                 .supports_mode(self.mode.optimize_setting, self.mode.pipeline);
             is_allowed &= is_allowed_for_platform;
             error_map.insert(
                 platform_information.platform.platform_identifier().into(),
                 json!(is_allowed_for_platform),
             );
         }
 
         if is_allowed {
             Ok(())
         } else {
             Err((
                 "Compilers do not support this mode for the provided platforms.",
                 error_map,
             ))
         }
     }
-
-    /// Checks if the test case should be executed or not based on the passed report and whether the
-    /// user has instructed the tool to ignore the already succeeding test cases.
-    fn check_ignore_succeeded(
-        &self,
-        only_execute_failed_tests: Option<&Report>,
-    ) -> TestCheckFunctionResult {
-        let Some(report) = only_execute_failed_tests else {
-            return Ok(());
-        };
-
-        let test_case_status = report
-            .execution_information
-            .get(&(self.metadata_file_path.to_path_buf().into()))
-            .and_then(|obj| obj.case_reports.get(&self.case_idx))
-            .and_then(|obj| obj.mode_execution_reports.get(&self.mode))
-            .and_then(|obj| obj.status.as_ref());
-
-        match test_case_status {
-            Some(TestCaseStatus::Failed { .. }) => Ok(()),
-            Some(TestCaseStatus::Ignored { .. }) => Err((
-                "Ignored since it was ignored in a previous run",
-                indexmap! {},
-            )),
-            Some(TestCaseStatus::Succeeded { .. }) => {
-                Err(("Ignored since it succeeded in a prior run", indexmap! {}))
-            }
-            None => Ok(()),
-        }
-    }
 }
 
 pub struct TestPlatformInformation<'a> {
     pub platform: &'a dyn Platform,
     pub node: &'a dyn EthereumNode,
     pub compiler: Box<dyn SolidityCompiler>,
     pub reporter: ExecutionSpecificReporter,
 }
 
 type TestCheckFunctionResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>;
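`check_compatibility` above composes the individual checks with `?`, so the first failing check short-circuits and carries its reason plus a structured error map. A reduced sketch of the same shape (using a plain `Vec` of key/value pairs in place of `IndexMap` and `serde_json::Value`):

type CheckResult = Result<(), (&'static str, Vec<(&'static str, String)>)>;

struct Test {
    ignored: bool,
    evm_version_ok: bool,
}

impl Test {
    fn check_ignored(&self) -> CheckResult {
        if self.ignored { Err(("Test is ignored.", vec![])) } else { Ok(()) }
    }

    fn check_evm_version(&self) -> CheckResult {
        if self.evm_version_ok {
            Ok(())
        } else {
            Err(("EVM version is incompatible", vec![("platform", "geth".into())]))
        }
    }

    // The first failing check wins; `?` propagates its reason and details.
    fn check_compatibility(&self) -> CheckResult {
        self.check_ignored()?;
        self.check_evm_version()?;
        Ok(())
    }
}

fn main() {
    let test = Test { ignored: false, evm_version_ok: false };
    if let Err((reason, details)) = test.check_compatibility() {
        println!("skipping: {reason} ({details:?})");
    }
}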
+397 -378
@@ -3,9 +3,12 @@
 //! This crate defines the testing configuration and
 //! provides a helper utility to execute tests.
 
+pub mod differential_tests;
+pub mod helpers;
+
 use std::{
     pin::Pin,
     thread::{self, JoinHandle},
 };
 
 use alloy::genesis::Genesis;
@@ -14,471 +17,487 @@ use revive_dt_common::types::*;
 use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc};
 use revive_dt_config::*;
 use revive_dt_node::{
-    Node, node_implementations::geth::GethNode,
-    node_implementations::lighthouse_geth::LighthouseGethNode,
-    node_implementations::substrate::SubstrateNode, node_implementations::zombienet::ZombienetNode,
+    Node,
+    node_implementations::{
+        geth::GethNode, lighthouse_geth::LighthouseGethNode, substrate::SubstrateNode,
+        zombienet::ZombieNode,
+    },
 };
 use revive_dt_node_interaction::EthereumNode;
 use tracing::info;
 
+pub use helpers::CachedCompiler;
+
 /// A trait that describes the interface for the platforms that are supported by the tool.
 #[allow(clippy::type_complexity)]
 pub trait Platform {
     /// Returns the identifier of this platform. This is a combination of the node and the compiler
     /// used.
     fn platform_identifier(&self) -> PlatformIdentifier;
 
     /// Returns a full identifier for the platform.
     fn full_identifier(&self) -> (NodeIdentifier, VmIdentifier, CompilerIdentifier) {
-        (
-            self.node_identifier(),
-            self.vm_identifier(),
-            self.compiler_identifier(),
-        )
+        (self.node_identifier(), self.vm_identifier(), self.compiler_identifier())
     }
 
     /// Returns the identifier of the node used.
     fn node_identifier(&self) -> NodeIdentifier;
 
     /// Returns the identifier of the vm used.
     fn vm_identifier(&self) -> VmIdentifier;
 
     /// Returns the identifier of the compiler used.
     fn compiler_identifier(&self) -> CompilerIdentifier;
 
     /// Creates a new node for the platform by spawning a new thread, creating the node object,
     /// initializing it, spawning it, and waiting for it to start up.
     fn new_node(
         &self,
         context: Context,
     ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>>;
 
     /// Creates a new compiler for the provided platform
     fn new_compiler(
         &self,
         context: Context,
         version: Option<VersionOrRequirement>,
     ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>>;
-
-    /// Exports the genesis/chainspec for the node.
-    fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value>;
 }
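A `new_node` implementor above returns a `std::thread::JoinHandle`, so several nodes can boot in parallel and be joined later (as `NodePool::new` does). A reduced sketch of that spawn-then-join shape, with a toy node trait standing in for the crate's `EthereumNode`:

use std::thread::{self, JoinHandle};

trait ToyNode: Send + Sync {
    fn id(&self) -> usize;
}

struct LocalNode(usize);
impl ToyNode for LocalNode {
    fn id(&self) -> usize {
        self.0
    }
}

// Spawn the (potentially slow) node start-up on its own OS thread and hand
// back a handle; errors surface only when the handle is joined.
fn new_node(id: usize) -> JoinHandle<anyhow::Result<Box<dyn ToyNode + Send + Sync>>> {
    thread::spawn(move || Ok(Box::new(LocalNode(id)) as Box<dyn ToyNode + Send + Sync>))
}

fn main() -> anyhow::Result<()> {
    let handles: Vec<_> = (0..3).map(new_node).collect();
    for handle in handles {
        let node = handle
            .join()
            .map_err(|e| anyhow::anyhow!("node thread panicked: {e:?}"))??;
        println!("node {} started", node.id());
    }
    Ok(())
}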
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
 pub struct GethEvmSolcPlatform;
 
 impl Platform for GethEvmSolcPlatform {
     fn platform_identifier(&self) -> PlatformIdentifier {
         PlatformIdentifier::GethEvmSolc
     }
 
     fn node_identifier(&self) -> NodeIdentifier {
         NodeIdentifier::Geth
     }
 
     fn vm_identifier(&self) -> VmIdentifier {
         VmIdentifier::Evm
     }
 
     fn compiler_identifier(&self) -> CompilerIdentifier {
         CompilerIdentifier::Solc
     }
 
     fn new_node(
         &self,
         context: Context,
     ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
         let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
         let genesis = genesis_configuration.genesis()?.clone();
         Ok(thread::spawn(move || {
             let node = GethNode::new(context);
             let node = spawn_node::<GethNode>(node, genesis)?;
             Ok(Box::new(node) as Box<_>)
         }))
     }
 
     fn new_compiler(
         &self,
         context: Context,
         version: Option<VersionOrRequirement>,
     ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
         Box::pin(async move {
             let compiler = Solc::new(context, version).await;
             compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
         })
     }
-
-    fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
-        let genesis = AsRef::<GenesisConfiguration>::as_ref(&context).genesis()?;
-        let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
-
-        let node_genesis = GethNode::node_genesis(genesis.clone(), &wallet);
-        serde_json::to_value(node_genesis)
-            .context("Failed to convert node genesis to a serde_value")
-    }
 }
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
 pub struct LighthouseGethEvmSolcPlatform;
 
 impl Platform for LighthouseGethEvmSolcPlatform {
     fn platform_identifier(&self) -> PlatformIdentifier {
         PlatformIdentifier::LighthouseGethEvmSolc
     }
 
     fn node_identifier(&self) -> NodeIdentifier {
         NodeIdentifier::LighthouseGeth
     }
 
     fn vm_identifier(&self) -> VmIdentifier {
         VmIdentifier::Evm
     }
 
     fn compiler_identifier(&self) -> CompilerIdentifier {
         CompilerIdentifier::Solc
     }
 
     fn new_node(
         &self,
         context: Context,
     ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
         let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
         let genesis = genesis_configuration.genesis()?.clone();
         Ok(thread::spawn(move || {
             let node = LighthouseGethNode::new(context);
             let node = spawn_node::<LighthouseGethNode>(node, genesis)?;
             Ok(Box::new(node) as Box<_>)
         }))
     }
 
     fn new_compiler(
         &self,
         context: Context,
         version: Option<VersionOrRequirement>,
     ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
         Box::pin(async move {
             let compiler = Solc::new(context, version).await;
             compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
         })
     }
-
-    fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
-        let genesis = AsRef::<GenesisConfiguration>::as_ref(&context).genesis()?;
-        let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
-
-        let node_genesis = LighthouseGethNode::node_genesis(genesis.clone(), &wallet);
-        serde_json::to_value(node_genesis)
-            .context("Failed to convert node genesis to a serde_value")
-    }
 }
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
 pub struct KitchensinkPolkavmResolcPlatform;
 
 impl Platform for KitchensinkPolkavmResolcPlatform {
     fn platform_identifier(&self) -> PlatformIdentifier {
         PlatformIdentifier::KitchensinkPolkavmResolc
     }
 
     fn node_identifier(&self) -> NodeIdentifier {
|
||||||
|
NodeIdentifier::Kitchensink
|
||||||
|
}
|
||||||
|
|
||||||
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
|
VmIdentifier::PolkaVM
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
|
CompilerIdentifier::Resolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_node(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
|
let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context).path.clone();
|
||||||
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
|
Ok(thread::spawn(move || {
|
||||||
|
let node = SubstrateNode::new(
|
||||||
|
kitchensink_path,
|
||||||
|
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
|
||||||
|
context,
|
||||||
|
);
|
||||||
|
let node = spawn_node(node, genesis)?;
|
||||||
|
Ok(Box::new(node) as Box<_>)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_compiler(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let compiler = Resolc::new(context, version).await;
|
||||||
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
|
pub struct KitchensinkRevmSolcPlatform;
|
||||||
|
|
||||||
|
impl Platform for KitchensinkRevmSolcPlatform {
|
||||||
|
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||||
|
PlatformIdentifier::KitchensinkRevmSolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn node_identifier(&self) -> NodeIdentifier {
|
||||||
|
NodeIdentifier::Kitchensink
|
||||||
|
}
|
||||||
|
|
||||||
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
|
VmIdentifier::Evm
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
|
CompilerIdentifier::Solc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_node(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
|
let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context).path.clone();
|
||||||
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
|
Ok(thread::spawn(move || {
|
||||||
|
let node = SubstrateNode::new(
|
||||||
|
kitchensink_path,
|
||||||
|
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
|
||||||
|
context,
|
||||||
|
);
|
||||||
|
let node = spawn_node(node, genesis)?;
|
||||||
|
Ok(Box::new(node) as Box<_>)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_compiler(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let compiler = Solc::new(context, version).await;
|
||||||
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
pub struct ReviveDevNodePolkavmResolcPlatform;
|
pub struct ReviveDevNodePolkavmResolcPlatform;
|
||||||
|
|
||||||
impl Platform for ReviveDevNodePolkavmResolcPlatform {
|
impl Platform for ReviveDevNodePolkavmResolcPlatform {
|
||||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||||
PlatformIdentifier::ReviveDevNodePolkavmResolc
|
PlatformIdentifier::ReviveDevNodePolkavmResolc
|
||||||
}
|
}
|
||||||
|
|
||||||
fn node_identifier(&self) -> NodeIdentifier {
|
fn node_identifier(&self) -> NodeIdentifier {
|
||||||
NodeIdentifier::ReviveDevNode
|
NodeIdentifier::ReviveDevNode
|
||||||
}
|
}
|
||||||
|
|
||||||
fn vm_identifier(&self) -> VmIdentifier {
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
VmIdentifier::PolkaVM
|
VmIdentifier::PolkaVM
|
||||||
}
|
}
|
||||||
|
|
||||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
CompilerIdentifier::Resolc
|
CompilerIdentifier::Resolc
|
||||||
}
|
}
|
||||||
|
|
||||||
fn new_node(
|
fn new_node(
|
||||||
&self,
|
&self,
|
||||||
context: Context,
|
context: Context,
|
||||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
let revive_dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
|
let revive_dev_node_path =
|
||||||
|
AsRef::<ReviveDevNodeConfiguration>::as_ref(&context).path.clone();
|
||||||
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
|
Ok(thread::spawn(move || {
|
||||||
|
let node = SubstrateNode::new(
|
||||||
|
revive_dev_node_path,
|
||||||
|
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
||||||
|
context,
|
||||||
|
);
|
||||||
|
let node = spawn_node(node, genesis)?;
|
||||||
|
Ok(Box::new(node) as Box<_>)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
let revive_dev_node_path = revive_dev_node_configuration.path.clone();
|
fn new_compiler(
|
||||||
let revive_dev_node_consensus = revive_dev_node_configuration.consensus.clone();
|
&self,
|
||||||
|
context: Context,
|
||||||
let eth_rpc_connection_strings = revive_dev_node_configuration.existing_rpc_url.clone();
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
let genesis = genesis_configuration.genesis()?.clone();
|
Box::pin(async move {
|
||||||
Ok(thread::spawn(move || {
|
let compiler = Resolc::new(context, version).await;
|
||||||
let node = SubstrateNode::new(
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
revive_dev_node_path,
|
})
|
||||||
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
}
|
||||||
Some(revive_dev_node_consensus),
|
|
||||||
context,
|
|
||||||
ð_rpc_connection_strings,
|
|
||||||
);
|
|
||||||
let node = spawn_node(node, genesis)?;
|
|
||||||
Ok(Box::new(node) as Box<_>)
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn new_compiler(
|
|
||||||
&self,
|
|
||||||
context: Context,
|
|
||||||
version: Option<VersionOrRequirement>,
|
|
||||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
|
||||||
Box::pin(async move {
|
|
||||||
let compiler = Resolc::new(context, version).await;
|
|
||||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
|
||||||
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
|
|
||||||
.path
|
|
||||||
.as_path();
|
|
||||||
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
|
||||||
let export_chainspec_command = SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND;
|
|
||||||
|
|
||||||
SubstrateNode::node_genesis(revive_dev_node_path, export_chainspec_command, &wallet)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
pub struct ReviveDevNodeRevmSolcPlatform;
|
pub struct ReviveDevNodeRevmSolcPlatform;
|
||||||
|
|
||||||
impl Platform for ReviveDevNodeRevmSolcPlatform {
|
impl Platform for ReviveDevNodeRevmSolcPlatform {
|
||||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||||
PlatformIdentifier::ReviveDevNodeRevmSolc
|
PlatformIdentifier::ReviveDevNodeRevmSolc
|
||||||
}
|
}
|
||||||
|
|
||||||
fn node_identifier(&self) -> NodeIdentifier {
|
fn node_identifier(&self) -> NodeIdentifier {
|
||||||
NodeIdentifier::ReviveDevNode
|
NodeIdentifier::ReviveDevNode
|
||||||
}
|
}
|
||||||
|
|
||||||
fn vm_identifier(&self) -> VmIdentifier {
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
VmIdentifier::Evm
|
VmIdentifier::Evm
|
||||||
}
|
}
|
||||||
|
|
||||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
CompilerIdentifier::Solc
|
CompilerIdentifier::Solc
|
||||||
}
|
}
|
||||||
|
|
||||||
fn new_node(
|
fn new_node(
|
||||||
&self,
|
&self,
|
||||||
context: Context,
|
context: Context,
|
||||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
let revive_dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
|
let revive_dev_node_path =
|
||||||
|
AsRef::<ReviveDevNodeConfiguration>::as_ref(&context).path.clone();
|
||||||
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
|
Ok(thread::spawn(move || {
|
||||||
|
let node = SubstrateNode::new(
|
||||||
|
revive_dev_node_path,
|
||||||
|
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
||||||
|
context,
|
||||||
|
);
|
||||||
|
let node = spawn_node(node, genesis)?;
|
||||||
|
Ok(Box::new(node) as Box<_>)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
let revive_dev_node_path = revive_dev_node_configuration.path.clone();
|
fn new_compiler(
|
||||||
let revive_dev_node_consensus = revive_dev_node_configuration.consensus.clone();
|
&self,
|
||||||
|
context: Context,
|
||||||
let eth_rpc_connection_strings = revive_dev_node_configuration.existing_rpc_url.clone();
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
let genesis = genesis_configuration.genesis()?.clone();
|
Box::pin(async move {
|
||||||
Ok(thread::spawn(move || {
|
let compiler = Solc::new(context, version).await;
|
||||||
let node = SubstrateNode::new(
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
revive_dev_node_path,
|
})
|
||||||
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
}
|
||||||
Some(revive_dev_node_consensus),
|
|
||||||
context,
|
|
||||||
ð_rpc_connection_strings,
|
|
||||||
);
|
|
||||||
let node = spawn_node(node, genesis)?;
|
|
||||||
Ok(Box::new(node) as Box<_>)
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn new_compiler(
|
|
||||||
&self,
|
|
||||||
context: Context,
|
|
||||||
version: Option<VersionOrRequirement>,
|
|
||||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
|
||||||
Box::pin(async move {
|
|
||||||
let compiler = Solc::new(context, version).await;
|
|
||||||
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
|
||||||
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
|
|
||||||
.path
|
|
||||||
.as_path();
|
|
||||||
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
|
||||||
let export_chainspec_command = SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND;
|
|
||||||
|
|
||||||
SubstrateNode::node_genesis(revive_dev_node_path, export_chainspec_command, &wallet)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
pub struct ZombienetPolkavmResolcPlatform;
|
pub struct ZombienetPolkavmResolcPlatform;
|
||||||
|
|
||||||
impl Platform for ZombienetPolkavmResolcPlatform {
|
impl Platform for ZombienetPolkavmResolcPlatform {
|
||||||
fn platform_identifier(&self) -> PlatformIdentifier {
|
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||||
PlatformIdentifier::ZombienetPolkavmResolc
|
PlatformIdentifier::ZombienetPolkavmResolc
|
||||||
}
|
}
|
||||||
|
|
||||||
fn node_identifier(&self) -> NodeIdentifier {
|
fn node_identifier(&self) -> NodeIdentifier {
|
||||||
NodeIdentifier::Zombienet
|
NodeIdentifier::Zombienet
|
||||||
}
|
}
|
||||||
|
|
||||||
fn vm_identifier(&self) -> VmIdentifier {
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
VmIdentifier::PolkaVM
|
VmIdentifier::PolkaVM
|
||||||
}
|
}
|
||||||
|
|
||||||
fn compiler_identifier(&self) -> CompilerIdentifier {
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
CompilerIdentifier::Resolc
|
CompilerIdentifier::Resolc
|
||||||
}
|
}
|
||||||
|
|
||||||
fn new_node(
|
fn new_node(
|
||||||
&self,
|
&self,
|
||||||
context: Context,
|
context: Context,
|
||||||
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
|
let polkadot_parachain_path =
|
||||||
.path
|
AsRef::<PolkadotParachainConfiguration>::as_ref(&context).path.clone();
|
||||||
.clone();
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
let genesis = genesis_configuration.genesis()?.clone();
|
Ok(thread::spawn(move || {
|
||||||
Ok(thread::spawn(move || {
|
let node = ZombieNode::new(polkadot_parachain_path, context);
|
||||||
let node = ZombienetNode::new(polkadot_parachain_path, context);
|
let node = spawn_node(node, genesis)?;
|
||||||
let node = spawn_node(node, genesis)?;
|
Ok(Box::new(node) as Box<_>)
|
||||||
Ok(Box::new(node) as Box<_>)
|
}))
|
||||||
}))
|
}
|
||||||
}
|
|
||||||
|
|
||||||
fn new_compiler(
|
fn new_compiler(
|
||||||
&self,
|
&self,
|
||||||
context: Context,
|
context: Context,
|
||||||
version: Option<VersionOrRequirement>,
|
version: Option<VersionOrRequirement>,
|
||||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
Box::pin(async move {
|
Box::pin(async move {
|
||||||
             let compiler = Resolc::new(context, version).await;
             compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
         })
     }
-
-    fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
-        let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
-            .path
-            .as_path();
-        let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
-
-        ZombienetNode::node_genesis(polkadot_parachain_path, &wallet)
-    }
 }

 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
 pub struct ZombienetRevmSolcPlatform;

 impl Platform for ZombienetRevmSolcPlatform {
     fn platform_identifier(&self) -> PlatformIdentifier {
         PlatformIdentifier::ZombienetRevmSolc
     }

     fn node_identifier(&self) -> NodeIdentifier {
         NodeIdentifier::Zombienet
     }

     fn vm_identifier(&self) -> VmIdentifier {
         VmIdentifier::Evm
     }

     fn compiler_identifier(&self) -> CompilerIdentifier {
         CompilerIdentifier::Solc
     }

     fn new_node(
         &self,
         context: Context,
     ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
         let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
-        let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
-            .path
-            .clone();
+        let polkadot_parachain_path =
+            AsRef::<PolkadotParachainConfiguration>::as_ref(&context).path.clone();
         let genesis = genesis_configuration.genesis()?.clone();
         Ok(thread::spawn(move || {
-            let node = ZombienetNode::new(polkadot_parachain_path, context);
+            let node = ZombieNode::new(polkadot_parachain_path, context);
             let node = spawn_node(node, genesis)?;
             Ok(Box::new(node) as Box<_>)
         }))
     }

     fn new_compiler(
         &self,
         context: Context,
         version: Option<VersionOrRequirement>,
     ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
         Box::pin(async move {
             let compiler = Solc::new(context, version).await;
             compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
         })
     }
-
-    fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
-        let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
-            .path
-            .as_path();
-        let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
-
-        ZombienetNode::node_genesis(polkadot_parachain_path, &wallet)
-    }
 }

 impl From<PlatformIdentifier> for Box<dyn Platform> {
     fn from(value: PlatformIdentifier) -> Self {
         match value {
             PlatformIdentifier::GethEvmSolc => Box::new(GethEvmSolcPlatform) as Box<_>,
-            PlatformIdentifier::LighthouseGethEvmSolc => {
-                Box::new(LighthouseGethEvmSolcPlatform) as Box<_>
-            }
-            PlatformIdentifier::ReviveDevNodePolkavmResolc => {
-                Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_>
-            }
-            PlatformIdentifier::ReviveDevNodeRevmSolc => {
-                Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>
-            }
-            PlatformIdentifier::ZombienetPolkavmResolc => {
-                Box::new(ZombienetPolkavmResolcPlatform) as Box<_>
-            }
+            PlatformIdentifier::LighthouseGethEvmSolc =>
+                Box::new(LighthouseGethEvmSolcPlatform) as Box<_>,
+            PlatformIdentifier::KitchensinkPolkavmResolc =>
+                Box::new(KitchensinkPolkavmResolcPlatform) as Box<_>,
+            PlatformIdentifier::KitchensinkRevmSolc =>
+                Box::new(KitchensinkRevmSolcPlatform) as Box<_>,
+            PlatformIdentifier::ReviveDevNodePolkavmResolc =>
+                Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_>,
+            PlatformIdentifier::ReviveDevNodeRevmSolc =>
+                Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>,
+            PlatformIdentifier::ZombienetPolkavmResolc =>
+                Box::new(ZombienetPolkavmResolcPlatform) as Box<_>,
             PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>,
         }
     }
 }

 impl From<PlatformIdentifier> for &dyn Platform {
     fn from(value: PlatformIdentifier) -> Self {
         match value {
             PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform as &dyn Platform,
-            PlatformIdentifier::LighthouseGethEvmSolc => {
-                &LighthouseGethEvmSolcPlatform as &dyn Platform
-            }
-            PlatformIdentifier::ReviveDevNodePolkavmResolc => {
-                &ReviveDevNodePolkavmResolcPlatform as &dyn Platform
-            }
-            PlatformIdentifier::ReviveDevNodeRevmSolc => {
-                &ReviveDevNodeRevmSolcPlatform as &dyn Platform
-            }
-            PlatformIdentifier::ZombienetPolkavmResolc => {
-                &ZombienetPolkavmResolcPlatform as &dyn Platform
-            }
+            PlatformIdentifier::LighthouseGethEvmSolc =>
+                &LighthouseGethEvmSolcPlatform as &dyn Platform,
+            PlatformIdentifier::KitchensinkPolkavmResolc =>
+                &KitchensinkPolkavmResolcPlatform as &dyn Platform,
+            PlatformIdentifier::KitchensinkRevmSolc =>
+                &KitchensinkRevmSolcPlatform as &dyn Platform,
+            PlatformIdentifier::ReviveDevNodePolkavmResolc =>
+                &ReviveDevNodePolkavmResolcPlatform as &dyn Platform,
+            PlatformIdentifier::ReviveDevNodeRevmSolc =>
+                &ReviveDevNodeRevmSolcPlatform as &dyn Platform,
+            PlatformIdentifier::ZombienetPolkavmResolc =>
+                &ZombienetPolkavmResolcPlatform as &dyn Platform,
             PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform,
         }
     }
 }

 fn spawn_node<T: Node + EthereumNode + Send + Sync>(
     mut node: T,
     genesis: Genesis,
 ) -> anyhow::Result<T> {
-    info!(
-        id = node.id(),
-        connection_string = node.connection_string(),
-        "Spawning node"
-    );
-    node.spawn(genesis)
-        .context("Failed to spawn node process")?;
-    info!(
-        id = node.id(),
-        connection_string = node.connection_string(),
-        "Spawned node"
-    );
+    info!(id = node.id(), connection_string = node.connection_string(), "Spawning node");
+    node.spawn(genesis).context("Failed to spawn node process")?;
+    info!(id = node.id(), connection_string = node.connection_string(), "Spawned node");
     Ok(node)
 }
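For readers skimming the change, the two `From` impls at the bottom are the only dispatch points: a `PlatformIdentifier` can be resolved either into an owned `Box<dyn Platform>` or into a reference to the matching zero-sized unit struct. A minimal usage sketch follows; it is illustrative only, and it assumes the identifier enums derive `Debug`, which the diff does not show:

// Sketch, not part of the diff: resolve an identifier and query the platform.
fn describe(identifier: PlatformIdentifier) -> String {
    // `From<PlatformIdentifier> for &dyn Platform` hands back a reference to a
    // unit struct, so no allocation happens on this path.
    let platform: &dyn Platform = identifier.into();
    format!(
        "node: {:?}, vm: {:?}, compiler: {:?}",
        platform.node_identifier(),
        platform.vm_identifier(),
        platform.compiler_identifier(),
    )
}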

+56 -71
@@ -2,11 +2,10 @@ mod differential_benchmarks;
 mod differential_tests;
 mod helpers;

-use anyhow::Context as _;
 use clap::Parser;
 use revive_dt_report::ReportAggregator;
 use schemars::schema_for;
-use tracing::{info, level_filters::LevelFilter};
+use tracing::info;
 use tracing_subscriber::{EnvFilter, FmtSubscriber};

 use revive_dt_config::Context;
@@ -14,83 +13,69 @@ use revive_dt_core::Platform;
 use revive_dt_format::metadata::Metadata;

 use crate::{
     differential_benchmarks::handle_differential_benchmarks,
     differential_tests::handle_differential_tests,
 };

 fn main() -> anyhow::Result<()> {
     let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()
         .lossy(false)
         // Assuming that each line contains 255 characters and that each character is one byte, then
         // this means that our buffer is about 4GBs large.
         .buffered_lines_limit(0x1000000)
         .thread_name("buffered writer")
         .finish(std::io::stdout());

     let subscriber = FmtSubscriber::builder()
         .with_writer(writer)
         .with_thread_ids(false)
         .with_thread_names(false)
-        .with_env_filter(
-            EnvFilter::builder()
-                .with_default_directive(LevelFilter::OFF.into())
-                .from_env_lossy(),
-        )
+        .with_env_filter(EnvFilter::from_default_env())
         .with_ansi(false)
         .pretty()
         .finish();
     tracing::subscriber::set_global_default(subscriber)?;
     info!("Differential testing tool is starting");

-    let mut context = Context::try_parse()?;
-    context.update_for_profile();
-
+    let context = Context::try_parse()?;
     let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task();

     match context {
         Context::Test(context) => tokio::runtime::Builder::new_multi_thread()
             .worker_threads(context.concurrency_configuration.number_of_threads)
             .enable_all()
             .build()
             .expect("Failed building the Runtime")
             .block_on(async move {
                 let differential_tests_handling_task =
                     handle_differential_tests(*context, reporter);

                 futures::future::try_join(differential_tests_handling_task, report_aggregator_task)
                     .await?;

                 Ok(())
             }),
         Context::Benchmark(context) => tokio::runtime::Builder::new_multi_thread()
             .worker_threads(context.concurrency_configuration.number_of_threads)
             .enable_all()
             .build()
             .expect("Failed building the Runtime")
             .block_on(async move {
                 let differential_benchmarks_handling_task =
                     handle_differential_benchmarks(*context, reporter);

                 futures::future::try_join(
                     differential_benchmarks_handling_task,
                     report_aggregator_task,
                 )
                 .await?;

                 Ok(())
             }),
-        Context::ExportGenesis(ref export_genesis_context) => {
-            let platform = Into::<&dyn Platform>::into(export_genesis_context.platform);
-            let genesis = platform.export_genesis(context)?;
-            let genesis_json = serde_json::to_string_pretty(&genesis)
-                .context("Failed to serialize the genesis to JSON")?;
-            println!("{genesis_json}");
-            Ok(())
-        }
         Context::ExportJsonSchema => {
             let schema = schema_for!(Metadata);
             println!("{}", serde_json::to_string_pretty(&schema).unwrap());
             Ok(())
-        }
+        },
     }
 }
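The buffer-size comment in `main` checks out arithmetically: 0x1000000 buffered lines at the comment's assumed 255 bytes per line is roughly 4.3 GB. A sketch of the arithmetic, where the per-line size is the comment's assumption rather than anything the writer enforces:

// Sanity check of the comment above; nothing here is enforced at runtime.
const BUFFERED_LINES: usize = 0x1000000; // 16_777_216, as passed to buffered_lines_limit
const ASSUMED_BYTES_PER_LINE: usize = 255; // assumption stated in the comment
const APPROX_BUFFER_BYTES: usize = BUFFERED_LINES * ASSUMED_BYTES_PER_LINE; // ≈ 4.28e9 bytes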

@@ -16,12 +16,12 @@ revive-common = { workspace = true }
 alloy = { workspace = true }
 anyhow = { workspace = true }
 futures = { workspace = true }
+regex = { workspace = true }
 tracing = { workspace = true }
 schemars = { workspace = true }
 semver = { workspace = true }
 serde = { workspace = true, features = ["derive"] }
 serde_json = { workspace = true }
-itertools = { workspace = true }

 [dev-dependencies]
 tokio = { workspace = true }
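These manifest edits track the code changes below: `itertools` appears to have been needed only for the `unique_by` call in the old `Corpus::cases_iterator`, which is deleted, while the new `regex` dependency backs the `ParsedMode` parser added at the end of this change.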

+83 -107
@@ -1,132 +1,108 @@
-use alloy::primitives::Address;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};

-use revive_dt_common::{
-    macros::define_wrapper_type,
-    types::{Mode, ParsedMode},
-};
+use revive_dt_common::{macros::define_wrapper_type, types::Mode};

-use crate::steps::*;
+use crate::{mode::ParsedMode, steps::*};

 #[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)]
 pub struct Case {
     /// An optional name of the test case.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub name: Option<String>,

     /// An optional comment on the case which has no impact on the execution in any way.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub comment: Option<String>,

     /// This represents a mode that has been parsed from test metadata.
     ///
     /// Mode strings can take the following form (in pseudo-regex):
     ///
     /// ```text
     /// [YEILV][+-]? (M[0123sz])? <semver>?
     /// ```
     ///
     /// If this is provided then it takes higher priority than the modes specified in the metadata
     /// file.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub modes: Option<Vec<ParsedMode>>,

     /// The set of steps to run as part of this test case.
     #[serde(rename = "inputs")]
     pub steps: Vec<Step>,

     /// An optional name of the group of tests that this test belongs to.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub group: Option<String>,

     /// An optional set of expectations and assertions to make about the transaction after it ran.
     ///
     /// If this is not specified then the only assertion that will be run is that the transaction
     /// was successful.
     ///
     /// This expectation that's on the case itself will be attached to the final step of the case.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub expected: Option<Expected>,

-    /// An optional boolean which defines if the case as a whole should be ignored. If null then the
-    /// case will not be ignored.
+    /// An optional boolean which defines if the case as a whole should be ignored. If null then
+    /// the case will not be ignored.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub ignore: Option<bool>,
 }

 impl Case {
     pub fn steps_iterator(&self) -> impl Iterator<Item = Step> {
         let steps_len = self.steps.len();
-        self.steps
-            .clone()
-            .into_iter()
-            .enumerate()
-            .map(move |(idx, mut step)| {
-                let Step::FunctionCall(ref mut input) = step else {
-                    return step;
-                };
+        self.steps.clone().into_iter().enumerate().map(move |(idx, mut step)| {
+            let Step::FunctionCall(ref mut input) = step else {
+                return step;
+            };

             if idx + 1 == steps_len {
                 if input.expected.is_none() {
                     input.expected = self.expected.clone();
                 }

                 // TODO: What does it mean for us to have an `expected` field on the case itself
                 // but the final input also has an expected field that doesn't match the one on
                 // the case? What are we supposed to do with that final expected field on the
                 // case?

                 step
             } else {
                 step
             }
         })
     }

     pub fn steps_iterator_for_benchmarks(
         &self,
         default_repeat_count: usize,
     ) -> Box<dyn Iterator<Item = Step> + '_> {
-        let contains_repeat = self
-            .steps_iterator()
-            .any(|step| matches!(&step, Step::Repeat(..)));
+        let contains_repeat = self.steps_iterator().any(|step| matches!(&step, Step::Repeat(..)));
         if contains_repeat {
             Box::new(self.steps_iterator()) as Box<_>
         } else {
             Box::new(std::iter::once(Step::Repeat(Box::new(RepeatStep {
                 comment: None,
                 repeat: default_repeat_count,
                 steps: self.steps_iterator().collect(),
             })))) as Box<_>
         }
     }

     pub fn solc_modes(&self) -> Vec<Mode> {
         match &self.modes {
             Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
             None => Mode::all().cloned().collect(),
         }
     }
-
-    pub fn deployer_address(&self) -> Address {
-        self.steps
-            .iter()
-            .filter_map(|step| match step {
-                Step::FunctionCall(input) => input.caller.as_address().copied(),
-                Step::BalanceAssertion(..) => None,
-                Step::StorageEmptyAssertion(..) => None,
-                Step::Repeat(..) => None,
-                Step::AllocateAccount(..) => None,
-            })
-            .next()
-            .unwrap_or(FunctionCallStep::default_caller_address())
-    }
 }

 define_wrapper_type!(
     /// A wrapper type for the index of test cases found in metadata file.
     #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
     #[serde(transparent)]
     pub struct CaseIdx(usize) impl Display, FromStr;
 );
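One non-obvious behaviour of the benchmarks iterator above is worth calling out: when a case contains no explicit `Step::Repeat`, all of its steps are wrapped in a single synthetic repeat step that uses the supplied default count. A hedged sketch of how a caller sees this, with the `case` value assumed to come from a parsed metadata file:

// Illustrative only; `Case` and `Step` are the types from the diff above.
fn expand_for_benchmarks(case: &Case) -> Vec<Step> {
    // If the case already contains a `Step::Repeat`, the steps pass through
    // untouched; otherwise they all land inside one `RepeatStep` that runs
    // the whole case ten times.
    case.steps_iterator_for_benchmarks(10).collect()
}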

+106 -183
@@ -1,200 +1,123 @@
 use std::{
-    borrow::Cow,
-    collections::HashMap,
+    fs::File,
     path::{Path, PathBuf},
 };

-use itertools::Itertools;
-use revive_dt_common::{
-    iterators::{EitherIter, FilesWithExtensionIterator},
-    types::{Mode, ParsedMode, ParsedTestSpecifier},
-};
-use tracing::{debug, warn};
+use revive_dt_common::iterators::FilesWithExtensionIterator;
+use serde::{Deserialize, Serialize};
+use tracing::{debug, info};

-use crate::{
-    case::{Case, CaseIdx},
-    metadata::{Metadata, MetadataFile},
-};
+use crate::metadata::{Metadata, MetadataFile};
+use anyhow::Context as _;

-#[derive(Default)]
-pub struct Corpus {
-    test_specifiers: HashMap<ParsedTestSpecifier, Vec<PathBuf>>,
-    metadata_files: HashMap<PathBuf, MetadataFile>,
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum Corpus {
+    SinglePath { name: String, path: PathBuf },
+    MultiplePaths { name: String, paths: Vec<PathBuf> },
 }

 impl Corpus {
-    pub fn new() -> Self {
-        Default::default()
-    }
-
-    pub fn with_test_specifier(
-        mut self,
-        test_specifier: ParsedTestSpecifier,
-    ) -> anyhow::Result<Self> {
-        match &test_specifier {
-            ParsedTestSpecifier::FileOrDirectory {
-                metadata_or_directory_file_path: metadata_file_path,
-            }
-            | ParsedTestSpecifier::Case {
-                metadata_file_path, ..
-            }
-            | ParsedTestSpecifier::CaseWithMode {
-                metadata_file_path, ..
-            } => {
-                let metadata_files = enumerate_metadata_files(metadata_file_path);
-                self.test_specifiers.insert(
-                    test_specifier,
-                    metadata_files
-                        .iter()
-                        .map(|metadata_file| metadata_file.metadata_file_path.clone())
-                        .collect(),
-                );
-                for metadata_file in metadata_files.into_iter() {
-                    self.metadata_files
-                        .insert(metadata_file.metadata_file_path.clone(), metadata_file);
-                }
-            }
-        };
-
-        Ok(self)
-    }
-
-    pub fn cases_iterator(
-        &self,
-    ) -> impl Iterator<Item = (&'_ MetadataFile, CaseIdx, &'_ Case, Cow<'_, Mode>)> + '_ {
-        let mut iterator = Box::new(std::iter::empty())
-            as Box<dyn Iterator<Item = (&'_ MetadataFile, CaseIdx, &'_ Case, Cow<'_, Mode>)> + '_>;
-
-        for (test_specifier, metadata_file_paths) in self.test_specifiers.iter() {
-            for metadata_file_path in metadata_file_paths {
-                let metadata_file = self
-                    .metadata_files
-                    .get(metadata_file_path)
-                    .expect("Must succeed");
-
-                match test_specifier {
-                    ParsedTestSpecifier::FileOrDirectory { .. } => {
-                        for (case_idx, case) in metadata_file.cases.iter().enumerate() {
-                            let case_idx = CaseIdx::new(case_idx);
-
-                            let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
-                            let modes = match modes {
-                                Some(modes) => EitherIter::A(
-                                    ParsedMode::many_to_modes(modes.iter())
-                                        .map(Cow::<'static, _>::Owned),
-                                ),
-                                None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
-                            };
-
-                            iterator = Box::new(
-                                iterator.chain(
-                                    modes
-                                        .into_iter()
-                                        .map(move |mode| (metadata_file, case_idx, case, mode)),
-                                ),
-                            )
-                        }
-                    }
-                    ParsedTestSpecifier::Case { case_idx, .. } => {
-                        let Some(case) = metadata_file.cases.get(*case_idx) else {
-                            warn!(
-                                test_specifier = %test_specifier,
-                                metadata_file_path = %metadata_file_path.display(),
-                                case_idx = case_idx,
-                                case_count = metadata_file.cases.len(),
-                                "Specified case not found in metadata file"
-                            );
-                            continue;
-                        };
-                        let case_idx = CaseIdx::new(*case_idx);
-
-                        let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
-                        let modes = match modes {
-                            Some(modes) => EitherIter::A(
-                                ParsedMode::many_to_modes(modes.iter())
-                                    .map(Cow::<'static, Mode>::Owned),
-                            ),
-                            None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
-                        };
-
-                        iterator = Box::new(
-                            iterator.chain(
-                                modes
-                                    .into_iter()
-                                    .map(move |mode| (metadata_file, case_idx, case, mode)),
-                            ),
-                        )
-                    }
-                    ParsedTestSpecifier::CaseWithMode { case_idx, mode, .. } => {
-                        let Some(case) = metadata_file.cases.get(*case_idx) else {
-                            warn!(
-                                test_specifier = %test_specifier,
-                                metadata_file_path = %metadata_file_path.display(),
-                                case_idx = case_idx,
-                                case_count = metadata_file.cases.len(),
-                                "Specified case not found in metadata file"
-                            );
-                            continue;
-                        };
-                        let case_idx = CaseIdx::new(*case_idx);
-
-                        let mode = Cow::Borrowed(mode);
-                        iterator = Box::new(iterator.chain(std::iter::once((
-                            metadata_file,
-                            case_idx,
-                            case,
-                            mode,
-                        ))))
-                    }
-                }
-            }
-        }
-
-        iterator.unique_by(|item| (&item.0.metadata_file_path, item.1, item.3.clone()))
-    }
-
-    pub fn metadata_file_count(&self) -> usize {
-        self.metadata_files.len()
-    }
-}
-
-fn enumerate_metadata_files(path: impl AsRef<Path>) -> Vec<MetadataFile> {
-    let root_path = path.as_ref();
-    let mut tests = if !root_path.is_dir() {
-        Box::new(std::iter::once(root_path.to_path_buf())) as Box<dyn Iterator<Item = _>>
-    } else {
-        Box::new(
-            FilesWithExtensionIterator::new(root_path)
-                .with_use_cached_fs(true)
-                .with_allowed_extension("sol")
-                .with_allowed_extension("json"),
-        )
-    }
-    .map(move |metadata_file_path| (root_path, metadata_file_path))
-    .filter_map(|(root_path, metadata_file_path)| {
-        Metadata::try_from_file(&metadata_file_path)
-            .or_else(|| {
-                debug!(
-                    discovered_from = %root_path.display(),
-                    metadata_file_path = %metadata_file_path.display(),
-                    "Skipping file since it doesn't contain valid metadata"
-                );
-                None
-            })
-            .map(|metadata| MetadataFile {
-                metadata_file_path,
-                corpus_file_path: root_path.to_path_buf(),
-                content: metadata,
-            })
-            .inspect(|metadata_file| {
-                debug!(
-                    metadata_file_path = %metadata_file.relative_path().display(),
-                    "Loaded metadata file"
-                )
-            })
-    })
-    .collect::<Vec<_>>();
-    tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
-    tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
-    tests
+    pub fn try_from_path(file_path: impl AsRef<Path>) -> anyhow::Result<Self> {
+        let mut corpus = File::open(file_path.as_ref())
+            .map_err(anyhow::Error::from)
+            .and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))
+            .with_context(|| {
+                format!(
+                    "Failed to open and deserialize corpus file at {}",
+                    file_path.as_ref().display()
+                )
+            })?;
+
+        let corpus_directory = file_path
+            .as_ref()
+            .canonicalize()
+            .context("Failed to canonicalize the path to the corpus file")?
+            .parent()
+            .context("Corpus file has no parent")?
+            .to_path_buf();
+
+        for path in corpus.paths_iter_mut() {
+            *path = corpus_directory.join(path.as_path())
+        }
+
+        Ok(corpus)
+    }
+
+    pub fn enumerate_tests(&self) -> Vec<MetadataFile> {
+        let mut tests = self
+            .paths_iter()
+            .flat_map(|root_path| {
+                if !root_path.is_dir() {
+                    Box::new(std::iter::once(root_path.to_path_buf()))
+                        as Box<dyn Iterator<Item = _>>
+                } else {
+                    Box::new(
+                        FilesWithExtensionIterator::new(root_path)
+                            .with_use_cached_fs(true)
+                            .with_allowed_extension("sol")
+                            .with_allowed_extension("json"),
+                    )
+                }
+                .map(move |metadata_file_path| (root_path, metadata_file_path))
+            })
+            .filter_map(|(root_path, metadata_file_path)| {
+                Metadata::try_from_file(&metadata_file_path)
+                    .or_else(|| {
+                        debug!(
+                            discovered_from = %root_path.display(),
+                            metadata_file_path = %metadata_file_path.display(),
+                            "Skipping file since it doesn't contain valid metadata"
+                        );
+                        None
+                    })
+                    .map(|metadata| MetadataFile {
+                        metadata_file_path,
+                        corpus_file_path: root_path.to_path_buf(),
+                        content: metadata,
+                    })
+                    .inspect(|metadata_file| {
+                        debug!(
+                            metadata_file_path = %metadata_file.relative_path().display(),
+                            "Loaded metadata file"
+                        )
+                    })
+            })
+            .collect::<Vec<_>>();
+        tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
+        tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
+        info!(len = tests.len(), corpus_name = self.name(), "Found tests in Corpus");
+        tests
+    }
+
+    pub fn name(&self) -> &str {
+        match self {
+            Corpus::SinglePath { name, .. } | Corpus::MultiplePaths { name, .. } => name.as_str(),
+        }
+    }
+
+    pub fn paths_iter(&self) -> impl Iterator<Item = &Path> {
+        match self {
+            Corpus::SinglePath { path, .. } =>
+                Box::new(std::iter::once(path.as_path())) as Box<dyn Iterator<Item = _>>,
+            Corpus::MultiplePaths { paths, .. } =>
+                Box::new(paths.iter().map(|path| path.as_path())) as Box<dyn Iterator<Item = _>>,
+        }
+    }
+
+    pub fn paths_iter_mut(&mut self) -> impl Iterator<Item = &mut PathBuf> {
+        match self {
+            Corpus::SinglePath { path, .. } =>
+                Box::new(std::iter::once(path)) as Box<dyn Iterator<Item = _>>,
+            Corpus::MultiplePaths { paths, .. } =>
+                Box::new(paths.iter_mut()) as Box<dyn Iterator<Item = _>>,
+        }
+    }
+
+    pub fn path_count(&self) -> usize {
+        match self {
+            Corpus::SinglePath { .. } => 1,
+            Corpus::MultiplePaths { paths, .. } => paths.len(),
+        }
+    }
 }
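Because `Corpus` is now an untagged serde enum, the corpus file format follows directly from the variant fields: a JSON object with a `name` plus either a single `path` string or a `paths` array. A sketch of both shapes as a test, with the names and paths invented purely for illustration:

// Hypothetical corpus documents; only the field layout is taken from the enum.
#[test]
fn corpus_accepts_both_shapes() -> anyhow::Result<()> {
    let single: Corpus =
        serde_json::from_str(r#"{ "name": "solidity", "path": "tests/solidity" }"#)?;
    assert_eq!(single.path_count(), 1);

    let multiple: Corpus =
        serde_json::from_str(r#"{ "name": "mixed", "paths": ["tests/a", "tests/b"] }"#)?;
    assert_eq!(multiple.path_count(), 2);
    Ok(())
}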

@@ -3,5 +3,6 @@
 pub mod case;
 pub mod corpus;
 pub mod metadata;
+pub mod mode;
 pub mod steps;
 pub mod traits;

+415 -459
File diff suppressed because it is too large
@@ -0,0 +1,242 @@
use anyhow::Context as _;
use regex::Regex;
use revive_dt_common::{
    iterators::EitherIter,
    types::{Mode, ModeOptimizerSetting, ModePipeline},
};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::{collections::HashSet, fmt::Display, str::FromStr, sync::LazyLock};

/// This represents a mode that has been parsed from test metadata.
///
/// Mode strings can take the following form (in pseudo-regex):
///
/// ```text
/// [YEILV][+-]? (M[0123sz])? <semver>?
/// ```
///
/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
#[serde(try_from = "String", into = "String")]
pub struct ParsedMode {
    pub pipeline: Option<ModePipeline>,
    pub optimize_flag: Option<bool>,
    pub optimize_setting: Option<ModeOptimizerSetting>,
    pub version: Option<semver::VersionReq>,
}

impl FromStr for ParsedMode {
    type Err = anyhow::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        static REGEX: LazyLock<Regex> = LazyLock::new(|| {
            Regex::new(r"(?x)
                ^
                (?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use eg Y, E+, E-
                \s*
                (?P<optimize_setting>M[a-zA-Z0-9])? # Optimize setting eg M0, Ms, Mz
                \s*
                (?P<version>[>=<]*\d+(?:\.\d+)*)? # Optional semver version eg >=0.8.0, 0.7, <0.8
                $
            ").unwrap()
        });

        let Some(caps) = REGEX.captures(s) else {
            anyhow::bail!("Cannot parse mode '{s}' from string");
        };

        let pipeline = match caps.name("pipeline") {
            Some(m) => Some(
                ModePipeline::from_str(m.as_str())
                    .context("Failed to parse mode pipeline from string")?,
            ),
            None => None,
        };

        let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");

        let optimize_setting = match caps.name("optimize_setting") {
            Some(m) => Some(
                ModeOptimizerSetting::from_str(m.as_str())
                    .context("Failed to parse optimizer setting from string")?,
            ),
            None => None,
        };

        let version = match caps.name("version") {
            Some(m) => Some(
                semver::VersionReq::parse(m.as_str())
                    .map_err(|e| {
                        anyhow::anyhow!(
                            "Cannot parse the version requirement '{}': {e}",
                            m.as_str()
                        )
                    })
                    .context("Failed to parse semver requirement from mode string")?,
            ),
            None => None,
        };

        Ok(ParsedMode { pipeline, optimize_flag, optimize_setting, version })
    }
}

impl Display for ParsedMode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut has_written = false;

        if let Some(pipeline) = self.pipeline {
            pipeline.fmt(f)?;
            if let Some(optimize_flag) = self.optimize_flag {
                f.write_str(if optimize_flag { "+" } else { "-" })?;
            }
            has_written = true;
        }

        if let Some(optimize_setting) = self.optimize_setting {
            if has_written {
                f.write_str(" ")?;
            }
            optimize_setting.fmt(f)?;
            has_written = true;
        }

        if let Some(version) = &self.version {
            if has_written {
                f.write_str(" ")?;
            }
            version.fmt(f)?;
        }

        Ok(())
    }
}

impl From<ParsedMode> for String {
    fn from(parsed_mode: ParsedMode) -> Self {
        parsed_mode.to_string()
    }
}

impl TryFrom<String> for ParsedMode {
    type Error = anyhow::Error;
    fn try_from(value: String) -> Result<Self, Self::Error> {
        ParsedMode::from_str(&value)
    }
}

impl ParsedMode {
    /// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
    pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
        let pipeline_iter = self.pipeline.as_ref().map_or_else(
            || EitherIter::A(ModePipeline::test_cases()),
            |p| EitherIter::B(std::iter::once(*p)),
        );

        let optimize_flag_setting = self
            .optimize_flag
            .map(|flag| if flag { ModeOptimizerSetting::M3 } else { ModeOptimizerSetting::M0 });

        let optimize_flag_iter = match optimize_flag_setting {
            Some(setting) => EitherIter::A(std::iter::once(setting)),
            None => EitherIter::B(ModeOptimizerSetting::test_cases()),
        };

        let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
            || EitherIter::A(optimize_flag_iter),
            |s| EitherIter::B(std::iter::once(*s)),
        );

        pipeline_iter.flat_map(move |pipeline| {
            optimize_settings_iter.clone().map(move |optimize_setting| Mode {
                pipeline,
                optimize_setting,
                version: self.version.clone(),
            })
        })
    }

    /// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
    /// This avoids any duplicate entries.
    pub fn many_to_modes<'a>(
        parsed: impl Iterator<Item = &'a ParsedMode>,
    ) -> impl Iterator<Item = Mode> {
        let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
        modes.into_iter()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parsed_mode_from_str() {
        let strings = vec![
            ("Mz", "Mz"),
            ("Y", "Y"),
            ("Y+", "Y+"),
            ("Y-", "Y-"),
            ("E", "E"),
            ("E+", "E+"),
            ("E-", "E-"),
            ("Y M0", "Y M0"),
            ("Y M1", "Y M1"),
            ("Y M2", "Y M2"),
            ("Y M3", "Y M3"),
            ("Y Ms", "Y Ms"),
            ("Y Mz", "Y Mz"),
            ("E M0", "E M0"),
            ("E M1", "E M1"),
            ("E M2", "E M2"),
            ("E M3", "E M3"),
            ("E Ms", "E Ms"),
            ("E Mz", "E Mz"),
            // When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
            ("Y 0.8.0", "Y ^0.8.0"),
            ("E+ 0.8.0", "E+ ^0.8.0"),
            ("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
            ("E Mz <0.7.0", "E Mz <0.7.0"),
            // We can parse +- _and_ M1/M2 but the latter takes priority.
            ("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
            ("E- M2 0.7.0", "E- M2 ^0.7.0"),
            // We don't see this in the wild but it is parsed.
            ("<=0.8", "<=0.8"),
        ];

        for (actual, expected) in strings {
            let parsed = ParsedMode::from_str(actual)
                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
            assert_eq!(
                expected,
                parsed.to_string(),
                "Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
            );
        }
    }

    #[test]
    fn test_parsed_mode_to_test_modes() {
        let strings = vec![
            ("Mz", vec!["Y Mz", "E Mz"]),
            ("Y", vec!["Y M0", "Y M3"]),
            ("E", vec!["E M0", "E M3"]),
            ("Y+", vec!["Y M3"]),
            ("Y-", vec!["Y M0"]),
            ("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
            ("<=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"]),
        ];

        for (actual, expected) in strings {
            let parsed = ParsedMode::from_str(actual)
                .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
            let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
            let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();

            assert_eq!(
                expected_set, actual_set,
                "Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
            );
        }
    }
}
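As a quick orientation for this new file: a mode string is parsed once and then fanned out into the concrete compiler configurations to run. A minimal sketch, using only behaviour pinned down by the unit tests above (the expansion of `Y+` to `Y M3` comes from `test_parsed_mode_to_test_modes`):

```rust
use std::str::FromStr;

// Parse a mode string as it appears in test metadata.
let parsed = ParsedMode::from_str("Y+ >=0.8.0").expect("valid mode string");
assert_eq!(parsed.to_string(), "Y+ >=0.8.0");

// "Y+" pins the pipeline to Y and the optimizer flag to "+", which maps
// to the M3 optimizer setting, so exactly one concrete mode comes out.
for mode in parsed.to_modes() {
    println!("{mode}"); // prints "Y M3 >=0.8.0"
}
```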
+989 -1067 File diff suppressed because it is too large
+135 -136
@@ -1,10 +1,10 @@
-use std::collections::HashMap;
-use std::pin::Pin;
+use std::{collections::HashMap, pin::Pin};
 
-use alloy::eips::BlockNumberOrTag;
-use alloy::json_abi::JsonAbi;
-use alloy::primitives::TxHash;
-use alloy::primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, U256};
+use alloy::{
+    eips::BlockNumberOrTag,
+    json_abi::JsonAbi,
+    primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, U256},
+};
 use anyhow::Result;
 
 use crate::metadata::{ContractIdent, ContractInstance};
@@ -12,165 +12,164 @@ use crate::metadata::{ContractIdent, ContractInstance};
 /// A trait defining the interface that implementors must provide so that the resolution logic in
 /// this crate can go from string calldata to bytes calldata.
 pub trait ResolverApi {
     /// Returns the ID of the chain that the node is on.
     fn chain_id(&self) -> Pin<Box<dyn Future<Output = Result<ChainId>> + '_>>;
 
     /// Returns the gas price for the specified transaction.
     fn transaction_gas_price(
         &self,
         tx_hash: TxHash,
     ) -> Pin<Box<dyn Future<Output = Result<u128>> + '_>>;
 
     // TODO: This is currently a u128 due to substrate needing more than 64 bits for its gas limit
     // when we implement the changes to the gas we need to adjust this to be a u64.
     /// Returns the gas limit of the specified block.
     fn block_gas_limit(
         &self,
         number: BlockNumberOrTag,
     ) -> Pin<Box<dyn Future<Output = Result<u128>> + '_>>;
 
     /// Returns the coinbase of the specified block.
     fn block_coinbase(
         &self,
         number: BlockNumberOrTag,
     ) -> Pin<Box<dyn Future<Output = Result<Address>> + '_>>;
 
     /// Returns the difficulty of the specified block.
     fn block_difficulty(
         &self,
         number: BlockNumberOrTag,
     ) -> Pin<Box<dyn Future<Output = Result<U256>> + '_>>;
 
     /// Returns the base fee of the specified block.
     fn block_base_fee(
         &self,
         number: BlockNumberOrTag,
     ) -> Pin<Box<dyn Future<Output = Result<u64>> + '_>>;
 
     /// Returns the hash of the specified block.
     fn block_hash(
         &self,
         number: BlockNumberOrTag,
     ) -> Pin<Box<dyn Future<Output = Result<BlockHash>> + '_>>;
 
     /// Returns the timestamp of the specified block.
     fn block_timestamp(
         &self,
         number: BlockNumberOrTag,
     ) -> Pin<Box<dyn Future<Output = Result<BlockTimestamp>> + '_>>;
 
     /// Returns the number of the last block.
     fn last_block_number(&self) -> Pin<Box<dyn Future<Output = Result<BlockNumber>> + '_>>;
 }
 
 #[derive(Clone, Copy, Debug, Default)]
 /// Contextual information required by the code that's performing the resolution.
 pub struct ResolutionContext<'a> {
     /// When provided, the contracts provided here will be used for resolutions.
     deployed_contracts: Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
 
     /// When provided, the variables in here will be used for performing resolutions.
     variables: Option<&'a HashMap<String, U256>>,
 
     /// When provided, this block number will be treated as the tip of the chain.
     block_number: Option<&'a BlockNumber>,
 
     /// When provided, the resolver will use this transaction hash for all of its resolutions.
     transaction_hash: Option<&'a TxHash>,
 }
 
 impl<'a> ResolutionContext<'a> {
     pub fn new() -> Self {
         Default::default()
     }
 
     pub fn new_from_parts(
         deployed_contracts: impl Into<
             Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
         >,
         variables: impl Into<Option<&'a HashMap<String, U256>>>,
         block_number: impl Into<Option<&'a BlockNumber>>,
         transaction_hash: impl Into<Option<&'a TxHash>>,
     ) -> Self {
         Self {
             deployed_contracts: deployed_contracts.into(),
             variables: variables.into(),
             block_number: block_number.into(),
             transaction_hash: transaction_hash.into(),
         }
     }
 
     pub fn with_deployed_contracts(
         mut self,
         deployed_contracts: impl Into<
             Option<&'a HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
         >,
     ) -> Self {
         self.deployed_contracts = deployed_contracts.into();
         self
     }
 
     pub fn with_variables(
         mut self,
         variables: impl Into<Option<&'a HashMap<String, U256>>>,
     ) -> Self {
         self.variables = variables.into();
         self
     }
 
     pub fn with_block_number(mut self, block_number: impl Into<Option<&'a BlockNumber>>) -> Self {
         self.block_number = block_number.into();
         self
     }
 
     pub fn with_transaction_hash(
         mut self,
         transaction_hash: impl Into<Option<&'a TxHash>>,
     ) -> Self {
         self.transaction_hash = transaction_hash.into();
         self
     }
 
     pub fn resolve_block_number(&self, number: BlockNumberOrTag) -> BlockNumberOrTag {
         match self.block_number {
             Some(block_number) => match number {
                 BlockNumberOrTag::Latest => BlockNumberOrTag::Number(*block_number),
-                n @ (BlockNumberOrTag::Finalized
-                | BlockNumberOrTag::Safe
-                | BlockNumberOrTag::Earliest
-                | BlockNumberOrTag::Pending
-                | BlockNumberOrTag::Number(_)) => n,
+                n @ (BlockNumberOrTag::Finalized |
+                BlockNumberOrTag::Safe |
+                BlockNumberOrTag::Earliest |
+                BlockNumberOrTag::Pending |
+                BlockNumberOrTag::Number(_)) => n,
             },
             None => number,
         }
     }
 
     pub fn deployed_contract(
         &self,
         instance: &ContractInstance,
     ) -> Option<&(ContractIdent, Address, JsonAbi)> {
         self.deployed_contracts
             .and_then(|deployed_contracts| deployed_contracts.get(instance))
     }
 
     pub fn deployed_contract_address(&self, instance: &ContractInstance) -> Option<&Address> {
         self.deployed_contract(instance).map(|(_, a, _)| a)
     }
 
     pub fn deployed_contract_abi(&self, instance: &ContractInstance) -> Option<&JsonAbi> {
         self.deployed_contract(instance).map(|(_, _, a)| a)
     }
 
     pub fn variable(&self, name: impl AsRef<str>) -> Option<&U256> {
-        self.variables
-            .and_then(|variables| variables.get(name.as_ref()))
+        self.variables.and_then(|variables| variables.get(name.as_ref()))
     }
 
     pub fn tip_block_number(&self) -> Option<&'a BlockNumber> {
         self.block_number
     }
 
     pub fn transaction_hash(&self) -> Option<&'a TxHash> {
         self.transaction_hash
     }
 }
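To make the intent of the builder methods above concrete, here is a minimal usage sketch; the empty maps are hypothetical placeholders and only illustrate the `impl Into<Option<...>>` ergonomics:

```rust
use std::collections::HashMap;

// Hypothetical, empty inputs purely for illustration.
let deployed: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)> = HashMap::new();
let variables: HashMap<String, U256> = HashMap::new();
let tip: BlockNumber = 42;

// Every field is optional; the builder defaults everything to None.
let ctx = ResolutionContext::new()
    .with_deployed_contracts(&deployed)
    .with_variables(&variables)
    .with_block_number(&tip);

// With a pinned tip, `Latest` resolves to that concrete block number,
// while explicit tags and numbers pass through untouched.
assert_eq!(
    ctx.resolve_block_number(BlockNumberOrTag::Latest),
    BlockNumberOrTag::Number(42)
);
```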
@@ -0,0 +1,34 @@
[package]
name = "ml-test-runner"
description = "ML-based test runner for executing differential tests file by file"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true

[[bin]]
name = "ml-test-runner"
path = "src/main.rs"

[dependencies]
revive-dt-common = { workspace = true }
revive-dt-compiler = { workspace = true }
revive-dt-config = { workspace = true }
revive-dt-core = { workspace = true }
revive-dt-format = { workspace = true }
revive-dt-node = { workspace = true }
revive-dt-node-interaction = { workspace = true }
revive-dt-report = { workspace = true }

alloy = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }
tokio = { workspace = true }
temp-dir = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }

[lints]
workspace = true
@@ -0,0 +1,74 @@
# ML Test Runner

A test runner for executing Revive differential tests file-by-file with cargo-test-style output.

This is similar to the `retester` binary but designed for ML-based test execution with a focus on:
- Running tests file-by-file (rather than in bulk)
- Caching passed tests to skip them in future runs
- Providing cargo-test-style output for easy integration with ML pipelines
- Single platform testing (rather than differential testing)

## Features

- **File-by-file execution**: Run tests on individual `.sol` files, corpus files (`.json`), or recursively walk directories
- **Cached results**: Skip tests that have already passed using `--cached-passed`
- **Fail fast**: Stop on first failure with `--bail`
- **Cargo-like output**: Familiar test output format with colored pass/fail indicators
- **Platform support**: Test against `geth` or `kitchensink` platforms

## Usage

```bash
# Run a single .sol file (compile-only mode, default)
./ml-test-runner path/to/test.sol --platform geth-evm-solc

# Run all tests in a corpus file
./ml-test-runner path/to/corpus.json --platform kitchensink-polkavm-resolc

# Walk a directory recursively for .sol files
./ml-test-runner path/to/tests/ --platform geth-evm-solc

# Use cached results and bail on first failure
./ml-test-runner path/to/tests/ --cached-passed ./cache.txt --bail

# Start the platform and execute tests (full mode)
./ml-test-runner path/to/tests/ --platform geth-evm-solc --start-platform

# Enable verbose logging (info, debug, or trace level)
RUST_LOG=info ./ml-test-runner path/to/tests/
RUST_LOG=debug ./ml-test-runner path/to/tests/ --start-platform
RUST_LOG=trace ./ml-test-runner path/to/tests/ --start-platform
```

## Arguments

- `<PATH>` - Path to test file (`.sol`), corpus file (`.json`), or folder of `.sol` files
- `--cached-passed <FILE>` - File to track tests that have already passed
- `--cached-failed <FILE>` - File to store tests that have failed (defaults to `.<platform>-failed`)
- `--bail` - Stop after the first file failure
- `--platform <PLATFORM>` - Platform to test against (e.g. `geth-evm-solc`, `kitchensink-polkavm-resolc`; default: `geth-evm-solc`)
- `--start-platform` - Start the platform and execute tests (default: `false`, compile-only mode)
- `--rpc-port <PORT>` - RPC port to connect to when using an existing node (default: `8545`)
- `--verbose` - Show verbose output including cached tests and detailed error messages

## Output Format

The runner produces cargo-test-style output:

```
test path/to/test1.sol ... ok
test path/to/test2.sol ... FAILED
test path/to/test3.sol ... cached

failures:

---- path/to/test2.sol ----
Error: ...

test result: FAILED. 1 passed; 1 failed; 1 cached; finished in 2.34s
```

## Building

```bash
cargo build --release -p ml-test-runner
```

The binary will be available at `target/release/ml-test-runner`.
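One detail the README leaves implicit: the cache files are plain text, one test path per line (written sorted, with blank lines ignored), so they are easy to seed or post-process from a pipeline. A hypothetical `cache.txt` might look like:

```
tests/solidity/simple/algorithm/arithmetics.sol
tests/solidity/simple/algorithm/fibonacci.sol
tests/solidity/simple/storage/store_load.sol
```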
@@ -0,0 +1,639 @@
use anyhow::Context;
use clap::Parser;
use revive_dt_common::{
    iterators::FilesWithExtensionIterator,
    types::{PlatformIdentifier, PrivateKeyAllocator},
};
use revive_dt_config::TestExecutionContext;
use revive_dt_core::{
    CachedCompiler, Platform,
    helpers::{TestDefinition, TestPlatformInformation},
};
use revive_dt_format::{
    case::CaseIdx,
    corpus::Corpus,
    metadata::{Metadata, MetadataFile},
};
use std::{
    borrow::Cow,
    collections::{BTreeMap, HashSet},
    fs::File,
    io::{BufRead, BufReader, BufWriter, Write},
    path::{Path, PathBuf},
    sync::Arc,
    time::{Duration, Instant},
};
use temp_dir::TempDir;
use tokio::sync::Mutex;
use tracing::info;
use tracing_subscriber::{EnvFilter, FmtSubscriber};

/// ML-based test runner for executing differential tests file by file
#[derive(Debug, Parser)]
#[command(name = "ml-test-runner")]
struct MlTestRunnerArgs {
    /// Path to test file (.sol), corpus file (.json), or folder containing .sol files
    #[arg(value_name = "PATH")]
    path: PathBuf,

    /// File to cache tests that have already passed
    #[arg(long = "cached-passed")]
    cached_passed: Option<PathBuf>,

    /// File to store tests that have failed (defaults to .<platform>-failed)
    #[arg(long = "cached-failed")]
    cached_failed: Option<PathBuf>,

    /// Stop after the first file failure
    #[arg(long = "bail")]
    bail: bool,

    /// Platform to test against (e.g., geth-evm-solc, kitchensink-polkavm-resolc)
    #[arg(long = "platform", default_value = "geth-evm-solc")]
    platform: PlatformIdentifier,

    /// Start the platform and wait for RPC readiness
    #[arg(long = "start-platform", default_value = "false")]
    start_platform: bool,

    /// Private key to use for wallet initialization (hex string with or without 0x prefix)
    #[arg(
        long = "private-key",
        default_value = "0x5fb92d6e98884f76de468fa3f6278f8807c48bebc13595d45af5bdc4da702133"
    )]
    private_key: String,

    /// RPC port to connect to when using existing node
    #[arg(long = "rpc-port", default_value = "8545")]
    rpc_port: u16,

    /// Show verbose output including cached tests and detailed error messages
    #[arg(long = "verbose", short = 'v')]
    verbose: bool,
}

fn main() -> anyhow::Result<()> {
    let args = MlTestRunnerArgs::parse();

    // Only set up tracing if RUST_LOG is explicitly set or --verbose is passed
    if std::env::var("RUST_LOG").is_ok() || args.verbose {
        let subscriber = FmtSubscriber::builder()
            .with_env_filter(EnvFilter::from_default_env())
            .with_writer(std::io::stderr)
            .finish();
        tracing::subscriber::set_global_default(subscriber)
            .expect("Failed to set tracing subscriber");
    }

    info!("ML test runner starting");
    info!("Platform: {:?}", args.platform);
    info!("Start platform: {}", args.start_platform);

    tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .expect("Failed building the Runtime")
        .block_on(run(args))
}

/// Wait for HTTP server to be ready by attempting to connect to the specified port
async fn wait_for_http_server(port: u16) -> anyhow::Result<()> {
    const MAX_RETRIES: u32 = 60;
    const RETRY_DELAY: Duration = Duration::from_secs(1);

    for attempt in 1..=MAX_RETRIES {
        match tokio::net::TcpStream::connect(format!("127.0.0.1:{}", port)).await {
            Ok(_) => {
                info!("Successfully connected to HTTP server on port {} (attempt {})", port, attempt);
                return Ok(());
            },
            Err(e) => {
                if attempt == MAX_RETRIES {
                    anyhow::bail!(
                        "Failed to connect to HTTP server on port {} after {} attempts: {}",
                        port,
                        MAX_RETRIES,
                        e
                    );
                }
                if attempt % 10 == 0 {
                    info!(
                        "Still waiting for HTTP server on port {} (attempt {}/{})",
                        port, attempt, MAX_RETRIES
                    );
                }
                tokio::time::sleep(RETRY_DELAY).await;
            },
        }
    }

    unreachable!()
}

async fn run(args: MlTestRunnerArgs) -> anyhow::Result<()> {
    let start_time = Instant::now();

    info!("Discovering test files from: {}", args.path.display());
    let test_files = discover_test_files(&args.path)?;
    info!("Found {} test file(s)", test_files.len());

    let cached_passed = if let Some(cache_file) = &args.cached_passed {
        let cached = load_cached_passed(cache_file)?;
        info!("Loaded {} cached passed test(s)", cached.len());
        cached
    } else {
        HashSet::new()
    };

    let cached_passed = Arc::new(Mutex::new(cached_passed));

    // Set up cached-failed file (defaults to .<platform>-failed)
    let cached_failed_path = args
        .cached_failed
        .clone()
        .unwrap_or_else(|| PathBuf::from(format!(".{:?}-failed", args.platform)));

    let cached_failed = Arc::new(Mutex::new(HashSet::<String>::new()));

    // Get the platform based on CLI args
    let platform: &dyn Platform = match args.platform {
        PlatformIdentifier::GethEvmSolc => &revive_dt_core::GethEvmSolcPlatform,
        PlatformIdentifier::LighthouseGethEvmSolc => &revive_dt_core::LighthouseGethEvmSolcPlatform,
        PlatformIdentifier::KitchensinkPolkavmResolc =>
            &revive_dt_core::KitchensinkPolkavmResolcPlatform,
        PlatformIdentifier::KitchensinkRevmSolc => &revive_dt_core::KitchensinkRevmSolcPlatform,
        PlatformIdentifier::ReviveDevNodePolkavmResolc =>
            &revive_dt_core::ReviveDevNodePolkavmResolcPlatform,
        PlatformIdentifier::ReviveDevNodeRevmSolc => &revive_dt_core::ReviveDevNodeRevmSolcPlatform,
        PlatformIdentifier::ZombienetPolkavmResolc =>
            &revive_dt_core::ZombienetPolkavmResolcPlatform,
        PlatformIdentifier::ZombienetRevmSolc => &revive_dt_core::ZombienetRevmSolcPlatform,
    };

    let test_context = TestExecutionContext::default();
    let context = revive_dt_config::Context::Test(Box::new(test_context));

    let node: &'static dyn revive_dt_node_interaction::EthereumNode = if args.start_platform {
        info!("Starting blockchain node...");
        let node_handle =
            platform.new_node(context.clone()).context("Failed to spawn node thread")?;

        info!("Waiting for node to start...");
        let node = node_handle
            .join()
            .map_err(|e| anyhow::anyhow!("Node thread panicked: {:?}", e))?
            .context("Failed to start node")?;

        info!("Node started with ID: {}, connection: {}", node.id(), node.connection_string());
        let node = Box::leak(node);

        info!("Running pre-transactions...");
        node.pre_transactions().await.context("Failed to run pre-transactions")?;
        info!("Pre-transactions completed");

        node
    } else {
        info!("Using existing node at port {}", args.rpc_port);

        // Wait for the HTTP server to be ready
        info!("Waiting for HTTP server to be ready on port {}...", args.rpc_port);
        wait_for_http_server(args.rpc_port).await?;
        info!("HTTP server is ready");

        let existing_node: Box<dyn revive_dt_node_interaction::EthereumNode> = match args.platform {
            PlatformIdentifier::GethEvmSolc | PlatformIdentifier::LighthouseGethEvmSolc =>
                Box::new(
                    revive_dt_node::node_implementations::geth::GethNode::new_existing(
                        &args.private_key,
                        args.rpc_port,
                    )
                    .await?,
                ),
            PlatformIdentifier::KitchensinkPolkavmResolc |
            PlatformIdentifier::KitchensinkRevmSolc |
            PlatformIdentifier::ReviveDevNodePolkavmResolc |
            PlatformIdentifier::ReviveDevNodeRevmSolc |
            PlatformIdentifier::ZombienetPolkavmResolc |
            PlatformIdentifier::ZombienetRevmSolc => Box::new(
                revive_dt_node::node_implementations::substrate::SubstrateNode::new_existing(
                    &args.private_key,
                    args.rpc_port,
                )
                .await?,
            ),
        };
        Box::leak(existing_node)
    };

    let mut passed_files = 0;
    let mut failed_files = 0;
    let mut skipped_files = 0;
    let mut failures = Vec::new();

    const GREEN: &str = "\x1B[32m";
    const RED: &str = "\x1B[31m";
    const YELLOW: &str = "\x1B[33m";
    const COLOUR_RESET: &str = "\x1B[0m";
    const BOLD: &str = "\x1B[1m";
    const BOLD_RESET: &str = "\x1B[22m";

    for test_file in test_files {
        let file_display = test_file.display().to_string();

        info!("\n\n == Executing test file: {file_display} == \n\n");
        // Check if already passed
        {
            let cache = cached_passed.lock().await;
            if cache.contains(&file_display) {
                if args.verbose {
                    println!("test {file_display} ... {YELLOW}cached{COLOUR_RESET}");
                }
                skipped_files += 1;
                continue;
            }
        }

        info!("Loading metadata from: {}", test_file.display());
        let metadata_file = match load_metadata_file(&test_file) {
            Ok(mf) => {
                info!("Loaded metadata with {} case(s)", mf.cases.len());
                mf
            },
            Err(e) => {
                // Skip files without metadata instead of treating them as failures
                info!("Skipping {} (no metadata): {}", file_display, e);
                skipped_files += 1;
                continue;
            },
        };

        // Execute test with a 20 second timeout
        let test_result = tokio::time::timeout(
            Duration::from_secs(20),
            execute_test_file(&metadata_file, platform, node, &context),
        )
        .await;

        let result = match test_result {
            Ok(Ok(_)) => Ok(()),
            Ok(Err(e)) => Err(e),
            Err(_) => Err(anyhow::anyhow!("Test timed out after 20 seconds")),
        };

        match result {
            Ok(_) => {
                println!("test {file_display} ... {GREEN}ok{COLOUR_RESET}");
                passed_files += 1;

                // Update cache
                if let Some(cache_file) = &args.cached_passed {
                    let mut cache = cached_passed.lock().await;
                    cache.insert(file_display);
                    if let Err(e) = save_cached_passed(cache_file, &cache) {
                        info!("Failed to save cache: {}", e);
                    }
                }
            },
            Err(e) => {
                println!("test {file_display} ... {RED}FAILED{COLOUR_RESET}");
                failed_files += 1;
                let error_detail = if args.verbose { format!("{:?}", e) } else { format!("{}", e) };
                failures.push((file_display.clone(), error_detail));

                // Update cached-failed
                {
                    let mut cache = cached_failed.lock().await;
                    cache.insert(file_display);
                    if let Err(e) = save_cached_failed(&cached_failed_path, &cache) {
                        info!("Failed to save cached-failed: {}", e);
                    }
                }

                if args.bail {
                    info!("Bailing after first failure");
                    break;
                }
            },
        }
    }

    // Print summary
    println!();
    if !failures.is_empty() && args.verbose {
        println!("{BOLD}failures:{BOLD_RESET}");
        println!();
        for (file, error) in &failures {
            println!("---- {} ----", file);
            println!("{}", error);
            println!();
        }
    }

    let elapsed = start_time.elapsed();
    println!(
        "test result: {}. {} passed; {} failed; {} cached; finished in {:.2}s",
        if failed_files == 0 {
            format!("{GREEN}ok{COLOUR_RESET}")
        } else {
            format!("{RED}FAILED{COLOUR_RESET}")
        },
        passed_files,
        failed_files,
        skipped_files,
        elapsed.as_secs_f64()
    );

    if failed_files > 0 {
        std::process::exit(1);
    }

    Ok(())
}

/// Discover test files from the given path
fn discover_test_files(path: &Path) -> anyhow::Result<Vec<PathBuf>> {
    if !path.exists() {
        anyhow::bail!("Path does not exist: {}", path.display());
    }

    let mut files = Vec::new();

    if path.is_file() {
        let extension = path.extension().and_then(|s| s.to_str()).unwrap_or("");

        match extension {
            "sol" => {
                // Single .sol file
                files.push(path.to_path_buf());
            },
            "json" => {
                // Corpus file - enumerate its tests
                let corpus = Corpus::try_from_path(path)?;
                let metadata_files = corpus.enumerate_tests();
                for metadata in metadata_files {
                    files.push(metadata.metadata_file_path);
                }
            },
            _ => anyhow::bail!("Unsupported file extension: {}. Expected .sol or .json", extension),
        }
    } else if path.is_dir() {
        // First, find all test.json files
        let mut test_json_dirs = HashSet::new();
        for json_file in FilesWithExtensionIterator::new(path)
            .with_allowed_extension("json")
            .with_use_cached_fs(true)
        {
            if json_file.file_name().and_then(|s| s.to_str()) == Some("test.json") {
                if let Some(parent) = json_file.parent() {
                    test_json_dirs.insert(parent.to_path_buf());
                }

                // Try to parse as corpus file first, then as metadata file
                if let Ok(corpus) = Corpus::try_from_path(&json_file) {
                    // It's a corpus file - enumerate its tests
                    let metadata_files = corpus.enumerate_tests();
                    for metadata in metadata_files {
                        files.push(metadata.metadata_file_path);
                    }
                } else {
                    // It's a metadata file - use it directly
                    files.push(json_file);
                }
            }
        }

        // Then, find .sol files that are NOT in directories with test.json
        for sol_file in FilesWithExtensionIterator::new(path)
            .with_allowed_extension("sol")
            .with_use_cached_fs(true)
        {
            if let Some(parent) = sol_file.parent() {
                if !test_json_dirs.contains(parent) {
                    files.push(sol_file);
                }
            } else {
                files.push(sol_file);
            }
        }
    } else {
        anyhow::bail!("Path is neither a file nor a directory: {}", path.display());
    }

    Ok(files)
}

/// Load metadata from a test file
fn load_metadata_file(path: &Path) -> anyhow::Result<MetadataFile> {
    let metadata = Metadata::try_from_file(path)
        .ok_or_else(|| anyhow::anyhow!("Failed to load metadata from {}", path.display()))?;

    Ok(MetadataFile {
        metadata_file_path: path.to_path_buf(),
        corpus_file_path: path.to_path_buf(),
        content: metadata,
    })
}

/// Execute all test cases in a metadata file
async fn execute_test_file(
    metadata_file: &MetadataFile,
    platform: &dyn Platform,
    node: &'static dyn revive_dt_node_interaction::EthereumNode,
    context: &revive_dt_config::Context,
) -> anyhow::Result<()> {
    if metadata_file.cases.is_empty() {
        anyhow::bail!("No test cases found in file");
    }

    info!("Processing {} test case(s)", metadata_file.cases.len());

    let temp_dir = TempDir::new()?;
    info!("Created temporary directory: {}", temp_dir.path().display());

    info!("Initializing cached compiler");
    let cached_compiler = CachedCompiler::new(temp_dir.path().join("compilation_cache"), false)
        .await
        .map(Arc::new)
        .context("Failed to create cached compiler")?;

    let private_key_allocator =
        Arc::new(Mutex::new(PrivateKeyAllocator::new(alloy::primitives::U256::from(100))));

    let (reporter, report_task) =
        revive_dt_report::ReportAggregator::new(context.clone()).into_task();

    tokio::spawn(report_task);

    info!("Building test definitions for {} case(s)", metadata_file.cases.len());
    let mut test_definitions = Vec::new();
    for (case_idx, case) in metadata_file.cases.iter().enumerate() {
        info!("Building test definition for case {}", case_idx);
        let test_def = build_test_definition(
            metadata_file,
            case,
            case_idx,
            platform,
            node,
            &context,
            &reporter,
        )
        .await?;

        if let Some(test_def) = test_def {
            info!("Test definition for case {} created successfully", case_idx);
            test_definitions.push(test_def);
        }
    }

    info!("Executing {} test definition(s)", test_definitions.len());
    for (idx, test_definition) in test_definitions.iter().enumerate() {
        info!("─────────────────────────────────────────────────────────────────");
        info!(
            "Executing case {}/{}: case_idx={}, mode={}, steps={}",
            idx + 1,
            test_definitions.len(),
            test_definition.case_idx,
            test_definition.mode,
            test_definition.case.steps.len()
        );

        info!("Creating driver for case {}", test_definition.case_idx);
        let driver = revive_dt_core::differential_tests::Driver::new_root(
            test_definition,
            private_key_allocator.clone(),
            &cached_compiler,
        )
        .await
        .context("Failed to create driver")?;

        info!(
            "Running {} step(s) for case {}",
            test_definition.case.steps.len(),
            test_definition.case_idx
        );
        let steps_executed = driver
            .execute_all()
            .await
            .context(format!("Failed to execute case {}", test_definition.case_idx))?;
        info!(
            "✓ Case {} completed successfully, executed {} step(s)",
            test_definition.case_idx, steps_executed
        );
    }
    info!("─────────────────────────────────────────────────────────────────");
    info!("All {} test case(s) executed successfully", test_definitions.len());

    Ok(())
}

/// Build a test definition for a single test case
async fn build_test_definition<'a>(
    metadata_file: &'a MetadataFile,
    case: &'a revive_dt_format::case::Case,
    case_idx: usize,
    platform: &'a dyn Platform,
    node: &'a dyn revive_dt_node_interaction::EthereumNode,
    context: &revive_dt_config::Context,
    reporter: &revive_dt_report::Reporter,
) -> anyhow::Result<Option<TestDefinition<'a>>> {
    let mode = case
        .modes
        .as_ref()
        .or(metadata_file.modes.as_ref())
        .and_then(|modes| modes.first())
        .and_then(|parsed_mode| parsed_mode.to_modes().next())
        .map(Cow::Owned)
        .or_else(|| revive_dt_compiler::Mode::all().next().map(Cow::Borrowed))
        .unwrap();

    let compiler = platform
        .new_compiler(context.clone(), mode.version.clone().map(Into::into))
        .await
        .context("Failed to create compiler")?;

    let test_reporter =
        reporter.test_specific_reporter(Arc::new(revive_dt_report::TestSpecifier {
            solc_mode: mode.as_ref().clone(),
            metadata_file_path: metadata_file.metadata_file_path.clone(),
            case_idx: CaseIdx::new(case_idx),
        }));

    let execution_reporter =
        test_reporter.execution_specific_reporter(node.id(), platform.platform_identifier());

    let mut platforms = BTreeMap::new();
    platforms.insert(
        platform.platform_identifier(),
        TestPlatformInformation { platform, node, compiler, reporter: execution_reporter },
    );

    let test_definition = TestDefinition {
        metadata: metadata_file,
        metadata_file_path: &metadata_file.metadata_file_path,
        mode,
        case_idx: CaseIdx::new(case_idx),
        case,
        platforms,
        reporter: test_reporter,
    };

    if let Err((reason, _)) = test_definition.check_compatibility() {
        info!("Skipping case {}: {}", case_idx, reason);
        return Ok(None);
    }

    Ok(Some(test_definition))
}

/// Load cached passed tests from file
fn load_cached_passed(path: &Path) -> anyhow::Result<HashSet<String>> {
    if !path.exists() {
        return Ok(HashSet::new());
    }

    let file = File::open(path).context("Failed to open cached-passed file")?;
    let reader = BufReader::new(file);

    let mut cache = HashSet::new();
    for line in reader.lines() {
        let line = line?;
        let trimmed = line.trim();
        if !trimmed.is_empty() {
            cache.insert(trimmed.to_string());
        }
    }

    Ok(cache)
}

/// Save cached passed tests to file
fn save_cached_passed(path: &Path, cache: &HashSet<String>) -> anyhow::Result<()> {
    let file = File::create(path).context("Failed to create cached-passed file")?;
    let mut writer = BufWriter::new(file);

    let mut entries: Vec<_> = cache.iter().collect();
    entries.sort();

    for entry in entries {
        writeln!(writer, "{}", entry)?;
    }

    writer.flush()?;
    Ok(())
}

/// Save cached failed tests to file
fn save_cached_failed(path: &Path, cache: &HashSet<String>) -> anyhow::Result<()> {
    let file = File::create(path).context("Failed to create cached-failed file")?;
    let mut writer = BufWriter::new(file);

    let mut entries: Vec<_> = cache.iter().collect();
    entries.sort();

    for entry in entries {
        writeln!(writer, "{}", entry)?;
    }

    writer.flush()?;
    Ok(())
}
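Since the runner exits with status 1 when any file fails (see `std::process::exit(1)` above) and 0 otherwise, an ML pipeline can gate on the exit code alone. A sketch with hypothetical paths; the default failed-list name is derived from the platform's `Debug` name, e.g. `.GethEvmSolc-failed`:

```bash
# Re-run the suite, skipping files recorded in the pass cache; the exit
# code tells us whether anything newly failed.
if ./ml-test-runner tests/ --cached-passed ./cache.txt; then
    echo "all files passed or were cached"
else
    echo "at least one file failed; see the failed-list file for details"
    exit 1
fi
```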
@@ -12,7 +12,6 @@ rust-version.workspace = true
 revive-common = { workspace = true }
 
 revive-dt-format = { workspace = true }
-revive-dt-report = { workspace = true }
 
 alloy = { workspace = true }
 anyhow = { workspace = true }
@@ -1,83 +1,102 @@
 //! This crate implements all node interactions.
 
-use std::pin::Pin;
-use std::sync::Arc;
+use std::{pin::Pin, sync::Arc};
 
-use alloy::network::Ethereum;
-use alloy::primitives::{Address, StorageKey, TxHash, U256};
-use alloy::providers::DynProvider;
-use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
-use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
+use alloy::{
+    primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
+    rpc::types::{
+        EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
+        trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace},
+    },
+};
 use anyhow::Result;
 
 use futures::Stream;
 use revive_common::EVMVersion;
 use revive_dt_format::traits::ResolverApi;
-use revive_dt_report::MinedBlockInformation;
 
 /// An interface for all interactions with Ethereum compatible nodes.
 #[allow(clippy::type_complexity)]
 pub trait EthereumNode {
     /// A function to run post spawning the nodes and before any transactions are run on the node.
     fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>>;
 
     fn id(&self) -> usize;
 
     /// Returns the node's connection string.
     fn connection_string(&self) -> &str;
 
     fn submit_transaction(
         &self,
         transaction: TransactionRequest,
     ) -> Pin<Box<dyn Future<Output = Result<TxHash>> + '_>>;
 
     fn get_receipt(
         &self,
         tx_hash: TxHash,
     ) -> Pin<Box<dyn Future<Output = Result<TransactionReceipt>> + '_>>;
 
     /// Execute the [TransactionRequest] and return a [TransactionReceipt].
     fn execute_transaction(
         &self,
         transaction: TransactionRequest,
     ) -> Pin<Box<dyn Future<Output = Result<TransactionReceipt>> + '_>>;
 
     /// Trace the transaction in the [TransactionReceipt] and return a [GethTrace].
     fn trace_transaction(
         &self,
         tx_hash: TxHash,
         trace_options: GethDebugTracingOptions,
     ) -> Pin<Box<dyn Future<Output = Result<GethTrace>> + '_>>;
 
     /// Returns the state diff of the transaction hash in the [TransactionReceipt].
     fn state_diff(&self, tx_hash: TxHash) -> Pin<Box<dyn Future<Output = Result<DiffMode>> + '_>>;
 
     /// Returns the balance of the provided [`Address`] back.
     fn balance_of(&self, address: Address) -> Pin<Box<dyn Future<Output = Result<U256>> + '_>>;
 
     /// Returns the latest storage proof of the provided [`Address`]
     fn latest_state_proof(
         &self,
         address: Address,
         keys: Vec<StorageKey>,
     ) -> Pin<Box<dyn Future<Output = Result<EIP1186AccountProofResponse>> + '_>>;
 
     /// Returns the resolver that is to be used with this Ethereum node.
     fn resolver(&self) -> Pin<Box<dyn Future<Output = Result<Arc<dyn ResolverApi + '_>>> + '_>>;
 
     /// Returns the EVM version of the node.
     fn evm_version(&self) -> EVMVersion;
 
     /// Returns a stream of the blocks that were mined by the node.
     fn subscribe_to_full_blocks_information(
         &self,
     ) -> Pin<
         Box<
             dyn Future<Output = anyhow::Result<Pin<Box<dyn Stream<Item = MinedBlockInformation>>>>>
                 + '_,
         >,
     >;
 
-    fn provider(&self)
-        -> Pin<Box<dyn Future<Output = anyhow::Result<DynProvider<Ethereum>>> + '_>>;
+    /// Checks if the provided address is in the wallet. If it is, returns the address.
+    /// Otherwise, returns the default signer's address.
+    fn resolve_signer_or_default(&self, address: Address) -> Address;
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct MinedBlockInformation {
+    /// The block number.
+    pub block_number: BlockNumber,
+
+    /// The block timestamp.
+    pub block_timestamp: BlockTimestamp,
+
+    /// The amount of gas mined in the block.
+    pub mined_gas: u128,
+
+    /// The gas limit of the block.
+    pub block_gas_limit: u128,
+
+    /// The hashes of the transactions that were mined as part of the block.
+    pub transaction_hashes: Vec<TxHash>,
 }
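For context on the new `subscribe_to_full_blocks_information` method and the relocated `MinedBlockInformation` struct, here is a hedged consumption sketch; it assumes `futures::StreamExt` is in scope and is not code from this repository:

```rust
use futures::StreamExt as _;

// Log each block as the node mines it; `blocks` is the pinned, boxed
// stream the trait method yields once its future resolves.
async fn log_mined_blocks(node: &dyn EthereumNode) -> anyhow::Result<()> {
    let mut blocks = node.subscribe_to_full_blocks_information().await?;
    while let Some(info) = blocks.next().await {
        println!(
            "block {} ({} tx(s), gas {}/{})",
            info.block_number,
            info.transaction_hashes.len(),
            info.mined_gas,
            info.block_gas_limit
        );
    }
    Ok(())
}
```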
@@ -21,7 +21,6 @@ revive-dt-common = { workspace = true }
 revive-dt-config = { workspace = true }
 revive-dt-format = { workspace = true }
 revive-dt-node-interaction = { workspace = true }
-revive-dt-report = { workspace = true }
 
 serde = { workspace = true }
 serde_json = { workspace = true }
@@ -30,7 +29,6 @@ serde_yaml_ng = { workspace = true }
 
 sp-core = { workspace = true }
 sp-runtime = { workspace = true }
-subxt = { workspace = true }
 zombienet-sdk = { workspace = true }
 
 [dev-dependencies]
+132 -151
@@ -1,9 +1,9 @@
 use std::{
     fs::{File, OpenOptions},
     io::{BufRead, BufReader, Write},
     path::Path,
     process::{Child, Command},
     time::{Duration, Instant},
 };

 use anyhow::{Context, Result, bail};
@@ -12,180 +12,161 @@ use anyhow::{Context, Result, bail};
 /// when the process is dropped.
 #[derive(Debug)]
 pub struct Process {
     /// The handle of the child process.
     child: Child,

     /// The file that stdout is being logged to.
     stdout_logs_file: File,

     /// The file that stderr is being logged to.
     stderr_logs_file: File,
 }

 impl Process {
     pub fn new(
         log_file_prefix: impl Into<Option<&'static str>>,
         logs_directory: impl AsRef<Path>,
         binary_path: impl AsRef<Path>,
         command_building_callback: impl FnOnce(&mut Command, File, File),
         process_readiness_wait_behavior: ProcessReadinessWaitBehavior,
     ) -> Result<Self> {
         let log_file_prefix = log_file_prefix.into();

         let (stdout_file_name, stderr_file_name) = match log_file_prefix {
-            Some(prefix) => (
-                format!("{prefix}_stdout.log"),
-                format!("{prefix}_stderr.log"),
-            ),
-            None => ("stdout.log".to_string(), "stderr.log".to_string()),
-        };
+            Some(prefix) => (format!("{prefix}_stdout.log"), format!("{prefix}_stderr.log")),
+            None => ("stdout.log".to_string(), "stderr.log".to_string()),
+        };

         let stdout_logs_file_path = logs_directory.as_ref().join(stdout_file_name);
         let stderr_logs_file_path = logs_directory.as_ref().join(stderr_file_name);

         let stdout_logs_file = OpenOptions::new()
             .write(true)
             .truncate(true)
             .create(true)
             .open(stdout_logs_file_path.as_path())
             .context("Failed to open the stdout logs file")?;
         let stderr_logs_file = OpenOptions::new()
             .write(true)
             .truncate(true)
             .create(true)
             .open(stderr_logs_file_path.as_path())
             .context("Failed to open the stderr logs file")?;

         let mut command = {
-            let stdout_logs_file = stdout_logs_file
-                .try_clone()
-                .context("Failed to clone the stdout logs file")?;
-            let stderr_logs_file = stderr_logs_file
-                .try_clone()
-                .context("Failed to clone the stderr logs file")?;
+            let stdout_logs_file =
+                stdout_logs_file.try_clone().context("Failed to clone the stdout logs file")?;
+            let stderr_logs_file =
+                stderr_logs_file.try_clone().context("Failed to clone the stderr logs file")?;

             let mut command = Command::new(binary_path.as_ref());
             command_building_callback(&mut command, stdout_logs_file, stderr_logs_file);
             command
         };
-        let mut child = command
-            .spawn()
-            .context("Failed to spawn the built command")?;
+        let mut child = command.spawn().context("Failed to spawn the built command")?;

         match process_readiness_wait_behavior {
-            ProcessReadinessWaitBehavior::NoStartupWait => {}
+            ProcessReadinessWaitBehavior::NoStartupWait => {},
             ProcessReadinessWaitBehavior::WaitDuration(duration) => std::thread::sleep(duration),
             ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
                 max_wait_duration,
                 mut check_function,
             } => {
                 let spawn_time = Instant::now();

                 let stdout_logs_file = OpenOptions::new()
                     .read(true)
                     .open(stdout_logs_file_path)
                     .context("Failed to open the stdout logs file")?;
                 let stderr_logs_file = OpenOptions::new()
                     .read(true)
                     .open(stderr_logs_file_path)
                     .context("Failed to open the stderr logs file")?;

                 let mut stdout_lines = BufReader::new(stdout_logs_file).lines();
                 let mut stderr_lines = BufReader::new(stderr_logs_file).lines();

                 let mut stdout = String::new();
                 let mut stderr = String::new();

                 loop {
                     let stdout_line = stdout_lines.next().and_then(Result::ok);
                     let stderr_line = stderr_lines.next().and_then(Result::ok);

                     if let Some(stdout_line) = stdout_line.as_ref() {
                         stdout.push_str(stdout_line);
                         stdout.push('\n');
                     }
                     if let Some(stderr_line) = stderr_line.as_ref() {
                         stderr.push_str(stderr_line);
                         stderr.push('\n');
                     }

                     let check_result =
                         check_function(stdout_line.as_deref(), stderr_line.as_deref()).context(
                             format!(
                                 "Failed to wait for the process to be ready - {stdout} - {stderr}"
                             ),
                         )?;

                     if check_result {
                         break;
                     }

                     if Instant::now().duration_since(spawn_time) > max_wait_duration {
                         bail!(
                             "Waited for the process to start but it failed to start in time. stderr {stderr} - stdout {stdout}"
                         )
                     }
                 }
-            }
+            },
             ProcessReadinessWaitBehavior::WaitForCommandToExit => {
-                if !child
-                    .wait()
-                    .context("Failed waiting for process to finish")?
-                    .success()
-                {
+                if !child.wait().context("Failed waiting for process to finish")?.success() {
                     anyhow::bail!("Failed to spawn command");
                 }
-            }
+            },
         }

-        Ok(Self {
-            child,
-            stdout_logs_file,
-            stderr_logs_file,
-        })
+        Ok(Self { child, stdout_logs_file, stderr_logs_file })
     }
 }

 impl Drop for Process {
     fn drop(&mut self) {
         self.child.kill().expect("Failed to kill the process");
-        self.stdout_logs_file
-            .flush()
-            .expect("Failed to flush the stdout logs file");
-        self.stderr_logs_file
-            .flush()
-            .expect("Failed to flush the stderr logs file");
+        self.stdout_logs_file.flush().expect("Failed to flush the stdout logs file");
+        self.stderr_logs_file.flush().expect("Failed to flush the stderr logs file");
     }
 }

 pub enum ProcessReadinessWaitBehavior {
     /// The process does not require any kind of wait after it's been spawned and can be used
     /// straight away.
     NoStartupWait,

     /// Waits for the command to exit.
     WaitForCommandToExit,

     /// The process does require some amount of wait duration after it's been started.
     WaitDuration(Duration),

     /// The process requires a time bounded wait function which is a function of the lines that
     /// appear in the log files.
     TimeBoundedWaitFunction {
         /// The maximum amount of time to wait for the check function to return true.
         max_wait_duration: Duration,

         /// The function to use to check if the process spawned is ready to use or not. This
         /// function should return the following in the following cases:
         ///
         /// - `Ok(true)`: Returned when the condition the process is waiting for has been fulfilled
         ///   and the wait is completed.
         /// - `Ok(false)`: The process is not ready yet but it might be ready in the future.
         /// - `Err`: The process is not ready yet and will not be ready in the future as it appears
         ///   that it has encountered an error when it was being spawned.
         ///
         /// The first argument is a line from stdout and the second argument is a line from stderr.
         #[allow(clippy::type_complexity)]
         check_function: Box<dyn FnMut(Option<&str>, Option<&str>) -> anyhow::Result<bool>>,
     },
 }
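A sketch of spawning a binary through `Process::new` with the time-bounded readiness check shown above; the binary path, log prefix, directory, and readiness marker are all hypothetical (not part of the mirrored commits):

use std::time::Duration;

let _node = Process::new(
    "node",                          // log file prefix -> node_stdout.log / node_stderr.log
    "/tmp/dt-logs",                  // hypothetical logs directory
    "/usr/local/bin/some-node",      // hypothetical binary
    |command, stdout, stderr| {
        command.arg("--dev").stdout(stdout).stderr(stderr);
    },
    ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
        max_wait_duration: Duration::from_secs(30),
        check_function: Box::new(|stdout_line, _stderr_line| {
            // Hypothetical readiness marker; returning Err would abort the wait early.
            Ok(stdout_line.is_some_and(|line| line.contains("Running JSON-RPC server")))
        }),
    },
)?;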
+10 -10
@@ -10,16 +10,16 @@ pub mod provider_utils;

 /// An abstract interface for testing nodes.
 pub trait Node: EthereumNode {
     /// Spawns a node configured according to the genesis json.
     ///
     /// Blocking until it's ready to accept transactions.
     fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()>;

     /// Prune the node instance and related data.
     ///
     /// Blocking until it's completely stopped.
     fn shutdown(&mut self) -> anyhow::Result<()>;

     /// Returns the node version.
     fn version(&self) -> anyhow::Result<String>;
 }
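A sketch of the lifecycle the `Node` trait above implies; `node` and `genesis` are hypothetical stand-ins (not part of the mirrored commits):

fn exercise_node(node: &mut dyn Node, genesis: Genesis) -> anyhow::Result<()> {
    node.spawn(genesis)?;                        // blocks until transactions are accepted
    println!("node version: {}", node.version()?);
    node.shutdown()                              // blocks until the node is fully stopped
}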
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -6,64 +6,56 @@ use tower::{Layer, Service};

 #[derive(Clone, Debug)]
 pub struct ConcurrencyLimiterLayer {
     semaphore: Arc<Semaphore>,
 }

 impl ConcurrencyLimiterLayer {
     pub fn new(permit_count: usize) -> Self {
-        Self {
-            semaphore: Arc::new(Semaphore::new(permit_count)),
-        }
+        Self { semaphore: Arc::new(Semaphore::new(permit_count)) }
     }
 }

 impl<S> Layer<S> for ConcurrencyLimiterLayer {
     type Service = ConcurrencyLimiterService<S>;

     fn layer(&self, inner: S) -> Self::Service {
-        ConcurrencyLimiterService {
-            service: inner,
-            semaphore: self.semaphore.clone(),
-        }
+        ConcurrencyLimiterService { service: inner, semaphore: self.semaphore.clone() }
     }
 }

 #[derive(Clone)]
 pub struct ConcurrencyLimiterService<S> {
     service: S,
     semaphore: Arc<Semaphore>,
 }

 impl<S, Request> Service<Request> for ConcurrencyLimiterService<S>
 where
     S: Service<Request> + Send,
     S::Future: Send + 'static,
 {
     type Response = S::Response;
     type Error = S::Error;
     type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;

     fn poll_ready(
         &mut self,
         cx: &mut std::task::Context<'_>,
     ) -> std::task::Poll<Result<(), Self::Error>> {
         self.service.poll_ready(cx)
     }

     fn call(&mut self, req: Request) -> Self::Future {
         let semaphore = self.semaphore.clone();
         let future = self.service.call(req);

         Box::pin(async move {
-            let _permit = semaphore
-                .acquire()
-                .await
-                .expect("Semaphore has been closed");
+            let _permit = semaphore.acquire().await.expect("Semaphore has been closed");
             tracing::debug!(
                 available_permits = semaphore.available_permits(),
                 "Acquired Semaphore Permit"
             );
             future.await
         })
     }
 }
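A sketch of applying the layer above to any tower service; the permit count is an arbitrary example (not part of the mirrored commits):

use tower::Layer;

fn limit<S>(service: S) -> ConcurrencyLimiterService<S> {
    // At most 8 calls may hold a semaphore permit at once, shared across all
    // clones of the returned service since they share the same Arc<Semaphore>.
    ConcurrencyLimiterLayer::new(8).layer(service)
}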
@@ -1,98 +0,0 @@
-use alloy::{
-    network::{Network, TransactionBuilder},
-    providers::{
-        Provider, SendableTx,
-        fillers::{GasFiller, TxFiller},
-    },
-    transports::TransportResult,
-};
-
-// Percentage padding applied to estimated gas (e.g. 120 = 20% padding)
-const GAS_ESTIMATE_PADDING_NUMERATOR: u64 = 120;
-const GAS_ESTIMATE_PADDING_DENOMINATOR: u64 = 100;
-
-#[derive(Clone, Debug)]
-pub struct FallbackGasFiller {
-    inner: GasFiller,
-    default_gas_limit: u64,
-    default_max_fee_per_gas: u128,
-    default_priority_fee: u128,
-}
-
-impl FallbackGasFiller {
-    pub fn new(
-        default_gas_limit: u64,
-        default_max_fee_per_gas: u128,
-        default_priority_fee: u128,
-    ) -> Self {
-        Self {
-            inner: GasFiller,
-            default_gas_limit,
-            default_max_fee_per_gas,
-            default_priority_fee,
-        }
-    }
-}
-
-impl Default for FallbackGasFiller {
-    fn default() -> Self {
-        FallbackGasFiller::new(25_000_000, 1_000_000_000, 1_000_000_000)
-    }
-}
-
-impl<N> TxFiller<N> for FallbackGasFiller
-where
-    N: Network,
-{
-    type Fillable = Option<<GasFiller as TxFiller<N>>::Fillable>;
-
-    fn status(
-        &self,
-        tx: &<N as Network>::TransactionRequest,
-    ) -> alloy::providers::fillers::FillerControlFlow {
-        <GasFiller as TxFiller<N>>::status(&self.inner, tx)
-    }
-
-    fn fill_sync(&self, _: &mut alloy::providers::SendableTx<N>) {}
-
-    async fn prepare<P: Provider<N>>(
-        &self,
-        provider: &P,
-        tx: &<N as Network>::TransactionRequest,
-    ) -> TransportResult<Self::Fillable> {
-        match self.inner.prepare(provider, tx).await {
-            Ok(fill) => Ok(Some(fill)),
-            Err(err) => {
-                tracing::debug!(error = ?err, "Gas Provider Estimation Failed, using fallback");
-                Ok(None)
-            }
-        }
-    }
-
-    async fn fill(
-        &self,
-        fillable: Self::Fillable,
-        mut tx: alloy::providers::SendableTx<N>,
-    ) -> TransportResult<SendableTx<N>> {
-        if let Some(fill) = fillable {
-            let mut tx = self.inner.fill(fill, tx).await?;
-            if let Some(builder) = tx.as_mut_builder() {
-                if let Some(estimated) = builder.gas_limit() {
-                    let padded = estimated
-                        .checked_mul(GAS_ESTIMATE_PADDING_NUMERATOR)
-                        .and_then(|v| v.checked_div(GAS_ESTIMATE_PADDING_DENOMINATOR))
-                        .unwrap_or(u64::MAX);
-                    builder.set_gas_limit(padded);
-                }
-            }
-            Ok(tx)
-        } else {
-            if let Some(builder) = tx.as_mut_builder() {
-                builder.set_gas_limit(self.default_gas_limit);
-                builder.set_max_fee_per_gas(self.default_max_fee_per_gas);
-                builder.set_max_priority_fee_per_gas(self.default_priority_fee);
-            }
-            Ok(tx)
-        }
-    }
-}
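The deleted filler padded successful estimates by 20% using overflow-checked integer math; a standalone sketch of just that arithmetic (not part of the mirrored commits):

const GAS_ESTIMATE_PADDING_NUMERATOR: u64 = 120;
const GAS_ESTIMATE_PADDING_DENOMINATOR: u64 = 100;

fn pad_gas_estimate(estimated: u64) -> u64 {
    // e.g. 30_000 * 120 / 100 = 36_000; saturates to u64::MAX if the multiply overflows.
    estimated
        .checked_mul(GAS_ESTIMATE_PADDING_NUMERATOR)
        .and_then(|v| v.checked_div(GAS_ESTIMATE_PADDING_DENOMINATOR))
        .unwrap_or(u64::MAX)
}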
@@ -0,0 +1,76 @@
+use alloy::{
+    network::{Network, TransactionBuilder},
+    providers::{
+        Provider, SendableTx,
+        fillers::{GasFiller, TxFiller},
+    },
+    transports::TransportResult,
+};
+
+#[derive(Clone, Debug)]
+pub struct FallbackGasFiller {
+    inner: GasFiller,
+    default_gas_limit: u64,
+    default_max_fee_per_gas: u128,
+    default_priority_fee: u128,
+}
+
+impl FallbackGasFiller {
+    pub fn new(
+        default_gas_limit: u64,
+        default_max_fee_per_gas: u128,
+        default_priority_fee: u128,
+    ) -> Self {
+        Self { inner: GasFiller, default_gas_limit, default_max_fee_per_gas, default_priority_fee }
+    }
+}
+
+impl Default for FallbackGasFiller {
+    fn default() -> Self {
+        FallbackGasFiller::new(10_000_000, 1_000_000_000, 1_000_000_000)
+    }
+}
+
+impl<N> TxFiller<N> for FallbackGasFiller
+where
+    N: Network,
+{
+    type Fillable = Option<<GasFiller as TxFiller<N>>::Fillable>;
+
+    fn status(
+        &self,
+        tx: &<N as Network>::TransactionRequest,
+    ) -> alloy::providers::fillers::FillerControlFlow {
+        <GasFiller as TxFiller<N>>::status(&self.inner, tx)
+    }
+
+    fn fill_sync(&self, _: &mut alloy::providers::SendableTx<N>) {}
+
+    async fn prepare<P: Provider<N>>(
+        &self,
+        provider: &P,
+        tx: &<N as Network>::TransactionRequest,
+    ) -> TransportResult<Self::Fillable> {
+        // Try to fetch GasFiller's "fillable" (gas_price, base_fee, estimate_gas, …)
+        // Propagate errors so caller can handle them appropriately
+        self.inner.prepare(provider, tx).await.map(Some)
+    }
+
+    async fn fill(
+        &self,
+        fillable: Self::Fillable,
+        mut tx: alloy::providers::SendableTx<N>,
+    ) -> TransportResult<SendableTx<N>> {
+        if let Some(fill) = fillable {
+            // our inner GasFiller succeeded — use it
+            self.inner.fill(fill, tx).await
+        } else {
+            if let Some(builder) = tx.as_mut_builder() {
+                builder.set_gas_limit(self.default_gas_limit);
+                builder.set_max_fee_per_gas(self.default_max_fee_per_gas);
+                builder.set_max_priority_fee_per_gas(self.default_priority_fee);
+            }
+            Ok(tx)
+        }
+    }
+}
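Compared with the deleted file, this rewrite drops the 20% padding, propagates estimation errors from `prepare` via `.map(Some)` instead of swallowing them, and lowers the default gas limit from 25M to 10M. A sketch of constructing it (not part of the mirrored commits):

// Both lines build the same filler, given the Default impl above.
let filler = FallbackGasFiller::default();
let same = FallbackGasFiller::new(10_000_000, 1_000_000_000, 1_000_000_000);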
@@ -1,7 +1,7 @@
 mod concurrency_limiter;
-mod fallback_gas_filler;
+mod fallback_gas_provider;
 mod provider;

 pub use concurrency_limiter::*;
-pub use fallback_gas_filler::*;
+pub use fallback_gas_provider::*;
 pub use provider::*;
@@ -1,132 +1,124 @@
 use std::{ops::ControlFlow, sync::LazyLock, time::Duration};

 use alloy::{
     network::{Ethereum, Network, NetworkWallet, TransactionBuilder4844},
     providers::{
         Identity, PendingTransactionBuilder, Provider, ProviderBuilder, RootProvider,
         fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller},
     },
     rpc::client::ClientBuilder,
 };
 use anyhow::{Context, Result};
 use revive_dt_common::futures::{PollingWaitBehavior, poll};
-use tracing::{Instrument, debug, info, info_span};
+use tracing::debug;

 use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller};

 pub type ConcreteProvider<N, W> = FillProvider<
     JoinFill<
         JoinFill<JoinFill<JoinFill<Identity, FallbackGasFiller>, ChainIdFiller>, NonceFiller>,
         WalletFiller<W>,
     >,
     RootProvider<N>,
     N,
 >;

 pub async fn construct_concurrency_limited_provider<N, W>(
     rpc_url: &str,
     fallback_gas_filler: FallbackGasFiller,
     chain_id_filler: ChainIdFiller,
     nonce_filler: NonceFiller,
     wallet: W,
 ) -> Result<ConcreteProvider<N, W>>
 where
     N: Network<TransactionRequest: TransactionBuilder4844>,
     W: NetworkWallet<N>,
     Identity: TxFiller<N>,
     FallbackGasFiller: TxFiller<N>,
     ChainIdFiller: TxFiller<N>,
     NonceFiller: TxFiller<N>,
     WalletFiller<W>: TxFiller<N>,
 {
     // This is a global limit on the RPC concurrency that applies to all of the providers created
     // by the framework. With this limit, it means that we can have a maximum of N concurrent
     // requests at any point of time and no more than that. This is done in an effort to stabilize
     // the framework from some of the intermittent issues that we've been seeing related to RPC
     // calls.
     static GLOBAL_CONCURRENCY_LIMITER_LAYER: LazyLock<ConcurrencyLimiterLayer> =
-        LazyLock::new(|| ConcurrencyLimiterLayer::new(500));
+        LazyLock::new(|| ConcurrencyLimiterLayer::new(10));

     let client = ClientBuilder::default()
         .layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone())
         .connect(rpc_url)
         .await
         .context("Failed to construct the RPC client")?;

     let provider = ProviderBuilder::new()
         .disable_recommended_fillers()
         .network::<N>()
         .filler(fallback_gas_filler)
         .filler(chain_id_filler)
         .filler(nonce_filler)
         .wallet(wallet)
         .connect_client(client);

     Ok(provider)
 }

 pub async fn execute_transaction<N, W>(
     provider: ConcreteProvider<N, W>,
     transaction: N::TransactionRequest,
 ) -> Result<N::ReceiptResponse>
 where
     N: Network<
         TransactionRequest: TransactionBuilder4844,
         TxEnvelope = <Ethereum as Network>::TxEnvelope,
     >,
     W: NetworkWallet<N>,
     Identity: TxFiller<N>,
     FallbackGasFiller: TxFiller<N>,
     ChainIdFiller: TxFiller<N>,
     NonceFiller: TxFiller<N>,
     WalletFiller<W>: TxFiller<N>,
 {
-    let sendable_transaction = provider
-        .fill(transaction)
-        .await
-        .context("Failed to fill transaction")?;
+    let sendable_transaction =
+        provider.fill(transaction).await.context("Failed to fill transaction")?;

     let transaction_envelope = sendable_transaction
         .try_into_envelope()
         .context("Failed to convert transaction into an envelope")?;
     let tx_hash = *transaction_envelope.tx_hash();

     let mut pending_transaction = match provider.send_tx_envelope(transaction_envelope).await {
         Ok(pending_transaction) => pending_transaction,
         Err(error) => {
             let error_string = error.to_string();

             if error_string.contains("Transaction Already Imported") {
                 PendingTransactionBuilder::<N>::new(provider.root().clone(), tx_hash)
             } else {
                 return Err(error).context(format!("Failed to submit transaction {tx_hash}"));
             }
-        }
+        },
     };
     debug!(%tx_hash, "Submitted Transaction");

     pending_transaction.set_timeout(Some(Duration::from_secs(120)));
-    let tx_hash = pending_transaction.watch().await.context(format!(
-        "Transaction inclusion watching timeout for {tx_hash}"
-    ))?;
+    let tx_hash = pending_transaction
+        .watch()
+        .await
+        .context(format!("Transaction inclusion watching timeout for {tx_hash}"))?;

-    poll(
-        Duration::from_secs(60),
-        PollingWaitBehavior::Constant(Duration::from_secs(3)),
-        || {
-            let provider = provider.clone();
-
-            async move {
-                match provider.get_transaction_receipt(tx_hash).await {
-                    Ok(Some(receipt)) => {
-                        info!("Found the transaction receipt");
-                        Ok(ControlFlow::Break(receipt))
-                    }
-                    _ => Ok(ControlFlow::Continue(())),
-                }
-            }
-        },
-    )
-    .instrument(info_span!("Polling for receipt", %tx_hash))
-    .await
-    .context(format!("Polling for receipt failed for {tx_hash}"))
+    debug!(%tx_hash, "Transaction included, polling for receipt");
+
+    poll(Duration::from_secs(30), PollingWaitBehavior::Constant(Duration::from_secs(3)), || {
+        let provider = provider.clone();
+        async move {
+            match provider.get_transaction_receipt(tx_hash).await {
+                Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
+                _ => Ok(ControlFlow::Continue(())),
+            }
+        }
+    })
+    .await
+    .context(format!("Polling for receipt timed out for {tx_hash}"))
 }
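A sketch of wiring the pieces above together for the Ethereum network; `wallet` and `transaction` are hypothetical, and the `Default` constructions of the fillers are assumptions rather than anything the diff prescribes:

use alloy::network::Ethereum;
use alloy::providers::fillers::{ChainIdFiller, NonceFiller};

let provider = construct_concurrency_limited_provider::<Ethereum, _>(
    "http://127.0.0.1:8545",
    FallbackGasFiller::default(),
    ChainIdFiller::default(),
    NonceFiller::default(),
    wallet,
)
.await?;
let receipt = execute_transaction(provider, transaction).await?;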
@@ -17,7 +17,6 @@ alloy = { workspace = true }
 anyhow = { workspace = true }
 paste = { workspace = true }
 indexmap = { workspace = true, features = ["serde"] }
-itertools = { workspace = true }
 semver = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
+409 -869
File diff suppressed because it is too large
+11 -11
@@ -8,30 +8,30 @@ use revive_dt_format::{case::CaseIdx, steps::StepPath};
 use serde::{Deserialize, Serialize};

 define_wrapper_type!(
     #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
     #[serde(transparent)]
     pub struct MetadataFilePath(PathBuf);
 );

 /// An absolute specifier for a test.
 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
 pub struct TestSpecifier {
     pub solc_mode: Mode,
     pub metadata_file_path: PathBuf,
     pub case_idx: CaseIdx,
 }

 /// An absolute path for a test that also includes information about the node that it's assigned to
 /// and what platform it belongs to.
 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
 pub struct ExecutionSpecifier {
     pub test_specifier: Arc<TestSpecifier>,
     pub node_id: usize,
     pub platform_identifier: PlatformIdentifier,
 }

 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
 pub struct StepExecutionSpecifier {
     pub execution_specifier: Arc<ExecutionSpecifier>,
     pub step_idx: StepPath,
 }
@@ -9,14 +9,14 @@ use crate::{MetadataFilePath, TestCaseStatus};

 #[derive(Clone, Debug)]
 pub enum ReporterEvent {
     /// An event sent by the reporter once an entire metadata file and solc mode combination has
     /// finished execution.
     MetadataFileSolcModeCombinationExecutionCompleted {
         /// The path of the metadata file.
         metadata_file_path: MetadataFilePath,
         /// The Solc mode that this metadata file was executed in.
         mode: Mode,
         /// The status of each one of the cases.
         case_status: BTreeMap<CaseIdx, TestCaseStatus>,
     },
 }
+154 -188
@@ -8,14 +8,13 @@ use anyhow::Context as _;
 use indexmap::IndexMap;
 use revive_dt_common::types::PlatformIdentifier;
 use revive_dt_compiler::{CompilerInput, CompilerOutput};
-use revive_dt_format::metadata::ContractInstance;
-use revive_dt_format::metadata::Metadata;
-use revive_dt_format::steps::StepPath;
+use revive_dt_format::{
+    corpus::Corpus,
+    metadata::{ContractInstance, Metadata},
+};
 use semver::Version;
 use tokio::sync::{broadcast, oneshot};

-use crate::MinedBlockInformation;
-use crate::TransactionInformation;
 use crate::{ExecutionSpecifier, ReporterEvent, TestSpecifier, common::MetadataFilePath};

 macro_rules! __report_gen_emit_test_specific {
@@ -347,16 +346,6 @@ macro_rules! define_event {
             ),*
         }

-        impl $ident {
-            pub fn variant_name(&self) -> &'static str {
-                match self {
-                    $(
-                        Self::$variant_ident { .. } => stringify!($variant_ident)
-                    ),*
-                }
-            }
-        }
-
         $(
             #[derive(Debug)]
             $(#[$variant_meta])*
@@ -485,183 +474,160 @@ macro_rules! define_event {
 }

 define_event! {
     /// An event type that's sent by the test runners/drivers to the report aggregator.
     pub(crate) enum RunnerEvent {
         /// An event emitted by the reporter when it wishes to listen to events emitted by the
         /// aggregator.
         SubscribeToEvents {
             /// The channel that the aggregator is to send the receive side of the channel on.
             tx: oneshot::Sender<broadcast::Receiver<ReporterEvent>>
         },
+        /// An event emitted by runners when they've discovered a corpus file.
+        CorpusFileDiscovery {
+            /// The contents of the corpus file.
+            corpus: Corpus
+        },
         /// An event emitted by runners when they've discovered a metadata file.
         MetadataFileDiscovery {
             /// The path of the metadata file discovered.
             path: MetadataFilePath,
             /// The content of the metadata file.
             metadata: Metadata
         },
         /// An event emitted by the runners when they discover a test case.
         TestCaseDiscovery {
             /// A specifier for the test that was discovered.
             test_specifier: Arc<TestSpecifier>,
         },
         /// An event emitted by the runners when a test case is ignored.
         TestIgnored {
             /// A specifier for the test that's been ignored.
             test_specifier: Arc<TestSpecifier>,
             /// A reason for the test to be ignored.
             reason: String,
             /// Additional fields that describe more information on why the test was ignored.
             additional_fields: IndexMap<String, serde_json::Value>
         },
         /// An event emitted by the runners when a test case has succeeded.
         TestSucceeded {
             /// A specifier for the test that succeeded.
             test_specifier: Arc<TestSpecifier>,
             /// The number of steps of the case that were executed by the driver.
             steps_executed: usize,
         },
         /// An event emitted by the runners when a test case has failed.
         TestFailed {
             /// A specifier for the test that succeeded.
             test_specifier: Arc<TestSpecifier>,
             /// A reason for the failure of the test.
             reason: String,
         },
         /// An event emitted when the test case is assigned a platform node.
         NodeAssigned {
             /// A specifier for the test that the assignment is for.
             test_specifier: Arc<TestSpecifier>,
             /// The ID of the node that this case is being executed on.
             id: usize,
             /// The identifier of the platform used.
             platform_identifier: PlatformIdentifier,
             /// The connection string of the node.
             connection_string: String,
         },
         /// An event emitted by the runners when the compilation of the contracts has succeeded
         /// on the pre-link contracts.
         PreLinkContractsCompilationSucceeded {
             /// A specifier for the execution that's taking place.
             execution_specifier: Arc<ExecutionSpecifier>,
             /// The version of the compiler used to compile the contracts.
             compiler_version: Version,
             /// The path of the compiler used to compile the contracts.
             compiler_path: PathBuf,
             /// A flag of whether the contract bytecode and ABI were cached or if they were compiled
             /// anew.
             is_cached: bool,
             /// The input provided to the compiler - this is optional and not provided if the
             /// contracts were obtained from the cache.
             compiler_input: Option<CompilerInput>,
             /// The output of the compiler.
             compiler_output: CompilerOutput
         },
         /// An event emitted by the runners when the compilation of the contracts has succeeded
         /// on the post-link contracts.
         PostLinkContractsCompilationSucceeded {
             /// A specifier for the execution that's taking place.
             execution_specifier: Arc<ExecutionSpecifier>,
             /// The version of the compiler used to compile the contracts.
             compiler_version: Version,
             /// The path of the compiler used to compile the contracts.
             compiler_path: PathBuf,
             /// A flag of whether the contract bytecode and ABI were cached or if they were compiled
             /// anew.
             is_cached: bool,
             /// The input provided to the compiler - this is optional and not provided if the
             /// contracts were obtained from the cache.
             compiler_input: Option<CompilerInput>,
             /// The output of the compiler.
             compiler_output: CompilerOutput
         },
         /// An event emitted by the runners when the compilation of the pre-link contract has
         /// failed.
         PreLinkContractsCompilationFailed {
             /// A specifier for the execution that's taking place.
             execution_specifier: Arc<ExecutionSpecifier>,
             /// The version of the compiler used to compile the contracts.
             compiler_version: Option<Version>,
             /// The path of the compiler used to compile the contracts.
             compiler_path: Option<PathBuf>,
             /// The input provided to the compiler - this is optional and not provided if the
             /// contracts were obtained from the cache.
             compiler_input: Option<CompilerInput>,
             /// The failure reason.
             reason: String,
         },
         /// An event emitted by the runners when the compilation of the post-link contract has
         /// failed.
         PostLinkContractsCompilationFailed {
             /// A specifier for the execution that's taking place.
             execution_specifier: Arc<ExecutionSpecifier>,
             /// The version of the compiler used to compile the contracts.
             compiler_version: Option<Version>,
             /// The path of the compiler used to compile the contracts.
             compiler_path: Option<PathBuf>,
             /// The input provided to the compiler - this is optional and not provided if the
             /// contracts were obtained from the cache.
             compiler_input: Option<CompilerInput>,
             /// The failure reason.
             reason: String,
         },
         /// An event emitted by the runners when a library has been deployed.
         LibrariesDeployed {
             /// A specifier for the execution that's taking place.
             execution_specifier: Arc<ExecutionSpecifier>,
             /// The addresses of the libraries that were deployed.
             libraries: BTreeMap<ContractInstance, Address>
         },
         /// An event emitted by the runners when they've deployed a new contract.
         ContractDeployed {
             /// A specifier for the execution that's taking place.
             execution_specifier: Arc<ExecutionSpecifier>,
             /// The instance name of the contract.
             contract_instance: ContractInstance,
             /// The address of the contract.
             address: Address
         },
         /// Reports the completion of the run.
-        Completion {},
-
-        /* Benchmarks Events */
-        /// An event emitted with information on a transaction that was submitted for a certain step
-        /// of the execution.
-        StepTransactionInformation {
-            /// A specifier for the execution that's taking place.
-            execution_specifier: Arc<ExecutionSpecifier>,
-            /// The path of the step that this transaction belongs to.
-            step_path: StepPath,
-            /// Information about the transaction
-            transaction_information: TransactionInformation
-        },
-        ContractInformation {
-            /// A specifier for the execution that's taking place.
-            execution_specifier: Arc<ExecutionSpecifier>,
-            /// The path of the solidity source code that contains the contract.
-            source_code_path: PathBuf,
-            /// The name of the contract
-            contract_name: String,
-            /// The size of the contract
-            contract_size: usize
-        },
-        BlockMined {
-            /// A specifier for the execution that's taking place.
-            execution_specifier: Arc<ExecutionSpecifier>,
-            /// Information on the mined block,
-            mined_block_information: MinedBlockInformation
-        }
+        Completion {}
     }
 }

 /// An extension to the [`Reporter`] implemented by the macro.
 impl RunnerEventReporter {
     pub async fn subscribe(&self) -> anyhow::Result<broadcast::Receiver<ReporterEvent>> {
         let (tx, rx) = oneshot::channel::<broadcast::Receiver<ReporterEvent>>();
         self.report_subscribe_to_events_event(tx)
             .context("Failed to send subscribe request to reporter task")?;
         rx.await.map_err(Into::into)
     }
 }

 pub type Reporter = RunnerEventReporter;
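A sketch of consuming the broadcast channel returned by the `subscribe` helper above; `reporter` is a hypothetical `Reporter` value, and the `Debug` formatting assumes the derives shown in the earlier hunks:

let mut events = reporter.subscribe().await?;
while let Ok(event) = events.recv().await {
    match event {
        ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
            metadata_file_path,
            mode,
            case_status,
        } => println!("{metadata_file_path:?} in {mode:?}: {} case(s) done", case_status.len()),
    }
}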
@@ -1,12 +1,12 @@
|
|||||||
//! Helper for caching the solc binaries.
|
//! Helper for caching the solc binaries.
|
||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
collections::HashSet,
|
collections::HashSet,
|
||||||
fs::{File, create_dir_all},
|
fs::{File, create_dir_all},
|
||||||
io::{BufWriter, Write},
|
io::{BufWriter, Write},
|
||||||
os::unix::fs::PermissionsExt,
|
os::unix::fs::PermissionsExt,
|
||||||
path::{Path, PathBuf},
|
path::{Path, PathBuf},
|
||||||
sync::LazyLock,
|
sync::LazyLock,
|
||||||
};
|
};
|
||||||
|
|
||||||
use semver::Version;
|
use semver::Version;
|
||||||
@@ -19,90 +19,71 @@ pub const SOLC_CACHE_DIRECTORY: &str = "solc";
pub(crate) static SOLC_CACHER: LazyLock<Mutex<HashSet<PathBuf>>> = LazyLock::new(Default::default);

pub(crate) async fn get_or_download(
    working_directory: &Path,
    downloader: &SolcDownloader,
) -> anyhow::Result<(Version, PathBuf)> {
    let target_directory = working_directory
        .join(SOLC_CACHE_DIRECTORY)
        .join(downloader.version.to_string());
    let target_file = target_directory.join(downloader.target);

    let mut cache = SOLC_CACHER.lock().await;
    if cache.contains(&target_file) {
        tracing::debug!("using cached solc: {}", target_file.display());
        return Ok((downloader.version.clone(), target_file));
    }

    create_dir_all(&target_directory).with_context(|| {
        format!("Failed to create solc cache directory: {}", target_directory.display())
    })?;
    download_to_file(&target_file, downloader)
        .await
        .with_context(|| format!("Failed to write downloaded solc to {}", target_file.display()))?;
    cache.insert(target_file.clone());

    Ok((downloader.version.clone(), target_file))
}

async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::Result<()> {
    let Ok(file) = File::create_new(path) else {
        return Ok(());
    };

    #[cfg(unix)]
    {
        let mut permissions = file
            .metadata()
            .with_context(|| format!("Failed to read metadata for {}", path.display()))?
            .permissions();
        permissions.set_mode(permissions.mode() | 0o111);
        file.set_permissions(permissions).with_context(|| {
            format!("Failed to set executable permissions on {}", path.display())
        })?;
    }

    let mut file = BufWriter::new(file);
    file.write_all(&downloader.download().await.context("Failed to download solc binary bytes")?)
        .with_context(|| format!("Failed to write solc binary to {}", path.display()))?;
    file.flush()
        .with_context(|| format!("Failed to flush file {}", path.display()))?;
    drop(file);

    #[cfg(target_os = "macos")]
    std::process::Command::new("xattr")
        .arg("-d")
        .arg("com.apple.quarantine")
        .arg(path)
        .stderr(std::process::Stdio::null())
        .stdout(std::process::Stdio::null())
        .spawn()
        .with_context(|| {
            format!("Failed to spawn xattr to remove quarantine attribute on {}", path.display())
        })?
        .wait()
        .with_context(|| {
            format!("Failed waiting for xattr operation to complete on {}", path.display())
        })?;

    Ok(())
}
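A note on the `File::create_new` guard above: because creation fails when the file already exists, the first caller to reach that point does the write and every later caller returns `Ok(())` untouched, which is what makes losing the race harmless. A minimal standalone sketch of the same idiom (the `write_once` helper is illustrative, not part of this diff):

use std::{fs::File, io::Write, path::Path};

/// Writes `bytes` to `path` only if no file exists there yet: the first
/// writer wins, and every later caller returns Ok without touching the file.
fn write_once(path: &Path, bytes: &[u8]) -> std::io::Result<()> {
    // `create_new` fails with `AlreadyExists` when the file is present, which
    // doubles as a cheap cross-process "someone got here first" check.
    let Ok(mut file) = File::create_new(path) else {
        return Ok(());
    };
    file.write_all(bytes)?;
    file.flush()
}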
@@ -1,219 +1,171 @@
//! This module downloads solc binaries.

use std::{
    collections::HashMap,
    sync::{LazyLock, Mutex},
};

use revive_dt_common::types::VersionOrRequirement;

use semver::Version;
use sha2::{Digest, Sha256};

use crate::list::List;
use anyhow::Context as _;

pub static LIST_CACHE: LazyLock<Mutex<HashMap<&'static str, List>>> =
    LazyLock::new(Default::default);

impl List {
    pub const LINUX_URL: &str = "https://binaries.soliditylang.org/linux-amd64/list.json";
    pub const WINDOWS_URL: &str = "https://binaries.soliditylang.org/windows-amd64/list.json";
    pub const MACOSX_URL: &str = "https://binaries.soliditylang.org/macosx-amd64/list.json";
    pub const WASM_URL: &str = "https://binaries.soliditylang.org/wasm/list.json";

    /// Tries to download the list from the given URL.
    ///
    /// Caches the list retrieved from the `url` into [LIST_CACHE];
    /// subsequent calls with the same `url` will return the cached list.
    pub async fn download(url: &'static str) -> anyhow::Result<Self> {
        if let Some(list) = LIST_CACHE.lock().unwrap().get(url) {
            return Ok(list.clone());
        }

        let body: List = reqwest::get(url)
            .await
            .with_context(|| format!("Failed to GET solc list from {url}"))?
            .json()
            .await
            .with_context(|| format!("Failed to deserialize solc list JSON from {url}"))?;

        LIST_CACHE.lock().unwrap().insert(url, body.clone());

        Ok(body)
    }
}
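The `LIST_CACHE` handling above is a plain lock-check-fetch-insert memoization. A self-contained sketch of the same shape, with a hypothetical `fetch` standing in for the `reqwest` round trip:

use std::{
    collections::HashMap,
    sync::{LazyLock, Mutex},
};

static CACHE: LazyLock<Mutex<HashMap<&'static str, String>>> =
    LazyLock::new(Default::default);

// Hypothetical stand-in for the network fetch performed by `List::download`.
fn fetch(url: &'static str) -> String {
    format!("payload from {url}")
}

fn get_cached(url: &'static str) -> String {
    // Fast path: return the memoized value if this URL was fetched before.
    if let Some(hit) = CACHE.lock().unwrap().get(url) {
        return hit.clone();
    }
    // Slow path: fetch, memoize, return. Two callers racing here may both
    // fetch, but the insert is idempotent, mirroring the original code.
    let value = fetch(url);
    CACHE.lock().unwrap().insert(url, value.clone());
    value
}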
/// Download solc binaries from the official SolidityLang site.
#[derive(Clone, Debug)]
pub struct SolcDownloader {
    pub version: Version,
    pub target: &'static str,
    pub list: &'static str,
}

impl SolcDownloader {
    pub const BASE_URL: &str = "https://binaries.soliditylang.org";

    pub const LINUX_NAME: &str = "linux-amd64";
    pub const MACOSX_NAME: &str = "macosx-amd64";
    pub const WINDOWS_NAME: &str = "windows-amd64";
    pub const WASM_NAME: &str = "wasm";

    async fn new(
        version: impl Into<VersionOrRequirement>,
        target: &'static str,
        list: &'static str,
    ) -> anyhow::Result<Self> {
        let version_or_requirement = version.into();
        match version_or_requirement {
            VersionOrRequirement::Version(version) => Ok(Self { version, target, list }),
            VersionOrRequirement::Requirement(requirement) => {
                let Some(version) = List::download(list)
                    .await
                    .with_context(|| format!("Failed to download solc builds list from {list}"))?
                    .builds
                    .into_iter()
                    .map(|build| build.version)
                    .filter(|version| requirement.matches(version))
                    .max()
                else {
                    anyhow::bail!("Failed to find a version that satisfies {requirement:?}");
                };
                Ok(Self { version, target, list })
            },
        }
    }

    pub async fn linux(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
        Self::new(version, Self::LINUX_NAME, List::LINUX_URL).await
    }

    pub async fn macosx(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
        Self::new(version, Self::MACOSX_NAME, List::MACOSX_URL).await
    }

    pub async fn windows(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
        Self::new(version, Self::WINDOWS_NAME, List::WINDOWS_URL).await
    }

    pub async fn wasm(version: impl Into<VersionOrRequirement>) -> anyhow::Result<Self> {
        Self::new(version, Self::WASM_NAME, List::WASM_URL).await
    }

    /// Download the solc binary.
    ///
    /// Errors out if the download fails or the digest of the downloaded file
    /// mismatches the expected digest from the release [List].
    pub async fn download(&self) -> anyhow::Result<Vec<u8>> {
        let builds = List::download(self.list)
            .await
            .with_context(|| format!("Failed to download solc builds list from {}", self.list))?
            .builds;
        let build = builds
            .iter()
            .find(|build| build.version == self.version)
            .ok_or_else(|| anyhow::anyhow!("solc v{} not found in builds", self.version))
            .with_context(|| {
                format!(
                    "Requested solc version {} was not found in builds list fetched from {}",
                    self.version, self.list
                )
            })?;

        let path = build.path.clone();
        let expected_digest = build.sha256.strip_prefix("0x").unwrap_or(&build.sha256).to_string();
        let url = format!("{}/{}/{}", Self::BASE_URL, self.target, path.display());

        let file = reqwest::get(&url)
            .await
            .with_context(|| format!("Failed to GET solc binary from {url}"))?
            .bytes()
            .await
            .with_context(|| format!("Failed to read solc binary bytes from {url}"))?
            .to_vec();

        if hex::encode(Sha256::digest(&file)) != expected_digest {
            anyhow::bail!("sha256 mismatch for solc version {}", self.version);
        }

        Ok(file)
    }
}
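The digest check in `download` reduces to hashing the payload and comparing lowercase hex strings. A small sketch of that check using the same `sha2` and `hex` crates (the helper name is illustrative):

use sha2::{Digest, Sha256};

/// Returns Ok if `bytes` hash to `expected` (lowercase hex, optional 0x prefix).
fn verify_sha256(bytes: &[u8], expected: &str) -> anyhow::Result<()> {
    let expected = expected.strip_prefix("0x").unwrap_or(expected);
    let actual = hex::encode(Sha256::digest(bytes));
    if actual != expected {
        anyhow::bail!("sha256 mismatch: expected {expected}, got {actual}");
    }
    Ok(())
}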
#[cfg(test)]
mod tests {
    use crate::{download::SolcDownloader, list::List};

    #[tokio::test]
    async fn try_get_windows() {
        let version = List::download(List::WINDOWS_URL).await.unwrap().latest_release;
        SolcDownloader::windows(version).await.unwrap().download().await.unwrap();
    }

    #[tokio::test]
    async fn try_get_macosx() {
        let version = List::download(List::MACOSX_URL).await.unwrap().latest_release;
        SolcDownloader::macosx(version).await.unwrap().download().await.unwrap();
    }

    #[tokio::test]
    async fn try_get_linux() {
        let version = List::download(List::LINUX_URL).await.unwrap().latest_release;
        SolcDownloader::linux(version).await.unwrap().download().await.unwrap();
    }

    #[tokio::test]
    async fn try_get_wasm() {
        let version = List::download(List::WASM_URL).await.unwrap().latest_release;
        SolcDownloader::wasm(version).await.unwrap().download().await.unwrap();
    }
}
@@ -22,22 +22,22 @@ pub mod list;
/// Subsequent calls for the same version will use a cached artifact
/// and not download it again.
pub async fn download_solc(
    cache_directory: &Path,
    version: impl Into<VersionOrRequirement>,
    wasm: bool,
) -> anyhow::Result<(Version, PathBuf)> {
    let downloader = if wasm {
        SolcDownloader::wasm(version).await
    } else if cfg!(target_os = "linux") {
        SolcDownloader::linux(version).await
    } else if cfg!(target_os = "macos") {
        SolcDownloader::macosx(version).await
    } else if cfg!(target_os = "windows") {
        SolcDownloader::windows(version).await
    } else {
        unimplemented!()
    }
    .context("Failed to initialize the Solc Downloader")?;

    get_or_download(cache_directory, &downloader).await
}
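For orientation, a sketch of how a caller in the same crate might drive `download_solc` end to end. It assumes `semver::VersionReq` converts into `VersionOrRequirement` (the enum's `Requirement` variant suggests so, but the conversion is not shown in this diff), and the `>=0.8.0` requirement and cache path are illustrative:

use std::path::Path;

use semver::VersionReq;

async fn example(cache: &Path) -> anyhow::Result<()> {
    // Assumption: VersionReq: Into<VersionOrRequirement>. A requirement is
    // resolved to the newest matching release from the platform builds list,
    // while a concrete Version would be used as-is.
    let requirement = VersionReq::parse(">=0.8.0")?;
    let (version, solc_path) = download_solc(cache, requirement, /* wasm */ false).await?;
    println!("solc {version} cached at {}", solc_path.display());
    Ok(())
}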
@@ -7,20 +7,20 @@ use serde::Deserialize;
#[derive(Debug, Deserialize, Clone, Eq, PartialEq)]
pub struct List {
    pub builds: Vec<Build>,
    pub releases: HashMap<Version, String>,
    #[serde(rename = "latestRelease")]
    pub latest_release: Version,
}

#[derive(Debug, Deserialize, Clone, Eq, PartialEq)]
pub struct Build {
    pub path: PathBuf,
    pub version: Version,
    pub build: String,
    #[serde(rename = "longVersion")]
    pub long_version: String,
    pub keccak256: String,
    pub sha256: String,
    pub urls: Vec<String>,
}
Submodule polkadot-sdk added at dc3d0e5ab7
Submodule resolc-compiler-tests deleted from 7bc445491e
@@ -1,7 +1,7 @@
#!/bin/bash

# Revive Differential Tests - Quick Start Script
# This script clones the test repository, sets up the corpus file, and runs the tool

set -e # Exit on any error

@@ -14,6 +14,7 @@ NC='\033[0m' # No Color
# Configuration
TEST_REPO_URL="https://github.com/paritytech/resolc-compiler-tests"
TEST_REPO_DIR="resolc-compiler-tests"
CORPUS_FILE="./corpus.json"
WORKDIR="workdir"

# Optional positional argument: path to polkadot-sdk directory
@@ -22,6 +23,7 @@ POLKADOT_SDK_DIR="${1:-}"
# Binary paths (default to names in $PATH)
REVIVE_DEV_NODE_BIN="revive-dev-node"
ETH_RPC_BIN="eth-rpc"
SUBSTRATE_NODE_BIN="substrate-node"

echo -e "${GREEN}=== Revive Differential Tests Quick Start ===${NC}"
echo ""
@@ -49,13 +51,14 @@ if [ -n "$POLKADOT_SDK_DIR" ]; then

    REVIVE_DEV_NODE_BIN="$POLKADOT_SDK_DIR/target/release/revive-dev-node"
    ETH_RPC_BIN="$POLKADOT_SDK_DIR/target/release/eth-rpc"
    SUBSTRATE_NODE_BIN="$POLKADOT_SDK_DIR/target/release/substrate-node"

    if [ ! -x "$REVIVE_DEV_NODE_BIN" ] || [ ! -x "$ETH_RPC_BIN" ] || [ ! -x "$SUBSTRATE_NODE_BIN" ]; then
        echo -e "${YELLOW}Required binaries not found in release target. Building...${NC}"
        (cd "$POLKADOT_SDK_DIR" && cargo build --release --package staging-node-cli --package pallet-revive-eth-rpc --package revive-dev-node)
    fi

    for bin in "$REVIVE_DEV_NODE_BIN" "$ETH_RPC_BIN" "$SUBSTRATE_NODE_BIN"; do
        if [ ! -x "$bin" ]; then
            echo -e "${RED}Expected binary not found after build: $bin${NC}"
            exit 1
@@ -65,6 +68,21 @@ else
    echo -e "${YELLOW}No polkadot-sdk path provided. Using binaries from $PATH.${NC}"
fi

# Create corpus file with absolute path resolved at runtime
echo -e "${GREEN}Creating corpus file...${NC}"
ABSOLUTE_PATH=$(realpath "$TEST_REPO_DIR/fixtures/solidity/")

cat > "$CORPUS_FILE" << EOF
{
  "name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
  "paths": [
    "$(realpath "$TEST_REPO_DIR/fixtures/solidity/simple")"
  ]
}
EOF

echo -e "${GREEN}Corpus file created: $CORPUS_FILE${NC}"

# Create workdir if it doesn't exist
mkdir -p "$WORKDIR"

@@ -75,13 +93,14 @@ echo ""
# Run the tool
cargo build --release;
RUST_LOG="info,alloy_pubsub::service=error" ./target/release/retester test \
    --platform revive-dev-node-revm-solc \
    --corpus "$CORPUS_FILE" \
    --working-directory "$WORKDIR" \
    --concurrency.number-of-nodes 10 \
    --concurrency.number-of-threads 5 \
    --concurrency.number-of-concurrent-tasks 1000 \
    --wallet.additional-keys 100000 \
    --kitchensink.path "$SUBSTRATE_NODE_BIN" \
    --revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
    --eth-rpc.path "$ETH_RPC_BIN" \
    > logs.log \
@@ -1,246 +0,0 @@
"""
Utilities to print benchmark metrics from a report JSON into CSV.

Usage:
    python scripts/print_benchmark_metrics_csv.py /absolute/path/to/report.json

The script prints, for each metadata path, case index, and mode combination,
CSV rows aligned to mined blocks with the following columns:
- block_number
- number_of_txs
- tps (transaction_per_second)
- gps (gas_per_second)
- gas_block_fullness
- ref_time (if available)
- max_ref_time (if available)
- proof_size (if available)
- max_proof_size (if available)
- ref_time_block_fullness (if available)
- proof_size_block_fullness (if available)

Important nuance: TPS and GPS arrays have (number_of_blocks - 1) items. The
first block row has no TPS/GPS; the CSV leaves those cells empty for the first
row and aligns subsequent values to their corresponding next block.
"""

from __future__ import annotations

import json
import sys
import csv
from typing import List, Mapping, TypedDict


class EthereumMinedBlockInformation(TypedDict):
    """EVM block information extracted from the report.

    Attributes:
        block_number: The block height.
        block_timestamp: The UNIX timestamp of the block.
        mined_gas: Total gas used (mined) in the block.
        block_gas_limit: The gas limit of the block.
        transaction_hashes: List of transaction hashes included in the block.
    """

    block_number: int
    block_timestamp: int
    mined_gas: int
    block_gas_limit: int
    transaction_hashes: List[str]


class SubstrateMinedBlockInformation(TypedDict):
    """Substrate-specific block resource usage fields.

    Attributes:
        ref_time: The consumed ref time in the block.
        max_ref_time: The maximum ref time allowed for the block.
        proof_size: The consumed proof size in the block.
        max_proof_size: The maximum proof size allowed for the block.
    """

    ref_time: int
    max_ref_time: int
    proof_size: int
    max_proof_size: int


class MinedBlockInformation(TypedDict):
    """Block-level information for a mined block with both EVM and optional Substrate fields."""

    ethereum_block_information: EthereumMinedBlockInformation
    substrate_block_information: SubstrateMinedBlockInformation


class Metric(TypedDict):
    """Metric data of integer values keyed by platform identifier.

    Attributes:
        minimum: Single scalar minimum per platform.
        maximum: Single scalar maximum per platform.
        mean: Single scalar mean per platform.
        median: Single scalar median per platform.
        raw: Time-series (or list) of values per platform.
    """

    minimum: Mapping[str, int]
    maximum: Mapping[str, int]
    mean: Mapping[str, int]
    median: Mapping[str, int]
    raw: Mapping[str, List[int]]


class Metrics(TypedDict):
    """All metrics that may be present for a given execution report.

    Note that some metrics are optional and present only for specific platforms
    or execution modes.
    """

    transaction_per_second: Metric
    gas_per_second: Metric
    gas_block_fullness: Metric
    ref_time_block_fullness: Metric
    proof_size_block_fullness: Metric


class ExecutionReport(TypedDict):
    """Execution report for a mode containing mined blocks and metrics.

    Attributes:
        mined_block_information: Mapping from platform identifier to the list of
            mined blocks observed for that platform.
        metrics: The computed metrics for the execution.
    """

    mined_block_information: Mapping[str, List[MinedBlockInformation]]
    metrics: Metrics


class CaseReport(TypedDict):
    """Report for a single case, keyed by mode string."""

    mode_execution_reports: Mapping[str, ExecutionReport]


class MetadataFileReport(TypedDict):
    """Report subtree keyed by case indices for a metadata file path."""

    case_reports: Mapping[str, CaseReport]


class ReportRoot(TypedDict):
    """Top-level report schema with execution information keyed by metadata path."""

    execution_information: Mapping[str, MetadataFileReport]


BlockInformation = TypedDict(
    "BlockInformation",
    {
        "Block Number": int,
        "Timestamp": int,
        "Datetime": None,
        "Transaction Count": int,
        "TPS": int | None,
        "GPS": int | None,
        "Ref Time": int,
        "Max Ref Time": int,
        "Block Fullness Ref Time": int,
        "Proof Size": int,
        "Max Proof Size": int,
        "Block Fullness Proof Size": int,
    },
)
"""A typed dictionary used to hold all of the block information"""


def load_report(path: str) -> ReportRoot:
    """Load the report JSON from disk.

    Args:
        path: Absolute or relative filesystem path to the JSON report file.

    Returns:
        The parsed report as a typed dictionary structure.
    """

    with open(path, "r", encoding="utf-8") as f:
        data: ReportRoot = json.load(f)
        return data


def main() -> None:
    report_path: str = sys.argv[1]
    report: ReportRoot = load_report(report_path)

    # TODO: Remove this in the future, but for now, the target is fixed.
    target: str = "revive-dev-node-revm-solc"

    csv_writer = csv.writer(sys.stdout)

    for _, metadata_file_report in report["execution_information"].items():
        for _, case_report in metadata_file_report["case_reports"].items():
            for _, execution_report in case_report["mode_execution_reports"].items():
                blocks_information: list[MinedBlockInformation] = execution_report[
                    "mined_block_information"
                ][target]

                resolved_blocks: list[BlockInformation] = []
                for i, block_information in enumerate(blocks_information):
                    resolved_blocks.append(
                        {
                            "Block Number": block_information["ethereum_block_information"]["block_number"],
                            "Timestamp": block_information["ethereum_block_information"]["block_timestamp"],
                            "Datetime": None,
                            "Transaction Count": len(
                                block_information["ethereum_block_information"]["transaction_hashes"]
                            ),
                            "TPS": (
                                None
                                if i == 0
                                else execution_report["metrics"]["transaction_per_second"]["raw"][target][i - 1]
                            ),
                            "GPS": (
                                None
                                if i == 0
                                else execution_report["metrics"]["gas_per_second"]["raw"][target][i - 1]
                            ),
                            "Ref Time": block_information["substrate_block_information"]["ref_time"],
                            "Max Ref Time": block_information["substrate_block_information"]["max_ref_time"],
                            "Block Fullness Ref Time": execution_report["metrics"]["ref_time_block_fullness"]["raw"][target][i],
                            "Proof Size": block_information["substrate_block_information"]["proof_size"],
                            "Max Proof Size": block_information["substrate_block_information"]["max_proof_size"],
                            "Block Fullness Proof Size": execution_report["metrics"]["proof_size_block_fullness"]["raw"][target][i],
                        }
                    )

                csv_writer = csv.DictWriter(sys.stdout, resolved_blocks[0].keys())
                csv_writer.writeheader()
                csv_writer.writerows(resolved_blocks)


if __name__ == "__main__":
    main()
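The off-by-one alignment the deleted script's docstring describes (per-interval TPS/GPS series are one item shorter than the block list) is easy to get wrong; a minimal sketch of the alignment rule, with illustrative types:

/// Aligns per-interval metrics (len = blocks.len() - 1) to block rows: the
/// first block gets no interval value, and row i >= 1 takes tps[i - 1].
/// Assumes tps.len() == blocks.len() - 1; shorter inputs would panic.
fn align_tps(blocks: &[u64], tps: &[f64]) -> Vec<(u64, Option<f64>)> {
    blocks
        .iter()
        .enumerate()
        .map(|(i, &block)| (block, i.checked_sub(1).map(|j| tps[j])))
        .collect()
}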
@@ -1,226 +0,0 @@
"""
This script is used to turn the JSON report produced by the revive differential tests tool into an
easy to consume markdown document for the purpose of reporting this information in the Polkadot SDK
CI. The full models used in the JSON report can be found in the revive differential tests repo and
the models used in this script are just a partial reproduction of the full report models.
"""

from typing import TypedDict, Literal, Union

import json, io


class Report(TypedDict):
    context: "Context"
    execution_information: dict[
        "MetadataFilePathString",
        dict["ModeString", dict["CaseIdxString", "CaseReport"]],
    ]


class Context(TypedDict):
    Test: "TestContext"


class TestContext(TypedDict):
    corpus_configuration: "CorpusConfiguration"


class CorpusConfiguration(TypedDict):
    test_specifiers: list["TestSpecifier"]


class CaseReport(TypedDict):
    status: "CaseStatus"


class CaseStatusSuccess(TypedDict):
    status: Literal["Succeeded"]
    steps_executed: int


class CaseStatusFailure(TypedDict):
    status: Literal["Failed"]
    reason: str


class CaseStatusIgnored(TypedDict):
    status: Literal["Ignored"]
    reason: str


CaseStatus = Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored]
"""A union type of all of the possible statuses that could be reported for a case."""

TestSpecifier = str
"""A test specifier string. For example resolc-compiler-tests/fixtures/solidity/test.json::0::Y+"""

ModeString = str
"""The mode string. For example Y+ >=0.8.13"""

MetadataFilePathString = str
"""The path to a metadata file. For example resolc-compiler-tests/fixtures/solidity/test.json"""

CaseIdxString = str
"""The index of a case as a string. For example '0'"""


def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
    """
    Given a path, this function returns the path relative to the resolc-compiler-tests directory.
    The following is an example of an input and an output:

    Input: ~/polkadot-sdk/revive-differential-tests/resolc-compiler-tests/fixtures/solidity/test.json
    Output: test.json
    """

    return f"{path.split('resolc-compiler-tests/fixtures/solidity')[-1].strip('/')}"


def main() -> None:
    with open("report.json", "r") as file:
        report: Report = json.load(file)

    # Starting the markdown document and adding information to it as we go.
    markdown_document: io.TextIOWrapper = open("report.md", "w")
    print("# Differential Tests Results", file=markdown_document)

    # Getting all of the test specifiers from the report and making them relative to the tests dir.
    test_specifiers: list[str] = list(
        map(
            path_relative_to_resolc_compiler_test_directory,
            report["context"]["Test"]["corpus_configuration"]["test_specifiers"],
        )
    )
    print("## Specified Tests", file=markdown_document)
    for test_specifier in test_specifiers:
        print(f"* `{test_specifier}`", file=markdown_document)

    # Counting the total number of test cases, successes, failures, and ignored tests.
    total_number_of_cases: int = 0
    total_number_of_successes: int = 0
    total_number_of_failures: int = 0
    total_number_of_ignores: int = 0
    for _, mode_to_case_mapping in report["execution_information"].items():
        for _, case_idx_to_report_mapping in mode_to_case_mapping.items():
            for _, case_report in case_idx_to_report_mapping.items():
                status: CaseStatus = case_report["status"]

                total_number_of_cases += 1
                if status["status"] == "Succeeded":
                    total_number_of_successes += 1
                elif status["status"] == "Failed":
                    total_number_of_failures += 1
                elif status["status"] == "Ignored":
                    total_number_of_ignores += 1
                else:
                    raise Exception(
                        f"Encountered a status that's unknown to the script: {status}"
                    )

    print("## Counts", file=markdown_document)
    print(
        f"* **Total Number of Test Cases:** {total_number_of_cases}",
        file=markdown_document,
    )
    print(
        f"* **Total Number of Successes:** {total_number_of_successes}",
        file=markdown_document,
    )
    print(
        f"* **Total Number of Failures:** {total_number_of_failures}",
        file=markdown_document,
    )
    print(
        f"* **Total Number of Ignores:** {total_number_of_ignores}",
        file=markdown_document,
    )

    # Grouping the various test cases into dictionaries and groups depending on their status to
    # make them easier to include in the markdown document later on.
    successful_cases: dict[
        MetadataFilePathString, dict[CaseIdxString, set[ModeString]]
    ] = {}
    for metadata_file_path, mode_to_case_mapping in report[
        "execution_information"
    ].items():
        for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
            for case_idx_string, case_report in case_idx_to_report_mapping.items():
                status: CaseStatus = case_report["status"]
                metadata_file_path: str = (
                    path_relative_to_resolc_compiler_test_directory(metadata_file_path)
                )
                mode_string: str = mode_string.replace(" M3", "+").replace(" M0", "-")

                if status["status"] == "Succeeded":
                    successful_cases.setdefault(
                        metadata_file_path,
                        {},
                    ).setdefault(
                        case_idx_string, set()
                    ).add(mode_string)

    print("## Failures", file=markdown_document)
    print(
        "The test specifiers seen in this section have the format "
        "'path::case_idx::compilation_mode' and they're compatible with the revive "
        "differential tests framework and can be specified to it directly in the same "
        "way that they're provided through the `--test` argument of the framework.\n",
        file=markdown_document,
    )
    print(
        "The failures are provided in an expandable section to ensure that the PR does "
        "not get polluted with information. Please click on the section below for more "
        "information.",
        file=markdown_document,
    )
    print(
        "<details><summary>Detailed Differential Tests Failure Information</summary>\n\n",
        file=markdown_document,
    )
    print("| Test Specifier | Failure Reason | Note |", file=markdown_document)
    print("| -- | -- | -- |", file=markdown_document)

    for metadata_file_path, mode_to_case_mapping in report[
        "execution_information"
    ].items():
        for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
            for case_idx_string, case_report in case_idx_to_report_mapping.items():
                status: CaseStatus = case_report["status"]
                metadata_file_path: str = (
                    path_relative_to_resolc_compiler_test_directory(metadata_file_path)
                )
                mode_string: str = mode_string.replace(" M3", "+").replace(" M0", "-")

                if status["status"] != "Failed":
                    continue

                failure_reason: str = status["reason"].replace("\n", " ")

                note: str = ""
                modes_where_this_case_succeeded: set[ModeString] = (
                    successful_cases.setdefault(
                        metadata_file_path,
                        {},
                    ).setdefault(case_idx_string, set())
                )
                if len(modes_where_this_case_succeeded) != 0:
                    note: str = (
                        f"This test case succeeded with other compilation modes: {modes_where_this_case_succeeded}"
                    )

                test_specifier: str = (
                    f"{metadata_file_path}::{case_idx_string}::{mode_string}"
                )
                print(
                    f"| `{test_specifier}` | `{failure_reason}` | {note} |",
                    file=markdown_document,
                )
    print("\n\n</details>", file=markdown_document)

    # Closing manually is the downside of not using `with`, but it avoids
    # over-indenting the code.
    markdown_document.close()


if __name__ == "__main__":
    main()