mirror of
https://github.com/pezkuwichain/revive-differential-tests.git
synced 2026-04-22 21:57:58 +00:00
Compare commits
2 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 05b609dd1b | |||
| 1b43b0dd3f |
+168
-143
@@ -18,95 +18,134 @@ env:
|
|||||||
POLKADOT_VERSION: polkadot-stable2506-2
|
POLKADOT_VERSION: polkadot-stable2506-2
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
machete:
|
cache-polkadot:
|
||||||
name: Check for Unneeded Dependencies
|
name: Build and cache Polkadot binaries on ${{ matrix.os }}
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
env:
|
|
||||||
SCCACHE_GHA_ENABLED: "true"
|
|
||||||
RUSTC_WRAPPER: "sccache"
|
|
||||||
steps:
|
|
||||||
- name: Checkout This Repository
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
- name: Run Sccache
|
|
||||||
uses: mozilla-actions/sccache-action@v0.0.9
|
|
||||||
- name: Install the Rust Toolchain
|
|
||||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
|
||||||
- name: Install the Cargo Make Binary
|
|
||||||
uses: davidB/rust-cargo-make@v1
|
|
||||||
- name: Run Cargo Machete
|
|
||||||
run: cargo make machete
|
|
||||||
check-fmt:
|
|
||||||
name: Check Formatting
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
env:
|
|
||||||
SCCACHE_GHA_ENABLED: "true"
|
|
||||||
RUSTC_WRAPPER: "sccache"
|
|
||||||
steps:
|
|
||||||
- name: Checkout This Repository
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
- name: Run Sccache
|
|
||||||
uses: mozilla-actions/sccache-action@v0.0.9
|
|
||||||
- name: Install the Rust Toolchain
|
|
||||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
|
||||||
- name: Install the Cargo Make Binary
|
|
||||||
uses: davidB/rust-cargo-make@v1
|
|
||||||
- name: Run Cargo Formatter
|
|
||||||
run: cargo make fmt-check
|
|
||||||
check-clippy:
|
|
||||||
name: Check Clippy Lints
|
|
||||||
runs-on: ubuntu-24.04
|
|
||||||
env:
|
|
||||||
SCCACHE_GHA_ENABLED: "true"
|
|
||||||
RUSTC_WRAPPER: "sccache"
|
|
||||||
steps:
|
|
||||||
- name: Checkout This Repository
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
- name: Run Sccache
|
|
||||||
uses: mozilla-actions/sccache-action@v0.0.9
|
|
||||||
- name: Install the Rust Toolchain
|
|
||||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
|
||||||
- name: Install the Cargo Make Binary
|
|
||||||
uses: davidB/rust-cargo-make@v1
|
|
||||||
- name: Run Cargo Clippy
|
|
||||||
run: cargo make clippy
|
|
||||||
test:
|
|
||||||
name: Unit Tests
|
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
needs: cache-polkadot
|
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
os: [ubuntu-24.04, macos-14]
|
os: [ubuntu-24.04, macos-14]
|
||||||
env:
|
|
||||||
SCCACHE_GHA_ENABLED: "true"
|
|
||||||
RUSTC_WRAPPER: "sccache"
|
|
||||||
POLKADOT_SDK_COMMIT_HASH: "30cda2aad8612a10ff729d494acd9d5353294d63"
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout This Repository
|
- name: Checkout repo and submodules
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
- name: Run Sccache
|
|
||||||
uses: mozilla-actions/sccache-action@v0.0.9
|
- name: Install dependencies (Linux)
|
||||||
- name: Install the Rust Toolchain
|
if: matrix.os == 'ubuntu-24.04'
|
||||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
run: |
|
||||||
with:
|
sudo apt-get update
|
||||||
target: "wasm32-unknown-unknown"
|
sudo apt-get install -y protobuf-compiler clang libclang-dev
|
||||||
components: "rust-src,rust-std"
|
rustup target add wasm32-unknown-unknown
|
||||||
- name: Install the Cargo Make Binary
|
rustup component add rust-src
|
||||||
uses: davidB/rust-cargo-make@v1
|
|
||||||
- name: Caching Step
|
- name: Install dependencies (macOS)
|
||||||
uses: actions/cache@v4
|
if: matrix.os == 'macos-14'
|
||||||
|
run: |
|
||||||
|
brew install protobuf
|
||||||
|
rustup target add wasm32-unknown-unknown
|
||||||
|
rustup component add rust-src
|
||||||
|
|
||||||
|
- name: Cache binaries
|
||||||
|
id: cache
|
||||||
|
uses: actions/cache@v3
|
||||||
with:
|
with:
|
||||||
path: |
|
path: |
|
||||||
|
~/.cargo/bin/substrate-node
|
||||||
~/.cargo/bin/eth-rpc
|
~/.cargo/bin/eth-rpc
|
||||||
~/.cargo/bin/revive-dev-node
|
key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}
|
||||||
key: polkadot-binaries-${{ env.POLKADOT_SDK_COMMIT_HASH }}-${{ matrix.os }}
|
|
||||||
|
- name: Build substrate-node
|
||||||
|
if: steps.cache.outputs.cache-hit != 'true'
|
||||||
|
run: |
|
||||||
|
cd polkadot-sdk
|
||||||
|
cargo install --locked --force --profile=production --path substrate/bin/node/cli --bin substrate-node --features cli
|
||||||
|
|
||||||
|
- name: Build eth-rpc
|
||||||
|
if: steps.cache.outputs.cache-hit != 'true'
|
||||||
|
run: |
|
||||||
|
cd polkadot-sdk
|
||||||
|
cargo install --path substrate/frame/revive/rpc --bin eth-rpc
|
||||||
|
|
||||||
|
- name: Cache downloaded Polkadot binaries
|
||||||
|
id: cache-polkadot
|
||||||
|
uses: actions/cache@v3
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
~/polkadot-cache/polkadot
|
||||||
|
~/polkadot-cache/polkadot-execute-worker
|
||||||
|
~/polkadot-cache/polkadot-prepare-worker
|
||||||
|
~/polkadot-cache/polkadot-parachain
|
||||||
|
key: polkadot-downloaded-${{ matrix.os }}-${{ env.POLKADOT_VERSION }}
|
||||||
|
|
||||||
|
- name: Download Polkadot binaries on macOS
|
||||||
|
if: matrix.os == 'macos-14' && steps.cache-polkadot.outputs.cache-hit != 'true'
|
||||||
|
run: |
|
||||||
|
mkdir -p ~/polkadot-cache
|
||||||
|
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-aarch64-apple-darwin -o ~/polkadot-cache/polkadot
|
||||||
|
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-execute-worker-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-execute-worker
|
||||||
|
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-prepare-worker-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-prepare-worker
|
||||||
|
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-parachain-aarch64-apple-darwin -o ~/polkadot-cache/polkadot-parachain
|
||||||
|
chmod +x ~/polkadot-cache/*
|
||||||
|
|
||||||
|
- name: Download Polkadot binaries on Ubuntu
|
||||||
|
if: matrix.os == 'ubuntu-24.04' && steps.cache-polkadot.outputs.cache-hit != 'true'
|
||||||
|
run: |
|
||||||
|
mkdir -p ~/polkadot-cache
|
||||||
|
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot -o ~/polkadot-cache/polkadot
|
||||||
|
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-execute-worker -o ~/polkadot-cache/polkadot-execute-worker
|
||||||
|
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-prepare-worker -o ~/polkadot-cache/polkadot-prepare-worker
|
||||||
|
curl -sL https://github.com/paritytech/polkadot-sdk/releases/download/${{ env.POLKADOT_VERSION }}/polkadot-parachain -o ~/polkadot-cache/polkadot-parachain
|
||||||
|
chmod +x ~/polkadot-cache/*
|
||||||
|
|
||||||
|
ci:
|
||||||
|
name: CI on ${{ matrix.os }}
|
||||||
|
needs: cache-polkadot
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
os: [ubuntu-24.04, macos-14]
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout repo
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Restore binaries from cache
|
||||||
|
uses: actions/cache@v3
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
~/.cargo/bin/substrate-node
|
||||||
|
~/.cargo/bin/eth-rpc
|
||||||
|
key: polkadot-binaries-${{ matrix.os }}-${{ hashFiles('polkadot-sdk/.git') }}
|
||||||
|
|
||||||
|
- name: Restore downloaded Polkadot binaries from cache
|
||||||
|
uses: actions/cache@v3
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
~/polkadot-cache/polkadot
|
||||||
|
~/polkadot-cache/polkadot-execute-worker
|
||||||
|
~/polkadot-cache/polkadot-prepare-worker
|
||||||
|
~/polkadot-cache/polkadot-parachain
|
||||||
|
key: polkadot-downloaded-${{ matrix.os }}-${{ env.POLKADOT_VERSION }}
|
||||||
|
|
||||||
|
- name: Install Polkadot binaries
|
||||||
|
run: |
|
||||||
|
sudo cp ~/polkadot-cache/polkadot /usr/local/bin/
|
||||||
|
sudo cp ~/polkadot-cache/polkadot-execute-worker /usr/local/bin/
|
||||||
|
sudo cp ~/polkadot-cache/polkadot-prepare-worker /usr/local/bin/
|
||||||
|
sudo cp ~/polkadot-cache/polkadot-parachain /usr/local/bin/
|
||||||
|
sudo chmod +x /usr/local/bin/polkadot*
|
||||||
|
|
||||||
|
- name: Setup Rust toolchain
|
||||||
|
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||||
|
with:
|
||||||
|
rustflags: ""
|
||||||
|
|
||||||
|
- name: Add wasm32 target and formatting
|
||||||
|
run: |
|
||||||
|
rustup target add wasm32-unknown-unknown
|
||||||
|
rustup component add rust-src rustfmt clippy
|
||||||
|
|
||||||
- name: Install Geth on Ubuntu
|
- name: Install Geth on Ubuntu
|
||||||
if: matrix.os == 'ubuntu-24.04'
|
if: matrix.os == 'ubuntu-24.04'
|
||||||
run: |
|
run: |
|
||||||
@@ -139,6 +178,7 @@ jobs:
|
|||||||
curl -sL https://github.com/paritytech/revive/releases/download/v0.3.0/resolc-x86_64-unknown-linux-musl -o resolc
|
curl -sL https://github.com/paritytech/revive/releases/download/v0.3.0/resolc-x86_64-unknown-linux-musl -o resolc
|
||||||
chmod +x resolc
|
chmod +x resolc
|
||||||
sudo mv resolc /usr/local/bin
|
sudo mv resolc /usr/local/bin
|
||||||
|
|
||||||
- name: Install Geth on macOS
|
- name: Install Geth on macOS
|
||||||
if: matrix.os == 'macos-14'
|
if: matrix.os == 'macos-14'
|
||||||
run: |
|
run: |
|
||||||
@@ -150,79 +190,64 @@ jobs:
|
|||||||
curl -sL https://github.com/paritytech/revive/releases/download/v0.3.0/resolc-universal-apple-darwin -o resolc
|
curl -sL https://github.com/paritytech/revive/releases/download/v0.3.0/resolc-universal-apple-darwin -o resolc
|
||||||
chmod +x resolc
|
chmod +x resolc
|
||||||
sudo mv resolc /usr/local/bin
|
sudo mv resolc /usr/local/bin
|
||||||
|
|
||||||
- name: Install Kurtosis on macOS
|
- name: Install Kurtosis on macOS
|
||||||
if: matrix.os == 'macos-14'
|
if: matrix.os == 'macos-14'
|
||||||
run: brew install kurtosis-tech/tap/kurtosis-cli
|
run: brew install kurtosis-tech/tap/kurtosis-cli
|
||||||
|
|
||||||
- name: Install Kurtosis on Ubuntu
|
- name: Install Kurtosis on Ubuntu
|
||||||
if: matrix.os == 'ubuntu-24.04'
|
if: matrix.os == 'ubuntu-24.04'
|
||||||
run: |
|
run: |
|
||||||
echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
|
echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
|
||||||
sudo apt update
|
sudo apt update
|
||||||
sudo apt install kurtosis-cli
|
sudo apt install kurtosis-cli
|
||||||
- name: Run Tests
|
|
||||||
run: cargo make test
|
|
||||||
cache-polkadot:
|
|
||||||
name: Build and Cache Polkadot Binaries on ${{ matrix.os }}
|
|
||||||
runs-on: ${{ matrix.os }}
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
os: [ubuntu-24.04, macos-14]
|
|
||||||
env:
|
|
||||||
SCCACHE_GHA_ENABLED: "true"
|
|
||||||
RUSTC_WRAPPER: "sccache"
|
|
||||||
RUSTFLAGS: "-Awarnings"
|
|
||||||
POLKADOT_SDK_COMMIT_HASH: "30cda2aad8612a10ff729d494acd9d5353294d63"
|
|
||||||
steps:
|
|
||||||
- name: Caching Step
|
|
||||||
id: cache-step
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/.cargo/bin/eth-rpc
|
|
||||||
~/.cargo/bin/revive-dev-node
|
|
||||||
key: polkadot-binaries-${{ env.POLKADOT_SDK_COMMIT_HASH }}-${{ matrix.os }}
|
|
||||||
- name: Checkout the Polkadot SDK Repository
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
if: steps.cache-step.outputs.cache-hit != 'true'
|
|
||||||
with:
|
|
||||||
repository: paritytech/polkadot-sdk
|
|
||||||
ref: ${{ env.POLKADOT_SDK_COMMIT_HASH }}
|
|
||||||
submodules: recursive
|
|
||||||
- name: Run Sccache
|
|
||||||
uses: mozilla-actions/sccache-action@v0.0.9
|
|
||||||
if: steps.cache-step.outputs.cache-hit != 'true'
|
|
||||||
- name: Install the Rust Toolchain
|
|
||||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
|
||||||
if: steps.cache-step.outputs.cache-hit != 'true'
|
|
||||||
with:
|
|
||||||
target: "wasm32-unknown-unknown"
|
|
||||||
components: "rust-src"
|
|
||||||
toolchain: "1.90.0"
|
|
||||||
|
|
||||||
- name: Install dependencies (Linux)
|
- name: Machete
|
||||||
if: matrix.os == 'ubuntu-24.04' && steps.cache-step.outputs.cache-hit != 'true'
|
uses: bnjbvr/cargo-machete@v0.7.1
|
||||||
|
|
||||||
|
- name: Format
|
||||||
|
run: make format
|
||||||
|
|
||||||
|
- name: Clippy
|
||||||
|
run: make clippy
|
||||||
|
|
||||||
|
- name: Check substrate-node version
|
||||||
|
run: substrate-node --version
|
||||||
|
|
||||||
|
- name: Check eth-rpc version
|
||||||
|
run: eth-rpc --version
|
||||||
|
|
||||||
|
- name: Check resolc version
|
||||||
|
run: resolc --version
|
||||||
|
|
||||||
|
- name: Check polkadot version
|
||||||
|
run: polkadot --version
|
||||||
|
|
||||||
|
- name: Check polkadot-parachain version
|
||||||
|
run: polkadot-parachain --version
|
||||||
|
|
||||||
|
- name: Check polkadot-execute-worker version
|
||||||
|
run: polkadot-execute-worker --version
|
||||||
|
|
||||||
|
- name: Check polkadot-prepare-worker version
|
||||||
|
run: polkadot-prepare-worker --version
|
||||||
|
|
||||||
|
- name: Test Formatting
|
||||||
|
run: make format
|
||||||
|
|
||||||
|
- name: Test Clippy
|
||||||
|
run: make clippy
|
||||||
|
|
||||||
|
- name: Test Machete
|
||||||
|
run: make machete
|
||||||
|
|
||||||
|
- name: Unit Tests
|
||||||
|
if: matrix.os == 'ubuntu-24.04'
|
||||||
|
run: cargo test --workspace -- --nocapture
|
||||||
|
|
||||||
|
# We can't install docker in the MacOS image used in CI and therefore we need to skip the
|
||||||
|
# Kurtosis and lighthouse related tests when running the CI on MacOS.
|
||||||
|
- name: Unit Tests
|
||||||
|
if: matrix.os == 'macos-14'
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update
|
cargo test --workspace -- --nocapture --skip lighthouse_geth::tests::
|
||||||
sudo apt-get install -y protobuf-compiler clang libclang-dev
|
|
||||||
- name: Install dependencies (macOS)
|
|
||||||
if: matrix.os == 'macos-14' && steps.cache-step.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
brew install protobuf llvm
|
|
||||||
LLVM_PREFIX="$(brew --prefix llvm)"
|
|
||||||
echo "LDFLAGS=-L${LLVM_PREFIX}/lib" >> "$GITHUB_ENV"
|
|
||||||
echo "CPPFLAGS=-I${LLVM_PREFIX}/include" >> "$GITHUB_ENV"
|
|
||||||
echo "CMAKE_PREFIX_PATH=${LLVM_PREFIX}" >> "$GITHUB_ENV"
|
|
||||||
echo "LIBCLANG_PATH=${LLVM_PREFIX}/lib" >> "$GITHUB_ENV"
|
|
||||||
echo "DYLD_FALLBACK_LIBRARY_PATH=${LLVM_PREFIX}/lib" >> "$GITHUB_ENV"
|
|
||||||
echo "${LLVM_PREFIX}/bin" >> "$GITHUB_PATH"
|
|
||||||
- name: Build Polkadot Dependencies
|
|
||||||
if: steps.cache-step.outputs.cache-hit != 'true'
|
|
||||||
run: |
|
|
||||||
cargo build \
|
|
||||||
--locked \
|
|
||||||
--profile production \
|
|
||||||
--package revive-dev-node \
|
|
||||||
--package pallet-revive-eth-rpc;
|
|
||||||
mv ./target/production/revive-dev-node ~/.cargo/bin
|
|
||||||
mv ./target/production/eth-rpc ~/.cargo/bin
|
|
||||||
chmod +x ~/.cargo/bin/*
|
|
||||||
|
|||||||
@@ -3,7 +3,6 @@
|
|||||||
.DS_Store
|
.DS_Store
|
||||||
node_modules
|
node_modules
|
||||||
/*.json
|
/*.json
|
||||||
*.sh
|
|
||||||
|
|
||||||
# We do not want to commit any log files that we produce from running the code locally so this is
|
# We do not want to commit any log files that we produce from running the code locally so this is
|
||||||
# added to the .gitignore file.
|
# added to the .gitignore file.
|
||||||
@@ -14,4 +13,3 @@ workdir
|
|||||||
|
|
||||||
!/schema.json
|
!/schema.json
|
||||||
!/dev-genesis.json
|
!/dev-genesis.json
|
||||||
!/scripts/*
|
|
||||||
|
|||||||
@@ -1,3 +1,6 @@
|
|||||||
|
[submodule "polkadot-sdk"]
|
||||||
|
path = polkadot-sdk
|
||||||
|
url = https://github.com/paritytech/polkadot-sdk.git
|
||||||
[submodule "resolc-compiler-tests"]
|
[submodule "resolc-compiler-tests"]
|
||||||
path = resolc-compiler-tests
|
path = resolc-compiler-tests
|
||||||
url = https://github.com/paritytech/resolc-compiler-tests
|
url = https://github.com/paritytech/resolc-compiler-tests
|
||||||
|
|||||||
Generated
-15
@@ -1920,7 +1920,6 @@ dependencies = [
|
|||||||
"anstyle",
|
"anstyle",
|
||||||
"clap_lex",
|
"clap_lex",
|
||||||
"strsim",
|
"strsim",
|
||||||
"terminal_size",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -5657,7 +5656,6 @@ dependencies = [
|
|||||||
"semver 1.0.26",
|
"semver 1.0.26",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_json",
|
"serde_json",
|
||||||
"subxt 0.44.0",
|
|
||||||
"tokio",
|
"tokio",
|
||||||
"tracing",
|
"tracing",
|
||||||
"tracing-appender",
|
"tracing-appender",
|
||||||
@@ -5694,7 +5692,6 @@ dependencies = [
|
|||||||
"revive-dt-config",
|
"revive-dt-config",
|
||||||
"revive-dt-format",
|
"revive-dt-format",
|
||||||
"revive-dt-node-interaction",
|
"revive-dt-node-interaction",
|
||||||
"revive-dt-report",
|
|
||||||
"serde",
|
"serde",
|
||||||
"serde_json",
|
"serde_json",
|
||||||
"serde_with",
|
"serde_with",
|
||||||
@@ -5718,7 +5715,6 @@ dependencies = [
|
|||||||
"futures",
|
"futures",
|
||||||
"revive-common",
|
"revive-common",
|
||||||
"revive-dt-format",
|
"revive-dt-format",
|
||||||
"revive-dt-report",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -5728,7 +5724,6 @@ dependencies = [
|
|||||||
"alloy",
|
"alloy",
|
||||||
"anyhow",
|
"anyhow",
|
||||||
"indexmap 2.10.0",
|
"indexmap 2.10.0",
|
||||||
"itertools 0.14.0",
|
|
||||||
"paste",
|
"paste",
|
||||||
"revive-dt-common",
|
"revive-dt-common",
|
||||||
"revive-dt-compiler",
|
"revive-dt-compiler",
|
||||||
@@ -7840,16 +7835,6 @@ dependencies = [
|
|||||||
"winapi-util",
|
"winapi-util",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
|
||||||
name = "terminal_size"
|
|
||||||
version = "0.4.2"
|
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
|
||||||
checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed"
|
|
||||||
dependencies = [
|
|
||||||
"rustix",
|
|
||||||
"windows-sys 0.59.0",
|
|
||||||
]
|
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "thiserror"
|
name = "thiserror"
|
||||||
version = "1.0.69"
|
version = "1.0.69"
|
||||||
|
|||||||
+1
-1
@@ -26,7 +26,7 @@ ansi_term = "0.12.1"
|
|||||||
anyhow = "1.0"
|
anyhow = "1.0"
|
||||||
bson = { version = "2.15.0" }
|
bson = { version = "2.15.0" }
|
||||||
cacache = { version = "13.1.0" }
|
cacache = { version = "13.1.0" }
|
||||||
clap = { version = "4", features = ["derive", "wrap_help"] }
|
clap = { version = "4", features = ["derive"] }
|
||||||
dashmap = { version = "6.1.0" }
|
dashmap = { version = "6.1.0" }
|
||||||
foundry-compilers-artifacts = { version = "0.18.0" }
|
foundry-compilers-artifacts = { version = "0.18.0" }
|
||||||
futures = { version = "0.3.31" }
|
futures = { version = "0.3.31" }
|
||||||
|
|||||||
@@ -0,0 +1,15 @@
|
|||||||
|
.PHONY: format clippy test machete
|
||||||
|
|
||||||
|
format:
|
||||||
|
cargo fmt --all -- --check
|
||||||
|
|
||||||
|
clippy:
|
||||||
|
cargo clippy --all-features --workspace -- --deny warnings
|
||||||
|
|
||||||
|
machete:
|
||||||
|
cargo install cargo-machete
|
||||||
|
cargo machete crates
|
||||||
|
|
||||||
|
test: format clippy machete
|
||||||
|
cargo test --workspace -- --nocapture
|
||||||
|
|
||||||
@@ -1,21 +0,0 @@
|
|||||||
[config]
|
|
||||||
default_to_workspace = false
|
|
||||||
|
|
||||||
[tasks.machete]
|
|
||||||
command = "cargo"
|
|
||||||
args = ["machete", "crates"]
|
|
||||||
install_crate = "cargo-machete"
|
|
||||||
|
|
||||||
[tasks.fmt-check]
|
|
||||||
command = "cargo"
|
|
||||||
args = ["fmt", "--all", "--", "--check"]
|
|
||||||
install_crate = "rustfmt"
|
|
||||||
|
|
||||||
[tasks.clippy]
|
|
||||||
command = "cargo"
|
|
||||||
args = ["clippy", "--all-features", "--workspace", "--", "--deny", "warnings"]
|
|
||||||
install_crate = "clippy"
|
|
||||||
|
|
||||||
[tasks.test]
|
|
||||||
command = "cargo"
|
|
||||||
args = ["test", "--workspace", "--", "--nocapture"]
|
|
||||||
@@ -9,7 +9,7 @@
|
|||||||
This project compiles and executes declarative smart-contract tests against multiple platforms, then compares behavior (status, return data, events, and state diffs). Today it supports:
|
This project compiles and executes declarative smart-contract tests against multiple platforms, then compares behavior (status, return data, events, and state diffs). Today it supports:
|
||||||
|
|
||||||
- Geth (EVM reference implementation)
|
- Geth (EVM reference implementation)
|
||||||
- Revive Dev Node (Substrate-based PolkaVM + `eth-rpc` proxy)
|
- Revive Kitchensink (Substrate-based PolkaVM + `eth-rpc` proxy)
|
||||||
|
|
||||||
Use it to:
|
Use it to:
|
||||||
|
|
||||||
@@ -39,9 +39,9 @@ This repository contains none of the tests and only contains the testing framewo
|
|||||||
This section describes the required dependencies that this framework requires to run. Compiling this framework is pretty straightforward and no additional dependencies beyond what's specified in the `Cargo.toml` file should be required.
|
This section describes the required dependencies that this framework requires to run. Compiling this framework is pretty straightforward and no additional dependencies beyond what's specified in the `Cargo.toml` file should be required.
|
||||||
|
|
||||||
- Stable Rust
|
- Stable Rust
|
||||||
- Geth - When doing differential testing against the PVM we submit transactions to a Geth node and to Revive Dev Node to compare them.
|
- Geth - When doing differential testing against the PVM we submit transactions to a Geth node and to Kitchensink to compare them.
|
||||||
- Revive Dev Node - When doing differential testing against the PVM we submit transactions to a Geth node and to Revive Dev Node to compare them.
|
- Kitchensink - When doing differential testing against the PVM we submit transactions to a Geth node and to Kitchensink to compare them.
|
||||||
- ETH-RPC - All communication with Revive Dev Node is done through the ETH RPC.
|
- ETH-RPC - All communication with Kitchensink is done through the ETH RPC.
|
||||||
- Solc - This is actually a transitive dependency, while this tool doesn't require solc as it downloads the versions that it requires, resolc requires that Solc is installed and available in the path.
|
- Solc - This is actually a transitive dependency, while this tool doesn't require solc as it downloads the versions that it requires, resolc requires that Solc is installed and available in the path.
|
||||||
- Resolc - This is required to compile the contracts to PolkaVM bytecode.
|
- Resolc - This is required to compile the contracts to PolkaVM bytecode.
|
||||||
- Kurtosis - The Kurtosis CLI tool is required for the production Ethereum mainnet-like node configuration with Geth as the execution layer and lighthouse as the consensus layer. Kurtosis also requires docker to be installed since it runs everything inside of docker containers.
|
- Kurtosis - The Kurtosis CLI tool is required for the production Ethereum mainnet-like node configuration with Geth as the execution layer and lighthouse as the consensus layer. Kurtosis also requires docker to be installed since it runs everything inside of docker containers.
|
||||||
|
|||||||
Binary file not shown.
@@ -31,6 +31,10 @@ pub enum PlatformIdentifier {
|
|||||||
GethEvmSolc,
|
GethEvmSolc,
|
||||||
/// The Lighthouse Go-ethereum reference full node EVM implementation with the solc compiler.
|
/// The Lighthouse Go-ethereum reference full node EVM implementation with the solc compiler.
|
||||||
LighthouseGethEvmSolc,
|
LighthouseGethEvmSolc,
|
||||||
|
/// The kitchensink node with the PolkaVM backend with the resolc compiler.
|
||||||
|
KitchensinkPolkavmResolc,
|
||||||
|
/// The kitchensink node with the REVM backend with the solc compiler.
|
||||||
|
KitchensinkRevmSolc,
|
||||||
/// The revive dev node with the PolkaVM backend with the resolc compiler.
|
/// The revive dev node with the PolkaVM backend with the resolc compiler.
|
||||||
ReviveDevNodePolkavmResolc,
|
ReviveDevNodePolkavmResolc,
|
||||||
/// The revive dev node with the REVM backend with the solc compiler.
|
/// The revive dev node with the REVM backend with the solc compiler.
|
||||||
@@ -91,6 +95,8 @@ pub enum NodeIdentifier {
|
|||||||
Geth,
|
Geth,
|
||||||
/// The go-ethereum node implementation.
|
/// The go-ethereum node implementation.
|
||||||
LighthouseGeth,
|
LighthouseGeth,
|
||||||
|
/// The Kitchensink node implementation.
|
||||||
|
Kitchensink,
|
||||||
/// The revive dev node implementation.
|
/// The revive dev node implementation.
|
||||||
ReviveDevNode,
|
ReviveDevNode,
|
||||||
/// A zombienet spawned nodes
|
/// A zombienet spawned nodes
|
||||||
|
|||||||
@@ -16,7 +16,6 @@ use alloy::{
|
|||||||
primitives::{B256, FixedBytes, U256},
|
primitives::{B256, FixedBytes, U256},
|
||||||
signers::local::PrivateKeySigner,
|
signers::local::PrivateKeySigner,
|
||||||
};
|
};
|
||||||
use anyhow::Context as _;
|
|
||||||
use clap::{Parser, ValueEnum, ValueHint};
|
use clap::{Parser, ValueEnum, ValueHint};
|
||||||
use revive_dt_common::types::{ParsedTestSpecifier, PlatformIdentifier};
|
use revive_dt_common::types::{ParsedTestSpecifier, PlatformIdentifier};
|
||||||
use semver::Version;
|
use semver::Version;
|
||||||
@@ -25,7 +24,7 @@ use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
|
|||||||
use temp_dir::TempDir;
|
use temp_dir::TempDir;
|
||||||
|
|
||||||
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
#[command(name = "retester", term_width = 100)]
|
#[command(name = "retester")]
|
||||||
pub enum Context {
|
pub enum Context {
|
||||||
/// Executes tests in the MatterLabs format differentially on multiple targets concurrently.
|
/// Executes tests in the MatterLabs format differentially on multiple targets concurrently.
|
||||||
Test(Box<TestExecutionContext>),
|
Test(Box<TestExecutionContext>),
|
||||||
@@ -132,6 +131,17 @@ impl AsRef<PolkadotParachainConfiguration> for Context {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl AsRef<KitchensinkConfiguration> for Context {
|
||||||
|
fn as_ref(&self) -> &KitchensinkConfiguration {
|
||||||
|
match self {
|
||||||
|
Self::Test(context) => context.as_ref().as_ref(),
|
||||||
|
Self::Benchmark(context) => context.as_ref().as_ref(),
|
||||||
|
Self::ExportGenesis(context) => context.as_ref().as_ref(),
|
||||||
|
Self::ExportJsonSchema => unreachable!(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl AsRef<ReviveDevNodeConfiguration> for Context {
|
impl AsRef<ReviveDevNodeConfiguration> for Context {
|
||||||
fn as_ref(&self) -> &ReviveDevNodeConfiguration {
|
fn as_ref(&self) -> &ReviveDevNodeConfiguration {
|
||||||
match self {
|
match self {
|
||||||
@@ -273,6 +283,10 @@ pub struct TestExecutionContext {
|
|||||||
#[clap(flatten, next_help_heading = "Lighthouse Configuration")]
|
#[clap(flatten, next_help_heading = "Lighthouse Configuration")]
|
||||||
pub lighthouse_configuration: KurtosisConfiguration,
|
pub lighthouse_configuration: KurtosisConfiguration,
|
||||||
|
|
||||||
|
/// Configuration parameters for the Kitchensink.
|
||||||
|
#[clap(flatten, next_help_heading = "Kitchensink Configuration")]
|
||||||
|
pub kitchensink_configuration: KitchensinkConfiguration,
|
||||||
|
|
||||||
/// Configuration parameters for the Revive Dev Node.
|
/// Configuration parameters for the Revive Dev Node.
|
||||||
#[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
|
#[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
|
||||||
pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
|
pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
|
||||||
@@ -395,6 +409,10 @@ pub struct BenchmarkingContext {
|
|||||||
#[clap(flatten, next_help_heading = "Lighthouse Configuration")]
|
#[clap(flatten, next_help_heading = "Lighthouse Configuration")]
|
||||||
pub lighthouse_configuration: KurtosisConfiguration,
|
pub lighthouse_configuration: KurtosisConfiguration,
|
||||||
|
|
||||||
|
/// Configuration parameters for the Kitchensink.
|
||||||
|
#[clap(flatten, next_help_heading = "Kitchensink Configuration")]
|
||||||
|
pub kitchensink_configuration: KitchensinkConfiguration,
|
||||||
|
|
||||||
/// Configuration parameters for the Polkadot Parachain.
|
/// Configuration parameters for the Polkadot Parachain.
|
||||||
#[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")]
|
#[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")]
|
||||||
pub polkadot_parachain_configuration: PolkadotParachainConfiguration,
|
pub polkadot_parachain_configuration: PolkadotParachainConfiguration,
|
||||||
@@ -473,6 +491,10 @@ pub struct ExportGenesisContext {
|
|||||||
#[clap(flatten, next_help_heading = "Lighthouse Configuration")]
|
#[clap(flatten, next_help_heading = "Lighthouse Configuration")]
|
||||||
pub lighthouse_configuration: KurtosisConfiguration,
|
pub lighthouse_configuration: KurtosisConfiguration,
|
||||||
|
|
||||||
|
/// Configuration parameters for the Kitchensink.
|
||||||
|
#[clap(flatten, next_help_heading = "Kitchensink Configuration")]
|
||||||
|
pub kitchensink_configuration: KitchensinkConfiguration,
|
||||||
|
|
||||||
/// Configuration parameters for the Polkadot Parachain.
|
/// Configuration parameters for the Polkadot Parachain.
|
||||||
#[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")]
|
#[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")]
|
||||||
pub polkadot_parachain_configuration: PolkadotParachainConfiguration,
|
pub polkadot_parachain_configuration: PolkadotParachainConfiguration,
|
||||||
@@ -488,7 +510,7 @@ pub struct ExportGenesisContext {
|
|||||||
|
|
||||||
impl Default for TestExecutionContext {
|
impl Default for TestExecutionContext {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self::parse_from(["execution-context", "--test", "."])
|
Self::parse_from(["execution-context"])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -534,6 +556,12 @@ impl AsRef<KurtosisConfiguration> for TestExecutionContext {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl AsRef<KitchensinkConfiguration> for TestExecutionContext {
|
||||||
|
fn as_ref(&self) -> &KitchensinkConfiguration {
|
||||||
|
&self.kitchensink_configuration
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl AsRef<ReviveDevNodeConfiguration> for TestExecutionContext {
|
impl AsRef<ReviveDevNodeConfiguration> for TestExecutionContext {
|
||||||
fn as_ref(&self) -> &ReviveDevNodeConfiguration {
|
fn as_ref(&self) -> &ReviveDevNodeConfiguration {
|
||||||
&self.revive_dev_node_configuration
|
&self.revive_dev_node_configuration
|
||||||
@@ -584,7 +612,7 @@ impl AsRef<IgnoreSuccessConfiguration> for TestExecutionContext {
|
|||||||
|
|
||||||
impl Default for BenchmarkingContext {
|
impl Default for BenchmarkingContext {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self::parse_from(["benchmarking-context", "--test", "."])
|
Self::parse_from(["benchmarking-context"])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -630,6 +658,12 @@ impl AsRef<PolkadotParachainConfiguration> for BenchmarkingContext {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl AsRef<KitchensinkConfiguration> for BenchmarkingContext {
|
||||||
|
fn as_ref(&self) -> &KitchensinkConfiguration {
|
||||||
|
&self.kitchensink_configuration
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl AsRef<ReviveDevNodeConfiguration> for BenchmarkingContext {
|
impl AsRef<ReviveDevNodeConfiguration> for BenchmarkingContext {
|
||||||
fn as_ref(&self) -> &ReviveDevNodeConfiguration {
|
fn as_ref(&self) -> &ReviveDevNodeConfiguration {
|
||||||
&self.revive_dev_node_configuration
|
&self.revive_dev_node_configuration
|
||||||
@@ -684,6 +718,12 @@ impl AsRef<KurtosisConfiguration> for ExportGenesisContext {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl AsRef<KitchensinkConfiguration> for ExportGenesisContext {
|
||||||
|
fn as_ref(&self) -> &KitchensinkConfiguration {
|
||||||
|
&self.kitchensink_configuration
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl AsRef<PolkadotParachainConfiguration> for ExportGenesisContext {
|
impl AsRef<PolkadotParachainConfiguration> for ExportGenesisContext {
|
||||||
fn as_ref(&self) -> &PolkadotParachainConfiguration {
|
fn as_ref(&self) -> &PolkadotParachainConfiguration {
|
||||||
&self.polkadot_parachain_configuration
|
&self.polkadot_parachain_configuration
|
||||||
@@ -719,7 +759,7 @@ pub struct CorpusConfiguration {
|
|||||||
/// - `{metadata_file_path}::{case_idx}::{mode}`: This is very similar to the above specifier
|
/// - `{metadata_file_path}::{case_idx}::{mode}`: This is very similar to the above specifier
|
||||||
/// with the exception that in this case the mode is specified and will be used in the test.
|
/// with the exception that in this case the mode is specified and will be used in the test.
|
||||||
#[serde_as(as = "Vec<serde_with::DisplayFromStr>")]
|
#[serde_as(as = "Vec<serde_with::DisplayFromStr>")]
|
||||||
#[arg(short = 't', long = "test", required = true)]
|
#[arg(short = 't', long = "test")]
|
||||||
pub test_specifiers: Vec<ParsedTestSpecifier>,
|
pub test_specifiers: Vec<ParsedTestSpecifier>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -802,6 +842,30 @@ pub struct KurtosisConfiguration {
|
|||||||
pub path: PathBuf,
|
pub path: PathBuf,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// A set of configuration parameters for Kitchensink.
|
||||||
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
|
pub struct KitchensinkConfiguration {
|
||||||
|
/// Specifies the path of the kitchensink node to be used by the tool.
|
||||||
|
///
|
||||||
|
/// If this is not specified, then the tool assumes that it should use the kitchensink binary
|
||||||
|
/// that's provided in the user's $PATH.
|
||||||
|
#[clap(
|
||||||
|
id = "kitchensink.path",
|
||||||
|
long = "kitchensink.path",
|
||||||
|
default_value = "substrate-node"
|
||||||
|
)]
|
||||||
|
pub path: PathBuf,
|
||||||
|
|
||||||
|
/// The amount of time to wait upon startup before considering that the node timed out.
|
||||||
|
#[clap(
|
||||||
|
id = "kitchensink.start-timeout-ms",
|
||||||
|
long = "kitchensink.start-timeout-ms",
|
||||||
|
default_value = "30000",
|
||||||
|
value_parser = parse_duration
|
||||||
|
)]
|
||||||
|
pub start_timeout_ms: Duration,
|
||||||
|
}
|
||||||
|
|
||||||
/// A set of configuration parameters for the revive dev node.
|
/// A set of configuration parameters for the revive dev node.
|
||||||
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
|
||||||
pub struct ReviveDevNodeConfiguration {
|
pub struct ReviveDevNodeConfiguration {
|
||||||
@@ -1080,10 +1144,7 @@ impl FromStr for WorkingDirectoryConfiguration {
|
|||||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||||
match s {
|
match s {
|
||||||
"" => Ok(Default::default()),
|
"" => Ok(Default::default()),
|
||||||
_ => PathBuf::from(s)
|
_ => Ok(Self::Path(PathBuf::from(s))),
|
||||||
.canonicalize()
|
|
||||||
.context("Failed to canonicalize the working directory path")
|
|
||||||
.map(Self::Path),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1100,6 +1161,35 @@ fn parse_duration(s: &str) -> anyhow::Result<Duration> {
|
|||||||
.map_err(Into::into)
|
.map_err(Into::into)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// The Solidity compatible node implementation.
|
||||||
|
///
|
||||||
|
/// This describes the solutions to be tested against on a high level.
|
||||||
|
#[derive(
|
||||||
|
Clone,
|
||||||
|
Copy,
|
||||||
|
Debug,
|
||||||
|
PartialEq,
|
||||||
|
Eq,
|
||||||
|
PartialOrd,
|
||||||
|
Ord,
|
||||||
|
Hash,
|
||||||
|
Serialize,
|
||||||
|
ValueEnum,
|
||||||
|
EnumString,
|
||||||
|
Display,
|
||||||
|
AsRefStr,
|
||||||
|
IntoStaticStr,
|
||||||
|
)]
|
||||||
|
#[strum(serialize_all = "kebab-case")]
|
||||||
|
pub enum TestingPlatform {
|
||||||
|
/// The go-ethereum reference full node EVM implementation.
|
||||||
|
Geth,
|
||||||
|
/// The kitchensink runtime provides the PolkaVM (PVM) based node implementation.
|
||||||
|
Kitchensink,
|
||||||
|
/// A polkadot/Substrate based network
|
||||||
|
Zombienet,
|
||||||
|
}
|
||||||
|
|
||||||
/// The output format to use for the test execution output.
|
/// The output format to use for the test execution output.
|
||||||
#[derive(
|
#[derive(
|
||||||
Clone,
|
Clone,
|
||||||
|
|||||||
@@ -37,7 +37,6 @@ schemars = { workspace = true }
|
|||||||
semver = { workspace = true }
|
semver = { workspace = true }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
subxt = { workspace = true }
|
|
||||||
|
|
||||||
[lints]
|
[lints]
|
||||||
workspace = true
|
workspace = true
|
||||||
|
|||||||
@@ -127,8 +127,6 @@ where
|
|||||||
.inspect_err(|err| error!(?err, "Pre-linking compilation failed"))
|
.inspect_err(|err| error!(?err, "Pre-linking compilation failed"))
|
||||||
.context("Failed to produce the pre-linking compiled contracts")?;
|
.context("Failed to produce the pre-linking compiled contracts")?;
|
||||||
|
|
||||||
let deployer_address = self.test_definition.case.deployer_address();
|
|
||||||
|
|
||||||
let mut deployed_libraries = None::<HashMap<_, _>>;
|
let mut deployed_libraries = None::<HashMap<_, _>>;
|
||||||
let mut contract_sources = self
|
let mut contract_sources = self
|
||||||
.test_definition
|
.test_definition
|
||||||
@@ -161,12 +159,29 @@ where
|
|||||||
|
|
||||||
let code = alloy::hex::decode(code)?;
|
let code = alloy::hex::decode(code)?;
|
||||||
|
|
||||||
|
// Getting the deployer address from the cases themselves. This is to ensure
|
||||||
|
// that we're doing the deployments from different accounts and therefore we're
|
||||||
|
// not slowed down by the nonce.
|
||||||
|
let deployer_address = self
|
||||||
|
.test_definition
|
||||||
|
.case
|
||||||
|
.steps
|
||||||
|
.iter()
|
||||||
|
.filter_map(|step| match step {
|
||||||
|
Step::FunctionCall(input) => input.caller.as_address().copied(),
|
||||||
|
Step::BalanceAssertion(..) => None,
|
||||||
|
Step::StorageEmptyAssertion(..) => None,
|
||||||
|
Step::Repeat(..) => None,
|
||||||
|
Step::AllocateAccount(..) => None,
|
||||||
|
})
|
||||||
|
.next()
|
||||||
|
.unwrap_or(FunctionCallStep::default_caller_address());
|
||||||
let tx = TransactionBuilder::<Ethereum>::with_deploy_code(
|
let tx = TransactionBuilder::<Ethereum>::with_deploy_code(
|
||||||
TransactionRequest::default().from(deployer_address),
|
TransactionRequest::default().from(deployer_address),
|
||||||
code,
|
code,
|
||||||
);
|
);
|
||||||
let receipt = self
|
let receipt = self
|
||||||
.execute_transaction(tx, None)
|
.execute_transaction(tx)
|
||||||
.and_then(|(_, receipt_fut)| receipt_fut)
|
.and_then(|(_, receipt_fut)| receipt_fut)
|
||||||
.await
|
.await
|
||||||
.inspect_err(|err| {
|
.inspect_err(|err| {
|
||||||
@@ -203,22 +218,6 @@ where
|
|||||||
.inspect_err(|err| error!(?err, "Post-linking compilation failed"))
|
.inspect_err(|err| error!(?err, "Post-linking compilation failed"))
|
||||||
.context("Failed to compile the post-link contracts")?;
|
.context("Failed to compile the post-link contracts")?;
|
||||||
|
|
||||||
for (contract_path, contract_name_to_info_mapping) in compiler_output.contracts.iter() {
|
|
||||||
for (contract_name, (contract_bytecode, _)) in contract_name_to_info_mapping.iter() {
|
|
||||||
let contract_bytecode = hex::decode(contract_bytecode)
|
|
||||||
.expect("Impossible for us to get an undecodable bytecode after linking");
|
|
||||||
|
|
||||||
self.platform_information
|
|
||||||
.reporter
|
|
||||||
.report_contract_information_event(
|
|
||||||
contract_path.to_path_buf(),
|
|
||||||
contract_name.clone(),
|
|
||||||
contract_bytecode.len(),
|
|
||||||
)
|
|
||||||
.expect("Should not fail");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
self.execution_state = ExecutionState::new(
|
self.execution_state = ExecutionState::new(
|
||||||
compiler_output.contracts,
|
compiler_output.contracts,
|
||||||
deployed_libraries.unwrap_or_default(),
|
deployed_libraries.unwrap_or_default(),
|
||||||
@@ -280,15 +279,15 @@ where
|
|||||||
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id))]
|
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id))]
|
||||||
pub async fn execute_function_call(
|
pub async fn execute_function_call(
|
||||||
&mut self,
|
&mut self,
|
||||||
step_path: &StepPath,
|
_: &StepPath,
|
||||||
step: &FunctionCallStep,
|
step: &FunctionCallStep,
|
||||||
) -> Result<usize> {
|
) -> Result<usize> {
|
||||||
let deployment_receipts = self
|
let deployment_receipts = self
|
||||||
.handle_function_call_contract_deployment(step_path, step)
|
.handle_function_call_contract_deployment(step)
|
||||||
.await
|
.await
|
||||||
.context("Failed to deploy contracts for the function call step")?;
|
.context("Failed to deploy contracts for the function call step")?;
|
||||||
let transaction_hash = self
|
let transaction_hash = self
|
||||||
.handle_function_call_execution(step_path, step, deployment_receipts)
|
.handle_function_call_execution(step, deployment_receipts)
|
||||||
.await
|
.await
|
||||||
.context("Failed to handle the function call execution")?;
|
.context("Failed to handle the function call execution")?;
|
||||||
self.handle_function_call_variable_assignment(step, transaction_hash)
|
self.handle_function_call_variable_assignment(step, transaction_hash)
|
||||||
@@ -299,7 +298,6 @@ where
|
|||||||
|
|
||||||
async fn handle_function_call_contract_deployment(
|
async fn handle_function_call_contract_deployment(
|
||||||
&mut self,
|
&mut self,
|
||||||
step_path: &StepPath,
|
|
||||||
step: &FunctionCallStep,
|
step: &FunctionCallStep,
|
||||||
) -> Result<HashMap<ContractInstance, TransactionReceipt>> {
|
) -> Result<HashMap<ContractInstance, TransactionReceipt>> {
|
||||||
let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
|
let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
|
||||||
@@ -331,13 +329,7 @@ where
|
|||||||
.await?
|
.await?
|
||||||
};
|
};
|
||||||
if let (_, _, Some(receipt)) = self
|
if let (_, _, Some(receipt)) = self
|
||||||
.get_or_deploy_contract_instance(
|
.get_or_deploy_contract_instance(&instance, caller, calldata, value)
|
||||||
&instance,
|
|
||||||
caller,
|
|
||||||
calldata,
|
|
||||||
value,
|
|
||||||
Some(step_path),
|
|
||||||
)
|
|
||||||
.await
|
.await
|
||||||
.context("Failed to get or deploy contract instance during input execution")?
|
.context("Failed to get or deploy contract instance during input execution")?
|
||||||
{
|
{
|
||||||
@@ -350,7 +342,6 @@ where
|
|||||||
|
|
||||||
async fn handle_function_call_execution(
|
async fn handle_function_call_execution(
|
||||||
&mut self,
|
&mut self,
|
||||||
step_path: &StepPath,
|
|
||||||
step: &FunctionCallStep,
|
step: &FunctionCallStep,
|
||||||
mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
|
mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
|
||||||
) -> Result<TxHash> {
|
) -> Result<TxHash> {
|
||||||
@@ -365,7 +356,7 @@ where
|
|||||||
let tx = step
|
let tx = step
|
||||||
.as_transaction(self.resolver.as_ref(), self.default_resolution_context())
|
.as_transaction(self.resolver.as_ref(), self.default_resolution_context())
|
||||||
.await?;
|
.await?;
|
||||||
Ok(self.execute_transaction(tx, Some(step_path)).await?.0)
|
Ok(self.execute_transaction(tx).await?.0)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -529,7 +520,6 @@ where
|
|||||||
deployer: Address,
|
deployer: Address,
|
||||||
calldata: Option<&Calldata>,
|
calldata: Option<&Calldata>,
|
||||||
value: Option<EtherValue>,
|
value: Option<EtherValue>,
|
||||||
step_path: Option<&StepPath>,
|
|
||||||
) -> Result<(Address, JsonAbi, Option<TransactionReceipt>)> {
|
) -> Result<(Address, JsonAbi, Option<TransactionReceipt>)> {
|
||||||
if let Some((_, address, abi)) = self
|
if let Some((_, address, abi)) = self
|
||||||
.execution_state
|
.execution_state
|
||||||
@@ -545,7 +535,7 @@ where
|
|||||||
} else {
|
} else {
|
||||||
info!("Contract instance requires deployment.");
|
info!("Contract instance requires deployment.");
|
||||||
let (address, abi, receipt) = self
|
let (address, abi, receipt) = self
|
||||||
.deploy_contract(contract_instance, deployer, calldata, value, step_path)
|
.deploy_contract(contract_instance, deployer, calldata, value)
|
||||||
.await
|
.await
|
||||||
.context("Failed to deploy contract")?;
|
.context("Failed to deploy contract")?;
|
||||||
info!(
|
info!(
|
||||||
@@ -572,7 +562,6 @@ where
|
|||||||
deployer: Address,
|
deployer: Address,
|
||||||
calldata: Option<&Calldata>,
|
calldata: Option<&Calldata>,
|
||||||
value: Option<EtherValue>,
|
value: Option<EtherValue>,
|
||||||
step_path: Option<&StepPath>,
|
|
||||||
) -> Result<(Address, JsonAbi, TransactionReceipt)> {
|
) -> Result<(Address, JsonAbi, TransactionReceipt)> {
|
||||||
let Some(ContractPathAndIdent {
|
let Some(ContractPathAndIdent {
|
||||||
contract_source_path,
|
contract_source_path,
|
||||||
@@ -632,7 +621,7 @@ where
|
|||||||
};
|
};
|
||||||
|
|
||||||
let receipt = match self
|
let receipt = match self
|
||||||
.execute_transaction(tx, step_path)
|
.execute_transaction(tx)
|
||||||
.and_then(|(_, receipt_fut)| receipt_fut)
|
.and_then(|(_, receipt_fut)| receipt_fut)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
@@ -682,7 +671,6 @@ where
|
|||||||
async fn execute_transaction(
|
async fn execute_transaction(
|
||||||
&self,
|
&self,
|
||||||
transaction: TransactionRequest,
|
transaction: TransactionRequest,
|
||||||
step_path: Option<&StepPath>,
|
|
||||||
) -> anyhow::Result<(TxHash, impl Future<Output = Result<TransactionReceipt>>)> {
|
) -> anyhow::Result<(TxHash, impl Future<Output = Result<TransactionReceipt>>)> {
|
||||||
let node = self.platform_information.node;
|
let node = self.platform_information.node;
|
||||||
let transaction_hash = node
|
let transaction_hash = node
|
||||||
@@ -692,14 +680,9 @@ where
|
|||||||
Span::current().record("transaction_hash", display(transaction_hash));
|
Span::current().record("transaction_hash", display(transaction_hash));
|
||||||
|
|
||||||
info!("Submitted transaction");
|
info!("Submitted transaction");
|
||||||
if let Some(step_path) = step_path {
|
|
||||||
self.watcher_tx
|
self.watcher_tx
|
||||||
.send(WatcherEvent::SubmittedTransaction {
|
.send(WatcherEvent::SubmittedTransaction { transaction_hash })
|
||||||
transaction_hash,
|
|
||||||
step_path: step_path.clone(),
|
|
||||||
})
|
|
||||||
.context("Failed to send the transaction hash to the watcher")?;
|
.context("Failed to send the transaction hash to the watcher")?;
|
||||||
};
|
|
||||||
|
|
||||||
Ok((transaction_hash, async move {
|
Ok((transaction_hash, async move {
|
||||||
info!("Starting to poll for transaction receipt");
|
info!("Starting to poll for transaction receipt");
|
||||||
|
|||||||
@@ -145,14 +145,12 @@ pub async fn handle_differential_benchmarks(
|
|||||||
context.wallet_configuration.highest_private_key_exclusive(),
|
context.wallet_configuration.highest_private_key_exclusive(),
|
||||||
)));
|
)));
|
||||||
let (watcher, watcher_tx) = Watcher::new(
|
let (watcher, watcher_tx) = Watcher::new(
|
||||||
|
platform_identifier,
|
||||||
platform_information
|
platform_information
|
||||||
.node
|
.node
|
||||||
.subscribe_to_full_blocks_information()
|
.subscribe_to_full_blocks_information()
|
||||||
.await
|
.await
|
||||||
.context("Failed to subscribe to full blocks information from the node")?,
|
.context("Failed to subscribe to full blocks information from the node")?,
|
||||||
test_definition
|
|
||||||
.reporter
|
|
||||||
.execution_specific_reporter(0usize, platform_identifier),
|
|
||||||
);
|
);
|
||||||
let driver = Driver::new(
|
let driver = Driver::new(
|
||||||
platform_information,
|
platform_information,
|
||||||
|
|||||||
@@ -1,15 +1,10 @@
|
|||||||
use std::{
|
use std::{collections::HashSet, pin::Pin, sync::Arc};
|
||||||
collections::HashMap,
|
|
||||||
pin::Pin,
|
|
||||||
sync::Arc,
|
|
||||||
time::{SystemTime, UNIX_EPOCH},
|
|
||||||
};
|
|
||||||
|
|
||||||
use alloy::primitives::{BlockNumber, TxHash};
|
use alloy::primitives::{BlockNumber, TxHash};
|
||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
use futures::{Stream, StreamExt};
|
use futures::{Stream, StreamExt};
|
||||||
use revive_dt_format::steps::StepPath;
|
use revive_dt_common::types::PlatformIdentifier;
|
||||||
use revive_dt_report::{ExecutionSpecificReporter, MinedBlockInformation, TransactionInformation};
|
use revive_dt_node_interaction::MinedBlockInformation;
|
||||||
use tokio::sync::{
|
use tokio::sync::{
|
||||||
RwLock,
|
RwLock,
|
||||||
mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
|
mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel},
|
||||||
@@ -20,6 +15,9 @@ use tracing::{info, instrument};
|
|||||||
/// and MUST NOT be re-used between workloads since it holds important internal state for a given
|
/// and MUST NOT be re-used between workloads since it holds important internal state for a given
|
||||||
/// workload and is not designed for reuse.
|
/// workload and is not designed for reuse.
|
||||||
pub struct Watcher {
|
pub struct Watcher {
|
||||||
|
/// The identifier of the platform that this watcher is for.
|
||||||
|
platform_identifier: PlatformIdentifier,
|
||||||
|
|
||||||
/// The receive side of the channel that all of the drivers and various other parts of the code
|
/// The receive side of the channel that all of the drivers and various other parts of the code
|
||||||
/// send events to the watcher on.
|
/// send events to the watcher on.
|
||||||
rx: UnboundedReceiver<WatcherEvent>,
|
rx: UnboundedReceiver<WatcherEvent>,
|
||||||
@@ -27,22 +25,19 @@ pub struct Watcher {
|
|||||||
/// This is a stream of the blocks that were mined by the node. This is for a single platform
|
/// This is a stream of the blocks that were mined by the node. This is for a single platform
|
||||||
/// and a single node from that platform.
|
/// and a single node from that platform.
|
||||||
blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
|
blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
|
||||||
|
|
||||||
/// The reporter used to send events to the report aggregator.
|
|
||||||
reporter: ExecutionSpecificReporter,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Watcher {
|
impl Watcher {
|
||||||
pub fn new(
|
pub fn new(
|
||||||
|
platform_identifier: PlatformIdentifier,
|
||||||
blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
|
blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
|
||||||
reporter: ExecutionSpecificReporter,
|
|
||||||
) -> (Self, UnboundedSender<WatcherEvent>) {
|
) -> (Self, UnboundedSender<WatcherEvent>) {
|
||||||
let (tx, rx) = unbounded_channel::<WatcherEvent>();
|
let (tx, rx) = unbounded_channel::<WatcherEvent>();
|
||||||
(
|
(
|
||||||
Self {
|
Self {
|
||||||
|
platform_identifier,
|
||||||
rx,
|
rx,
|
||||||
blocks_stream,
|
blocks_stream,
|
||||||
reporter,
|
|
||||||
},
|
},
|
||||||
tx,
|
tx,
|
||||||
)
|
)
|
||||||
@@ -66,8 +61,7 @@ impl Watcher {
|
|||||||
// This is the set of the transaction hashes that the watcher should be looking for and
|
// This is the set of the transaction hashes that the watcher should be looking for and
|
||||||
// watch for them in the blocks. The watcher will keep watching for blocks until it sees
|
// watch for them in the blocks. The watcher will keep watching for blocks until it sees
|
||||||
// that all of the transactions that it was watching for has been seen in the mined blocks.
|
// that all of the transactions that it was watching for has been seen in the mined blocks.
|
||||||
let watch_for_transaction_hashes =
|
let watch_for_transaction_hashes = Arc::new(RwLock::new(HashSet::<TxHash>::new()));
|
||||||
Arc::new(RwLock::new(HashMap::<TxHash, (StepPath, SystemTime)>::new()));
|
|
||||||
|
|
||||||
// A boolean that keeps track of whether all of the transactions were submitted or if more
|
// A boolean that keeps track of whether all of the transactions were submitted or if more
|
||||||
// txs are expected to come through the receive side of the channel. We do not want to rely
|
// txs are expected to come through the receive side of the channel. We do not want to rely
|
||||||
@@ -87,14 +81,11 @@ impl Watcher {
|
|||||||
// contain nested repetitions and therefore there's no use in doing any
|
// contain nested repetitions and therefore there's no use in doing any
|
||||||
// action if the repetitions are nested.
|
// action if the repetitions are nested.
|
||||||
WatcherEvent::RepetitionStartEvent { .. } => {}
|
WatcherEvent::RepetitionStartEvent { .. } => {}
|
||||||
WatcherEvent::SubmittedTransaction {
|
WatcherEvent::SubmittedTransaction { transaction_hash } => {
|
||||||
transaction_hash,
|
|
||||||
step_path,
|
|
||||||
} => {
|
|
||||||
watch_for_transaction_hashes
|
watch_for_transaction_hashes
|
||||||
.write()
|
.write()
|
||||||
.await
|
.await
|
||||||
.insert(transaction_hash, (step_path, SystemTime::now()));
|
.insert(transaction_hash);
|
||||||
}
|
}
|
||||||
WatcherEvent::AllTransactionsSubmitted => {
|
WatcherEvent::AllTransactionsSubmitted => {
|
||||||
*all_transactions_submitted.write().await = true;
|
*all_transactions_submitted.write().await = true;
|
||||||
@@ -106,32 +97,25 @@ impl Watcher {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
let reporter = self.reporter.clone();
|
|
||||||
let block_information_watching_task = {
|
let block_information_watching_task = {
|
||||||
let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
|
let watch_for_transaction_hashes = watch_for_transaction_hashes.clone();
|
||||||
let all_transactions_submitted = all_transactions_submitted.clone();
|
let all_transactions_submitted = all_transactions_submitted.clone();
|
||||||
let mut blocks_information_stream = self.blocks_stream;
|
let mut blocks_information_stream = self.blocks_stream;
|
||||||
async move {
|
async move {
|
||||||
while let Some(mut block) = blocks_information_stream.next().await {
|
let mut mined_blocks_information = Vec::new();
|
||||||
|
|
||||||
|
// region:TEMPORARY
|
||||||
|
eprintln!("Watcher information for {}", self.platform_identifier);
|
||||||
|
eprintln!(
|
||||||
|
"block_number,block_timestamp,mined_gas,block_gas_limit,tx_count,ref_time,max_ref_time,proof_size,max_proof_size"
|
||||||
|
);
|
||||||
|
// endregion:TEMPORARY
|
||||||
|
while let Some(block) = blocks_information_stream.next().await {
|
||||||
// If the block number is equal to or less than the last block before the
|
// If the block number is equal to or less than the last block before the
|
||||||
// repetition then we ignore it and continue on to the next block.
|
// repetition then we ignore it and continue on to the next block.
|
||||||
if block.ethereum_block_information.block_number <= ignore_block_before {
|
if block.block_number <= ignore_block_before {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
{
|
|
||||||
let watch_for_transaction_hashes =
|
|
||||||
watch_for_transaction_hashes.read().await;
|
|
||||||
for tx_hash in block.ethereum_block_information.transaction_hashes.iter() {
|
|
||||||
let Some((step_path, _)) = watch_for_transaction_hashes.get(tx_hash)
|
|
||||||
else {
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
*block.tx_counts.entry(step_path.clone()).or_default() += 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
reporter
|
|
||||||
.report_block_mined_event(block.clone())
|
|
||||||
.expect("Can't fail");
|
|
||||||
|
|
||||||
if *all_transactions_submitted.read().await
|
if *all_transactions_submitted.read().await
|
||||||
&& watch_for_transaction_hashes.read().await.is_empty()
|
&& watch_for_transaction_hashes.read().await.is_empty()
|
||||||
@@ -140,8 +124,8 @@ impl Watcher {
|
|||||||
}
|
}
|
||||||
|
|
||||||
info!(
|
info!(
|
||||||
block_number = block.ethereum_block_information.block_number,
|
block_number = block.block_number,
|
||||||
block_tx_count = block.ethereum_block_information.transaction_hashes.len(),
|
block_tx_count = block.transaction_hashes.len(),
|
||||||
remaining_transactions = watch_for_transaction_hashes.read().await.len(),
|
remaining_transactions = watch_for_transaction_hashes.read().await.len(),
|
||||||
"Observed a block"
|
"Observed a block"
|
||||||
);
|
);
|
||||||
@@ -150,31 +134,33 @@ impl Watcher {
|
|||||||
// are currently watching for.
|
// are currently watching for.
|
||||||
let mut watch_for_transaction_hashes =
|
let mut watch_for_transaction_hashes =
|
||||||
watch_for_transaction_hashes.write().await;
|
watch_for_transaction_hashes.write().await;
|
||||||
for tx_hash in block.ethereum_block_information.transaction_hashes.iter() {
|
for tx_hash in block.transaction_hashes.iter() {
|
||||||
let Some((step_path, submission_time)) =
|
watch_for_transaction_hashes.remove(tx_hash);
|
||||||
watch_for_transaction_hashes.remove(tx_hash)
|
|
||||||
else {
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
let transaction_information = TransactionInformation {
|
|
||||||
transaction_hash: *tx_hash,
|
|
||||||
submission_timestamp: submission_time
|
|
||||||
.duration_since(UNIX_EPOCH)
|
|
||||||
.expect("Can't fail")
|
|
||||||
.as_secs() as _,
|
|
||||||
block_timestamp: block.ethereum_block_information.block_timestamp,
|
|
||||||
block_number: block.ethereum_block_information.block_number,
|
|
||||||
};
|
|
||||||
reporter
|
|
||||||
.report_step_transaction_information_event(
|
|
||||||
step_path,
|
|
||||||
transaction_information,
|
|
||||||
)
|
|
||||||
.expect("Can't fail")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// region:TEMPORARY
|
||||||
|
// TODO: The following core is TEMPORARY and will be removed once we have proper
|
||||||
|
// reporting in place and then it can be removed. This serves as as way of doing
|
||||||
|
// some very simple reporting for the time being.
|
||||||
|
eprintln!(
|
||||||
|
"\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\"",
|
||||||
|
block.block_number,
|
||||||
|
block.block_timestamp,
|
||||||
|
block.mined_gas,
|
||||||
|
block.block_gas_limit,
|
||||||
|
block.transaction_hashes.len(),
|
||||||
|
block.ref_time,
|
||||||
|
block.max_ref_time,
|
||||||
|
block.proof_size,
|
||||||
|
block.max_proof_size,
|
||||||
|
);
|
||||||
|
// endregion:TEMPORARY
|
||||||
|
|
||||||
|
mined_blocks_information.push(block);
|
||||||
}
|
}
|
||||||
|
|
||||||
info!("Watcher's Block Watching Task Finished");
|
info!("Watcher's Block Watching Task Finished");
|
||||||
|
mined_blocks_information
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -186,7 +172,7 @@ impl Watcher {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||||
pub enum WatcherEvent {
|
pub enum WatcherEvent {
|
||||||
/// Informs the watcher that it should begin watching for the blocks mined by the platforms.
|
/// Informs the watcher that it should begin watching for the blocks mined by the platforms.
|
||||||
/// Before the watcher receives this event it will not be watching for the mined blocks. The
|
/// Before the watcher receives this event it will not be watching for the mined blocks. The
|
||||||
@@ -200,14 +186,14 @@ pub enum WatcherEvent {
|
|||||||
/// streaming the blocks.
|
/// streaming the blocks.
|
||||||
ignore_block_before: BlockNumber,
|
ignore_block_before: BlockNumber,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// Informs the watcher that a transaction was submitted and that the watcher should watch for a
|
/// Informs the watcher that a transaction was submitted and that the watcher should watch for a
|
||||||
/// transaction with this hash in the blocks that it watches.
|
/// transaction with this hash in the blocks that it watches.
|
||||||
SubmittedTransaction {
|
SubmittedTransaction {
|
||||||
/// The hash of the submitted transaction.
|
/// The hash of the submitted transaction.
|
||||||
transaction_hash: TxHash,
|
transaction_hash: TxHash,
|
||||||
/// The step path of the step that the transaction belongs to.
|
|
||||||
step_path: StepPath,
|
|
||||||
},
|
},
|
||||||
|
|
||||||
/// Informs the watcher that all of the transactions of this benchmark have been submitted and
|
/// Informs the watcher that all of the transactions of this benchmark have been submitted and
|
||||||
/// that it can expect to receive no further transaction hashes and not even watch the channel
|
/// that it can expect to receive no further transaction hashes and not even watch the channel
|
||||||
/// any longer.
|
/// any longer.
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ use alloy::{
|
|||||||
hex,
|
hex,
|
||||||
json_abi::JsonAbi,
|
json_abi::JsonAbi,
|
||||||
network::{Ethereum, TransactionBuilder},
|
network::{Ethereum, TransactionBuilder},
|
||||||
primitives::{Address, TxHash, U256, address},
|
primitives::{Address, TxHash, U256},
|
||||||
rpc::types::{
|
rpc::types::{
|
||||||
TransactionReceipt, TransactionRequest,
|
TransactionReceipt, TransactionRequest,
|
||||||
trace::geth::{
|
trace::geth::{
|
||||||
@@ -18,9 +18,9 @@ use alloy::{
|
|||||||
},
|
},
|
||||||
};
|
};
|
||||||
use anyhow::{Context as _, Result, bail};
|
use anyhow::{Context as _, Result, bail};
|
||||||
use futures::{TryStreamExt, future::try_join_all};
|
use futures::TryStreamExt;
|
||||||
use indexmap::IndexMap;
|
use indexmap::IndexMap;
|
||||||
use revive_dt_common::types::{PlatformIdentifier, PrivateKeyAllocator, VmIdentifier};
|
use revive_dt_common::types::{PlatformIdentifier, PrivateKeyAllocator};
|
||||||
use revive_dt_format::{
|
use revive_dt_format::{
|
||||||
metadata::{ContractInstance, ContractPathAndIdent},
|
metadata::{ContractInstance, ContractPathAndIdent},
|
||||||
steps::{
|
steps::{
|
||||||
@@ -30,7 +30,6 @@ use revive_dt_format::{
|
|||||||
},
|
},
|
||||||
traits::ResolutionContext,
|
traits::ResolutionContext,
|
||||||
};
|
};
|
||||||
use subxt::{ext::codec::Decode, metadata::Metadata, tx::Payload};
|
|
||||||
use tokio::sync::Mutex;
|
use tokio::sync::Mutex;
|
||||||
use tracing::{error, info, instrument};
|
use tracing::{error, info, instrument};
|
||||||
|
|
||||||
@@ -199,8 +198,6 @@ where
|
|||||||
})
|
})
|
||||||
.context("Failed to produce the pre-linking compiled contracts")?;
|
.context("Failed to produce the pre-linking compiled contracts")?;
|
||||||
|
|
||||||
let deployer_address = test_definition.case.deployer_address();
|
|
||||||
|
|
||||||
let mut deployed_libraries = None::<HashMap<_, _>>;
|
let mut deployed_libraries = None::<HashMap<_, _>>;
|
||||||
let mut contract_sources = test_definition
|
let mut contract_sources = test_definition
|
||||||
.metadata
|
.metadata
|
||||||
@@ -235,6 +232,22 @@ where
|
|||||||
|
|
||||||
let code = alloy::hex::decode(code)?;
|
let code = alloy::hex::decode(code)?;
|
||||||
|
|
||||||
|
// Getting the deployer address from the cases themselves. This is to ensure
|
||||||
|
// that we're doing the deployments from different accounts and therefore we're
|
||||||
|
// not slowed down by the nonce.
|
||||||
|
let deployer_address = test_definition
|
||||||
|
.case
|
||||||
|
.steps
|
||||||
|
.iter()
|
||||||
|
.filter_map(|step| match step {
|
||||||
|
Step::FunctionCall(input) => input.caller.as_address().copied(),
|
||||||
|
Step::BalanceAssertion(..) => None,
|
||||||
|
Step::StorageEmptyAssertion(..) => None,
|
||||||
|
Step::Repeat(..) => None,
|
||||||
|
Step::AllocateAccount(..) => None,
|
||||||
|
})
|
||||||
|
.next()
|
||||||
|
.unwrap_or(FunctionCallStep::default_caller_address());
|
||||||
let tx = TransactionBuilder::<Ethereum>::with_deploy_code(
|
let tx = TransactionBuilder::<Ethereum>::with_deploy_code(
|
||||||
TransactionRequest::default().from(deployer_address),
|
TransactionRequest::default().from(deployer_address),
|
||||||
code,
|
code,
|
||||||
@@ -282,51 +295,6 @@ where
|
|||||||
})
|
})
|
||||||
.context("Failed to compile the post-link contracts")?;
|
.context("Failed to compile the post-link contracts")?;
|
||||||
|
|
||||||
// Factory contracts on the PVM refer to the code that they're instantiating by hash rather
|
|
||||||
// than including the actual bytecode. This creates a problem where a factory contract could
|
|
||||||
// be deployed but the code it's supposed to create is not on chain. Therefore, we upload
|
|
||||||
// all the code to the chain prior to running any transactions on the driver.
|
|
||||||
if platform_information.platform.vm_identifier() == VmIdentifier::PolkaVM {
|
|
||||||
#[subxt::subxt(runtime_metadata_path = "../../assets/revive_metadata.scale")]
|
|
||||||
pub mod revive {}
|
|
||||||
|
|
||||||
let metadata_bytes = include_bytes!("../../../../assets/revive_metadata.scale");
|
|
||||||
let metadata = Metadata::decode(&mut &metadata_bytes[..])
|
|
||||||
.context("Failed to decode the revive metadata")?;
|
|
||||||
|
|
||||||
const RUNTIME_PALLET_ADDRESS: Address =
|
|
||||||
address!("0x6d6f646c70792f70616464720000000000000000");
|
|
||||||
|
|
||||||
let code_upload_tasks = compiler_output
|
|
||||||
.contracts
|
|
||||||
.values()
|
|
||||||
.flat_map(|item| item.values())
|
|
||||||
.map(|(code_string, _)| {
|
|
||||||
let metadata = metadata.clone();
|
|
||||||
async move {
|
|
||||||
let code = alloy::hex::decode(code_string)
|
|
||||||
.context("Failed to hex-decode the post-link code. This is a bug")?;
|
|
||||||
let payload = revive::tx().revive().upload_code(code, u128::MAX);
|
|
||||||
let encoded_payload = payload
|
|
||||||
.encode_call_data(&metadata)
|
|
||||||
.context("Failed to encode the upload code payload")?;
|
|
||||||
|
|
||||||
let tx_request = TransactionRequest::default()
|
|
||||||
.from(deployer_address)
|
|
||||||
.to(RUNTIME_PALLET_ADDRESS)
|
|
||||||
.input(encoded_payload.into());
|
|
||||||
platform_information
|
|
||||||
.node
|
|
||||||
.execute_transaction(tx_request)
|
|
||||||
.await
|
|
||||||
.context("Failed to execute transaction")
|
|
||||||
}
|
|
||||||
});
|
|
||||||
try_join_all(code_upload_tasks)
|
|
||||||
.await
|
|
||||||
.context("Code upload failed")?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(ExecutionState::new(
|
Ok(ExecutionState::new(
|
||||||
compiler_output.contracts,
|
compiler_output.contracts,
|
||||||
deployed_libraries.unwrap_or_default(),
|
deployed_libraries.unwrap_or_default(),
|
||||||
@@ -385,17 +353,12 @@ where
|
|||||||
.execute_account_allocation(step_path, step.as_ref())
|
.execute_account_allocation(step_path, step.as_ref())
|
||||||
.await
|
.await
|
||||||
.context("Account Allocation Step Failed"),
|
.context("Account Allocation Step Failed"),
|
||||||
}
|
}?;
|
||||||
.context(format!("Failure on step {step_path}"))?;
|
|
||||||
self.steps_executed += steps_executed;
|
self.steps_executed += steps_executed;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[instrument(
|
#[instrument(level = "info", skip_all)]
|
||||||
level = "info",
|
|
||||||
skip_all,
|
|
||||||
fields(block_number = tracing::field::Empty)
|
|
||||||
)]
|
|
||||||
pub async fn execute_function_call(
|
pub async fn execute_function_call(
|
||||||
&mut self,
|
&mut self,
|
||||||
_: &StepPath,
|
_: &StepPath,
|
||||||
@@ -409,7 +372,6 @@ where
|
|||||||
.handle_function_call_execution(step, deployment_receipts)
|
.handle_function_call_execution(step, deployment_receipts)
|
||||||
.await
|
.await
|
||||||
.context("Failed to handle the function call execution")?;
|
.context("Failed to handle the function call execution")?;
|
||||||
tracing::Span::current().record("block_number", execution_receipt.block_number);
|
|
||||||
let tracing_result = self
|
let tracing_result = self
|
||||||
.handle_function_call_call_frame_tracing(execution_receipt.transaction_hash)
|
.handle_function_call_call_frame_tracing(execution_receipt.transaction_hash)
|
||||||
.await
|
.await
|
||||||
@@ -635,26 +597,21 @@ where
|
|||||||
let expected = !assertion.exception;
|
let expected = !assertion.exception;
|
||||||
let actual = receipt.status();
|
let actual = receipt.status();
|
||||||
if actual != expected {
|
if actual != expected {
|
||||||
let revert_reason = tracing_result
|
|
||||||
.revert_reason
|
|
||||||
.as_ref()
|
|
||||||
.or(tracing_result.error.as_ref());
|
|
||||||
tracing::error!(
|
tracing::error!(
|
||||||
expected,
|
expected,
|
||||||
actual,
|
actual,
|
||||||
?receipt,
|
?receipt,
|
||||||
?tracing_result,
|
?tracing_result,
|
||||||
?revert_reason,
|
|
||||||
"Transaction status assertion failed"
|
"Transaction status assertion failed"
|
||||||
);
|
);
|
||||||
anyhow::bail!(
|
anyhow::bail!(
|
||||||
"Transaction status assertion failed - Expected {expected} but got {actual}. Revert reason: {revert_reason:?}",
|
"Transaction status assertion failed - Expected {expected} but got {actual}",
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handling the calldata assertion
|
// Handling the calldata assertion
|
||||||
if let Some(ref expected_output) = assertion.return_data {
|
if let Some(ref expected_calldata) = assertion.return_data {
|
||||||
let expected = expected_output;
|
let expected = expected_calldata;
|
||||||
let actual = &tracing_result.output.as_ref().unwrap_or_default();
|
let actual = &tracing_result.output.as_ref().unwrap_or_default();
|
||||||
if !expected
|
if !expected
|
||||||
.is_equivalent(actual, resolver.as_ref(), resolution_context)
|
.is_equivalent(actual, resolver.as_ref(), resolution_context)
|
||||||
@@ -665,9 +622,9 @@ where
|
|||||||
?receipt,
|
?receipt,
|
||||||
?expected,
|
?expected,
|
||||||
%actual,
|
%actual,
|
||||||
"Output assertion failed"
|
"Calldata assertion failed"
|
||||||
);
|
);
|
||||||
anyhow::bail!("Output assertion failed - Expected {expected:?} but got {actual}",);
|
anyhow::bail!("Calldata assertion failed - Expected {expected:?} but got {actual}",);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -330,7 +330,6 @@ async fn start_cli_reporting_task(output_format: OutputFormat, reporter: Reporte
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
writeln!(buf).unwrap();
|
writeln!(buf).unwrap();
|
||||||
|
|
||||||
if aggregator_events_rx.is_empty() {
|
|
||||||
buf = tokio::task::spawn_blocking(move || {
|
buf = tokio::task::spawn_blocking(move || {
|
||||||
buf.flush().unwrap();
|
buf.flush().unwrap();
|
||||||
buf
|
buf
|
||||||
@@ -340,8 +339,6 @@ async fn start_cli_reporting_task(output_format: OutputFormat, reporter: Reporte
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
info!("Aggregator Broadcast Channel Closed");
|
|
||||||
|
|
||||||
// Summary at the end.
|
// Summary at the end.
|
||||||
match output_format {
|
match output_format {
|
||||||
|
|||||||
@@ -32,18 +32,9 @@ pub async fn create_test_definitions_stream<'a>(
|
|||||||
only_execute_failed_tests: Option<&Report>,
|
only_execute_failed_tests: Option<&Report>,
|
||||||
reporter: Reporter,
|
reporter: Reporter,
|
||||||
) -> impl Stream<Item = TestDefinition<'a>> {
|
) -> impl Stream<Item = TestDefinition<'a>> {
|
||||||
let cloned_reporter = reporter.clone();
|
|
||||||
stream::iter(
|
stream::iter(
|
||||||
corpus
|
corpus
|
||||||
.cases_iterator()
|
.cases_iterator()
|
||||||
.inspect(move |(metadata_file, ..)| {
|
|
||||||
cloned_reporter
|
|
||||||
.report_metadata_file_discovery_event(
|
|
||||||
metadata_file.metadata_file_path.clone(),
|
|
||||||
metadata_file.content.clone(),
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
})
|
|
||||||
.map(move |(metadata_file, case_idx, case, mode)| {
|
.map(move |(metadata_file, case_idx, case, mode)| {
|
||||||
let reporter = reporter.clone();
|
let reporter = reporter.clone();
|
||||||
|
|
||||||
@@ -319,10 +310,10 @@ impl<'a> TestDefinition<'a> {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let test_case_status = report
|
let test_case_status = report
|
||||||
.execution_information
|
.test_case_information
|
||||||
.get(&(self.metadata_file_path.to_path_buf().into()))
|
.get(&(self.metadata_file_path.to_path_buf().into()))
|
||||||
.and_then(|obj| obj.case_reports.get(&self.case_idx))
|
.and_then(|obj| obj.get(&self.mode))
|
||||||
.and_then(|obj| obj.mode_execution_reports.get(&self.mode))
|
.and_then(|obj| obj.get(&self.case_idx))
|
||||||
.and_then(|obj| obj.status.as_ref());
|
.and_then(|obj| obj.status.as_ref());
|
||||||
|
|
||||||
match test_case_status {
|
match test_case_status {
|
||||||
|
|||||||
@@ -172,6 +172,134 @@ impl Platform for LighthouseGethEvmSolcPlatform {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
|
pub struct KitchensinkPolkavmResolcPlatform;
|
||||||
|
|
||||||
|
impl Platform for KitchensinkPolkavmResolcPlatform {
|
||||||
|
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||||
|
PlatformIdentifier::KitchensinkPolkavmResolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn node_identifier(&self) -> NodeIdentifier {
|
||||||
|
NodeIdentifier::Kitchensink
|
||||||
|
}
|
||||||
|
|
||||||
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
|
VmIdentifier::PolkaVM
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
|
CompilerIdentifier::Resolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_node(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
|
let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
|
||||||
|
.path
|
||||||
|
.clone();
|
||||||
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
|
Ok(thread::spawn(move || {
|
||||||
|
let node = SubstrateNode::new(
|
||||||
|
kitchensink_path,
|
||||||
|
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
|
||||||
|
None,
|
||||||
|
context,
|
||||||
|
&[],
|
||||||
|
);
|
||||||
|
let node = spawn_node(node, genesis)?;
|
||||||
|
Ok(Box::new(node) as Box<_>)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_compiler(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let compiler = Resolc::new(context, version).await;
|
||||||
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
||||||
|
let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
|
||||||
|
.path
|
||||||
|
.as_path();
|
||||||
|
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
||||||
|
let export_chainspec_command = SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND;
|
||||||
|
|
||||||
|
SubstrateNode::node_genesis(kitchensink_path, export_chainspec_command, &wallet)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
|
pub struct KitchensinkRevmSolcPlatform;
|
||||||
|
|
||||||
|
impl Platform for KitchensinkRevmSolcPlatform {
|
||||||
|
fn platform_identifier(&self) -> PlatformIdentifier {
|
||||||
|
PlatformIdentifier::KitchensinkRevmSolc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn node_identifier(&self) -> NodeIdentifier {
|
||||||
|
NodeIdentifier::Kitchensink
|
||||||
|
}
|
||||||
|
|
||||||
|
fn vm_identifier(&self) -> VmIdentifier {
|
||||||
|
VmIdentifier::Evm
|
||||||
|
}
|
||||||
|
|
||||||
|
fn compiler_identifier(&self) -> CompilerIdentifier {
|
||||||
|
CompilerIdentifier::Solc
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_node(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
|
||||||
|
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
|
||||||
|
let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
|
||||||
|
.path
|
||||||
|
.clone();
|
||||||
|
let genesis = genesis_configuration.genesis()?.clone();
|
||||||
|
Ok(thread::spawn(move || {
|
||||||
|
let node = SubstrateNode::new(
|
||||||
|
kitchensink_path,
|
||||||
|
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
|
||||||
|
None,
|
||||||
|
context,
|
||||||
|
&[],
|
||||||
|
);
|
||||||
|
let node = spawn_node(node, genesis)?;
|
||||||
|
Ok(Box::new(node) as Box<_>)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_compiler(
|
||||||
|
&self,
|
||||||
|
context: Context,
|
||||||
|
version: Option<VersionOrRequirement>,
|
||||||
|
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
|
||||||
|
Box::pin(async move {
|
||||||
|
let compiler = Solc::new(context, version).await;
|
||||||
|
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
|
||||||
|
let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
|
||||||
|
.path
|
||||||
|
.as_path();
|
||||||
|
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
|
||||||
|
let export_chainspec_command = SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND;
|
||||||
|
|
||||||
|
SubstrateNode::node_genesis(kitchensink_path, export_chainspec_command, &wallet)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
|
||||||
pub struct ReviveDevNodePolkavmResolcPlatform;
|
pub struct ReviveDevNodePolkavmResolcPlatform;
|
||||||
|
|
||||||
@@ -429,6 +557,12 @@ impl From<PlatformIdentifier> for Box<dyn Platform> {
|
|||||||
PlatformIdentifier::LighthouseGethEvmSolc => {
|
PlatformIdentifier::LighthouseGethEvmSolc => {
|
||||||
Box::new(LighthouseGethEvmSolcPlatform) as Box<_>
|
Box::new(LighthouseGethEvmSolcPlatform) as Box<_>
|
||||||
}
|
}
|
||||||
|
PlatformIdentifier::KitchensinkPolkavmResolc => {
|
||||||
|
Box::new(KitchensinkPolkavmResolcPlatform) as Box<_>
|
||||||
|
}
|
||||||
|
PlatformIdentifier::KitchensinkRevmSolc => {
|
||||||
|
Box::new(KitchensinkRevmSolcPlatform) as Box<_>
|
||||||
|
}
|
||||||
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
|
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
|
||||||
Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_>
|
Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_>
|
||||||
}
|
}
|
||||||
@@ -450,6 +584,12 @@ impl From<PlatformIdentifier> for &dyn Platform {
|
|||||||
PlatformIdentifier::LighthouseGethEvmSolc => {
|
PlatformIdentifier::LighthouseGethEvmSolc => {
|
||||||
&LighthouseGethEvmSolcPlatform as &dyn Platform
|
&LighthouseGethEvmSolcPlatform as &dyn Platform
|
||||||
}
|
}
|
||||||
|
PlatformIdentifier::KitchensinkPolkavmResolc => {
|
||||||
|
&KitchensinkPolkavmResolcPlatform as &dyn Platform
|
||||||
|
}
|
||||||
|
PlatformIdentifier::KitchensinkRevmSolc => {
|
||||||
|
&KitchensinkRevmSolcPlatform as &dyn Platform
|
||||||
|
}
|
||||||
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
|
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
|
||||||
&ReviveDevNodePolkavmResolcPlatform as &dyn Platform
|
&ReviveDevNodePolkavmResolcPlatform as &dyn Platform
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
use alloy::primitives::Address;
|
|
||||||
use schemars::JsonSchema;
|
use schemars::JsonSchema;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
@@ -108,20 +107,6 @@ impl Case {
|
|||||||
None => Mode::all().cloned().collect(),
|
None => Mode::all().cloned().collect(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn deployer_address(&self) -> Address {
|
|
||||||
self.steps
|
|
||||||
.iter()
|
|
||||||
.filter_map(|step| match step {
|
|
||||||
Step::FunctionCall(input) => input.caller.as_address().copied(),
|
|
||||||
Step::BalanceAssertion(..) => None,
|
|
||||||
Step::StorageEmptyAssertion(..) => None,
|
|
||||||
Step::Repeat(..) => None,
|
|
||||||
Step::AllocateAccount(..) => None,
|
|
||||||
})
|
|
||||||
.next()
|
|
||||||
.unwrap_or(FunctionCallStep::default_caller_address())
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
define_wrapper_type!(
|
define_wrapper_type!(
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
use std::{collections::HashMap, fmt::Display, str::FromStr};
|
use std::{collections::HashMap, fmt::Display, str::FromStr};
|
||||||
|
|
||||||
use alloy::hex::ToHexExt;
|
|
||||||
use alloy::primitives::{FixedBytes, utils::parse_units};
|
use alloy::primitives::{FixedBytes, utils::parse_units};
|
||||||
use alloy::{
|
use alloy::{
|
||||||
eips::BlockNumberOrTag,
|
eips::BlockNumberOrTag,
|
||||||
@@ -46,12 +45,12 @@ pub enum Step {
|
|||||||
}
|
}
|
||||||
|
|
||||||
define_wrapper_type!(
|
define_wrapper_type!(
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||||
pub struct StepIdx(usize) impl Display, FromStr;
|
pub struct StepIdx(usize) impl Display, FromStr;
|
||||||
);
|
);
|
||||||
|
|
||||||
define_wrapper_type!(
|
define_wrapper_type!(
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||||
#[serde(try_from = "String", into = "String")]
|
#[serde(try_from = "String", into = "String")]
|
||||||
pub struct StepPath(Vec<StepIdx>);
|
pub struct StepPath(Vec<StepIdx>);
|
||||||
);
|
);
|
||||||
@@ -687,8 +686,8 @@ impl Calldata {
|
|||||||
Calldata::Compound(items) => {
|
Calldata::Compound(items) => {
|
||||||
stream::iter(items.iter().zip(other.chunks(32)))
|
stream::iter(items.iter().zip(other.chunks(32)))
|
||||||
.map(|(this, other)| async move {
|
.map(|(this, other)| async move {
|
||||||
// The MatterLabs format supports wildcards and therefore we also need to
|
// The matterlabs format supports wildcards and therefore we
|
||||||
// support them.
|
// also need to support them.
|
||||||
if this.as_ref() == "*" {
|
if this.as_ref() == "*" {
|
||||||
return Ok::<_, anyhow::Error>(true);
|
return Ok::<_, anyhow::Error>(true);
|
||||||
}
|
}
|
||||||
@@ -706,7 +705,6 @@ impl Calldata {
|
|||||||
.await
|
.await
|
||||||
.context("Failed to resolve calldata item during equivalence check")?;
|
.context("Failed to resolve calldata item during equivalence check")?;
|
||||||
let other = U256::from_be_slice(&other);
|
let other = U256::from_be_slice(&other);
|
||||||
|
|
||||||
Ok(this == other)
|
Ok(this == other)
|
||||||
})
|
})
|
||||||
.buffered(0xFF)
|
.buffered(0xFF)
|
||||||
@@ -719,7 +717,7 @@ impl Calldata {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl CalldataItem {
|
impl CalldataItem {
|
||||||
#[instrument(level = "info", skip_all, err(Debug))]
|
#[instrument(level = "info", skip_all, err)]
|
||||||
async fn resolve(
|
async fn resolve(
|
||||||
&self,
|
&self,
|
||||||
resolver: &(impl ResolverApi + ?Sized),
|
resolver: &(impl ResolverApi + ?Sized),
|
||||||
@@ -770,14 +768,7 @@ impl CalldataItem {
|
|||||||
match stack.as_slice() {
|
match stack.as_slice() {
|
||||||
// Empty stack means that we got an empty compound calldata which we resolve to zero.
|
// Empty stack means that we got an empty compound calldata which we resolve to zero.
|
||||||
[] => Ok(U256::ZERO),
|
[] => Ok(U256::ZERO),
|
||||||
[CalldataToken::Item(item)] => {
|
[CalldataToken::Item(item)] => Ok(*item),
|
||||||
tracing::debug!(
|
|
||||||
original_item = ?self,
|
|
||||||
resolved_item = item.to_be_bytes::<32>().encode_hex(),
|
|
||||||
"Resolution Done"
|
|
||||||
);
|
|
||||||
Ok(*item)
|
|
||||||
}
|
|
||||||
_ => Err(anyhow::anyhow!(
|
_ => Err(anyhow::anyhow!(
|
||||||
"Invalid calldata arithmetic operation - Invalid stack"
|
"Invalid calldata arithmetic operation - Invalid stack"
|
||||||
)),
|
)),
|
||||||
@@ -907,7 +898,7 @@ impl<T: AsRef<str>> CalldataToken<T> {
|
|||||||
let block_hash = resolver
|
let block_hash = resolver
|
||||||
.block_hash(desired_block_number.into())
|
.block_hash(desired_block_number.into())
|
||||||
.await
|
.await
|
||||||
.context(format!("Failed to resolve the block hash of block number {desired_block_number}"))?;
|
.context("Failed to resolve block hash for desired block number")?;
|
||||||
|
|
||||||
Ok(U256::from_be_bytes(block_hash.0))
|
Ok(U256::from_be_bytes(block_hash.0))
|
||||||
} else if item == Self::BLOCK_NUMBER_VARIABLE {
|
} else if item == Self::BLOCK_NUMBER_VARIABLE {
|
||||||
|
|||||||
@@ -12,7 +12,6 @@ rust-version.workspace = true
|
|||||||
revive-common = { workspace = true }
|
revive-common = { workspace = true }
|
||||||
|
|
||||||
revive-dt-format = { workspace = true }
|
revive-dt-format = { workspace = true }
|
||||||
revive-dt-report = { workspace = true }
|
|
||||||
|
|
||||||
alloy = { workspace = true }
|
alloy = { workspace = true }
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ use std::pin::Pin;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use alloy::network::Ethereum;
|
use alloy::network::Ethereum;
|
||||||
use alloy::primitives::{Address, StorageKey, TxHash, U256};
|
use alloy::primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256};
|
||||||
use alloy::providers::DynProvider;
|
use alloy::providers::DynProvider;
|
||||||
use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
|
use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
|
||||||
use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
|
use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
|
||||||
@@ -13,7 +13,6 @@ use anyhow::Result;
|
|||||||
use futures::Stream;
|
use futures::Stream;
|
||||||
use revive_common::EVMVersion;
|
use revive_common::EVMVersion;
|
||||||
use revive_dt_format::traits::ResolverApi;
|
use revive_dt_format::traits::ResolverApi;
|
||||||
use revive_dt_report::MinedBlockInformation;
|
|
||||||
|
|
||||||
/// An interface for all interactions with Ethereum compatible nodes.
|
/// An interface for all interactions with Ethereum compatible nodes.
|
||||||
#[allow(clippy::type_complexity)]
|
#[allow(clippy::type_complexity)]
|
||||||
@@ -81,3 +80,33 @@ pub trait EthereumNode {
|
|||||||
fn provider(&self)
|
fn provider(&self)
|
||||||
-> Pin<Box<dyn Future<Output = anyhow::Result<DynProvider<Ethereum>>> + '_>>;
|
-> Pin<Box<dyn Future<Output = anyhow::Result<DynProvider<Ethereum>>> + '_>>;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||||
|
pub struct MinedBlockInformation {
|
||||||
|
/// The block number.
|
||||||
|
pub block_number: BlockNumber,
|
||||||
|
|
||||||
|
/// The block timestamp.
|
||||||
|
pub block_timestamp: BlockTimestamp,
|
||||||
|
|
||||||
|
/// The amount of gas mined in the block.
|
||||||
|
pub mined_gas: u128,
|
||||||
|
|
||||||
|
/// The gas limit of the block.
|
||||||
|
pub block_gas_limit: u128,
|
||||||
|
|
||||||
|
/// The hashes of the transactions that were mined as part of the block.
|
||||||
|
pub transaction_hashes: Vec<TxHash>,
|
||||||
|
|
||||||
|
/// The ref time for substrate based chains.
|
||||||
|
pub ref_time: u128,
|
||||||
|
|
||||||
|
/// The max ref time for substrate based chains.
|
||||||
|
pub max_ref_time: u64,
|
||||||
|
|
||||||
|
/// The proof size for substrate based chains.
|
||||||
|
pub proof_size: u128,
|
||||||
|
|
||||||
|
/// The max proof size for substrate based chains.
|
||||||
|
pub max_proof_size: u64,
|
||||||
|
}
|
||||||
|
|||||||
@@ -21,7 +21,6 @@ revive-dt-common = { workspace = true }
|
|||||||
revive-dt-config = { workspace = true }
|
revive-dt-config = { workspace = true }
|
||||||
revive-dt-format = { workspace = true }
|
revive-dt-format = { workspace = true }
|
||||||
revive-dt-node-interaction = { workspace = true }
|
revive-dt-node-interaction = { workspace = true }
|
||||||
revive-dt-report = { workspace = true }
|
|
||||||
|
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
|
|||||||
@@ -43,8 +43,7 @@ use revive_dt_common::{
|
|||||||
};
|
};
|
||||||
use revive_dt_config::*;
|
use revive_dt_config::*;
|
||||||
use revive_dt_format::traits::ResolverApi;
|
use revive_dt_format::traits::ResolverApi;
|
||||||
use revive_dt_node_interaction::EthereumNode;
|
use revive_dt_node_interaction::{EthereumNode, MinedBlockInformation};
|
||||||
use revive_dt_report::{EthereumMinedBlockInformation, MinedBlockInformation};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
Node,
|
Node,
|
||||||
@@ -527,7 +526,6 @@ impl EthereumNode for GethNode {
|
|||||||
let mined_block_information_stream = block_stream.filter_map(|block| async {
|
let mined_block_information_stream = block_stream.filter_map(|block| async {
|
||||||
let block = block.ok()?;
|
let block = block.ok()?;
|
||||||
Some(MinedBlockInformation {
|
Some(MinedBlockInformation {
|
||||||
ethereum_block_information: EthereumMinedBlockInformation {
|
|
||||||
block_number: block.number(),
|
block_number: block.number(),
|
||||||
block_timestamp: block.header.timestamp,
|
block_timestamp: block.header.timestamp,
|
||||||
mined_gas: block.header.gas_used as _,
|
mined_gas: block.header.gas_used as _,
|
||||||
@@ -538,9 +536,10 @@ impl EthereumNode for GethNode {
|
|||||||
.as_hashes()
|
.as_hashes()
|
||||||
.expect("Must be hashes")
|
.expect("Must be hashes")
|
||||||
.to_vec(),
|
.to_vec(),
|
||||||
},
|
ref_time: 0,
|
||||||
substrate_block_information: None,
|
max_ref_time: 0,
|
||||||
tx_counts: Default::default(),
|
proof_size: 0,
|
||||||
|
max_proof_size: 0,
|
||||||
})
|
})
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|||||||
@@ -56,8 +56,7 @@ use revive_dt_common::{
|
|||||||
};
|
};
|
||||||
use revive_dt_config::*;
|
use revive_dt_config::*;
|
||||||
use revive_dt_format::traits::ResolverApi;
|
use revive_dt_format::traits::ResolverApi;
|
||||||
use revive_dt_node_interaction::EthereumNode;
|
use revive_dt_node_interaction::{EthereumNode, MinedBlockInformation};
|
||||||
use revive_dt_report::{EthereumMinedBlockInformation, MinedBlockInformation};
|
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
Node,
|
Node,
|
||||||
@@ -758,7 +757,6 @@ impl EthereumNode for LighthouseGethNode {
|
|||||||
let mined_block_information_stream = block_stream.filter_map(|block| async {
|
let mined_block_information_stream = block_stream.filter_map(|block| async {
|
||||||
let block = block.ok()?;
|
let block = block.ok()?;
|
||||||
Some(MinedBlockInformation {
|
Some(MinedBlockInformation {
|
||||||
ethereum_block_information: EthereumMinedBlockInformation {
|
|
||||||
block_number: block.number(),
|
block_number: block.number(),
|
||||||
block_timestamp: block.header.timestamp,
|
block_timestamp: block.header.timestamp,
|
||||||
mined_gas: block.header.gas_used as _,
|
mined_gas: block.header.gas_used as _,
|
||||||
@@ -769,9 +767,10 @@ impl EthereumNode for LighthouseGethNode {
|
|||||||
.as_hashes()
|
.as_hashes()
|
||||||
.expect("Must be hashes")
|
.expect("Must be hashes")
|
||||||
.to_vec(),
|
.to_vec(),
|
||||||
},
|
ref_time: 0,
|
||||||
substrate_block_information: None,
|
max_ref_time: 0,
|
||||||
tx_counts: Default::default(),
|
proof_size: 0,
|
||||||
|
max_proof_size: 0,
|
||||||
})
|
})
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ use std::{
|
|||||||
pin::Pin,
|
pin::Pin,
|
||||||
process::{Command, Stdio},
|
process::{Command, Stdio},
|
||||||
sync::{
|
sync::{
|
||||||
Arc, Mutex,
|
Arc,
|
||||||
atomic::{AtomicU32, Ordering},
|
atomic::{AtomicU32, Ordering},
|
||||||
},
|
},
|
||||||
time::Duration,
|
time::Duration,
|
||||||
@@ -32,15 +32,12 @@ use futures::{FutureExt, Stream, StreamExt};
|
|||||||
use revive_common::EVMVersion;
|
use revive_common::EVMVersion;
|
||||||
use revive_dt_common::fs::clear_directory;
|
use revive_dt_common::fs::clear_directory;
|
||||||
use revive_dt_format::traits::ResolverApi;
|
use revive_dt_format::traits::ResolverApi;
|
||||||
use serde_json::{Value, json};
|
use serde_json::json;
|
||||||
use sp_core::crypto::Ss58Codec;
|
use sp_core::crypto::Ss58Codec;
|
||||||
use sp_runtime::AccountId32;
|
use sp_runtime::AccountId32;
|
||||||
|
|
||||||
use revive_dt_config::*;
|
use revive_dt_config::*;
|
||||||
use revive_dt_node_interaction::EthereumNode;
|
use revive_dt_node_interaction::{EthereumNode, MinedBlockInformation};
|
||||||
use revive_dt_report::{
|
|
||||||
EthereumMinedBlockInformation, MinedBlockInformation, SubstrateMinedBlockInformation,
|
|
||||||
};
|
|
||||||
use subxt::{OnlineClient, SubstrateConfig};
|
use subxt::{OnlineClient, SubstrateConfig};
|
||||||
use tokio::sync::OnceCell;
|
use tokio::sync::OnceCell;
|
||||||
use tracing::{instrument, trace};
|
use tracing::{instrument, trace};
|
||||||
@@ -57,9 +54,6 @@ use crate::{
|
|||||||
|
|
||||||
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
|
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
|
||||||
|
|
||||||
/// The number of blocks that should be cached by the revive-dev-node and the eth-rpc.
|
|
||||||
const NUMBER_OF_CACHED_BLOCKS: u32 = 100_000;
|
|
||||||
|
|
||||||
/// A node implementation for Substrate based chains. Currently, this supports either substrate
|
/// A node implementation for Substrate based chains. Currently, this supports either substrate
|
||||||
/// or the revive-dev-node which is done by changing the path and some of the other arguments passed
|
/// or the revive-dev-node which is done by changing the path and some of the other arguments passed
|
||||||
/// to the command.
|
/// to the command.
|
||||||
@@ -95,6 +89,7 @@ impl SubstrateNode {
|
|||||||
const SUBSTRATE_LOG_ENV: &str = "error,evm=debug,sc_rpc_server=info,runtime::revive=debug";
|
const SUBSTRATE_LOG_ENV: &str = "error,evm=debug,sc_rpc_server=info,runtime::revive=debug";
|
||||||
const PROXY_LOG_ENV: &str = "info,eth-rpc=debug";
|
const PROXY_LOG_ENV: &str = "info,eth-rpc=debug";
|
||||||
|
|
||||||
|
pub const KITCHENSINK_EXPORT_CHAINSPEC_COMMAND: &str = "export-chain-spec";
|
||||||
pub const REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND: &str = "build-spec";
|
pub const REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND: &str = "build-spec";
|
||||||
|
|
||||||
pub fn new(
|
pub fn new(
|
||||||
@@ -141,8 +136,6 @@ impl SubstrateNode {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> {
|
fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> {
|
||||||
static CHAINSPEC_MUTEX: Mutex<Option<Value>> = Mutex::new(None);
|
|
||||||
|
|
||||||
if !self.rpc_url.is_empty() {
|
if !self.rpc_url.is_empty() {
|
||||||
return Ok(self);
|
return Ok(self);
|
||||||
}
|
}
|
||||||
@@ -161,22 +154,12 @@ impl SubstrateNode {
|
|||||||
let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);
|
let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);
|
||||||
|
|
||||||
trace!("Creating the node genesis");
|
trace!("Creating the node genesis");
|
||||||
let chainspec_json = {
|
|
||||||
let mut chainspec_mutex = CHAINSPEC_MUTEX.lock().expect("Poisoned");
|
|
||||||
match chainspec_mutex.as_ref() {
|
|
||||||
Some(chainspec_json) => chainspec_json.clone(),
|
|
||||||
None => {
|
|
||||||
let chainspec_json = Self::node_genesis(
|
let chainspec_json = Self::node_genesis(
|
||||||
&self.node_binary,
|
&self.node_binary,
|
||||||
&self.export_chainspec_command,
|
&self.export_chainspec_command,
|
||||||
&self.wallet,
|
&self.wallet,
|
||||||
)
|
)
|
||||||
.context("Failed to prepare the chainspec command")?;
|
.context("Failed to prepare the chainspec command")?;
|
||||||
*chainspec_mutex = Some(chainspec_json.clone());
|
|
||||||
chainspec_json
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
trace!("Writing the node genesis");
|
trace!("Writing the node genesis");
|
||||||
serde_json::to_writer_pretty(
|
serde_json::to_writer_pretty(
|
||||||
@@ -227,8 +210,6 @@ impl SubstrateNode {
|
|||||||
.arg(u32::MAX.to_string())
|
.arg(u32::MAX.to_string())
|
||||||
.arg("--pool-kbytes")
|
.arg("--pool-kbytes")
|
||||||
.arg(u32::MAX.to_string())
|
.arg(u32::MAX.to_string())
|
||||||
.arg("--state-pruning")
|
|
||||||
.arg(NUMBER_OF_CACHED_BLOCKS.to_string())
|
|
||||||
.env("RUST_LOG", Self::SUBSTRATE_LOG_ENV)
|
.env("RUST_LOG", Self::SUBSTRATE_LOG_ENV)
|
||||||
.stdout(stdout_file)
|
.stdout(stdout_file)
|
||||||
.stderr(stderr_file);
|
.stderr(stderr_file);
|
||||||
@@ -268,10 +249,6 @@ impl SubstrateNode {
|
|||||||
.arg(format!("ws://127.0.0.1:{substrate_rpc_port}"))
|
.arg(format!("ws://127.0.0.1:{substrate_rpc_port}"))
|
||||||
.arg("--rpc-max-connections")
|
.arg("--rpc-max-connections")
|
||||||
.arg(u32::MAX.to_string())
|
.arg(u32::MAX.to_string())
|
||||||
.arg("--index-last-n-blocks")
|
|
||||||
.arg(NUMBER_OF_CACHED_BLOCKS.to_string())
|
|
||||||
.arg("--cache-size")
|
|
||||||
.arg(NUMBER_OF_CACHED_BLOCKS.to_string())
|
|
||||||
.env("RUST_LOG", Self::PROXY_LOG_ENV)
|
.env("RUST_LOG", Self::PROXY_LOG_ENV)
|
||||||
.stdout(stdout_file)
|
.stdout(stdout_file)
|
||||||
.stderr(stderr_file);
|
.stderr(stderr_file);
|
||||||
@@ -324,7 +301,7 @@ impl SubstrateNode {
|
|||||||
.get_or_try_init(|| async move {
|
.get_or_try_init(|| async move {
|
||||||
construct_concurrency_limited_provider::<Ethereum, _>(
|
construct_concurrency_limited_provider::<Ethereum, _>(
|
||||||
self.rpc_url.as_str(),
|
self.rpc_url.as_str(),
|
||||||
FallbackGasFiller::new(u64::MAX, 50_000_000_000, 1_000_000_000),
|
FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000),
|
||||||
ChainIdFiller::new(Some(CHAIN_ID)),
|
ChainIdFiller::new(Some(CHAIN_ID)),
|
||||||
NonceFiller::new(self.nonce_manager.clone()),
|
NonceFiller::new(self.nonce_manager.clone()),
|
||||||
self.wallet.clone(),
|
self.wallet.clone(),
|
||||||
@@ -353,7 +330,7 @@ impl SubstrateNode {
|
|||||||
trace!("Waiting for chainspec export");
|
trace!("Waiting for chainspec export");
|
||||||
if !output.status.success() {
|
if !output.status.success() {
|
||||||
anyhow::bail!(
|
anyhow::bail!(
|
||||||
"substrate-node export-chain-spec failed: {}",
|
"Substrate-node export-chain-spec failed: {}",
|
||||||
String::from_utf8_lossy(&output.stderr)
|
String::from_utf8_lossy(&output.stderr)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
@@ -580,8 +557,7 @@ impl EthereumNode for SubstrateNode {
|
|||||||
let max_proof_size = limits.max_block.proof_size;
|
let max_proof_size = limits.max_block.proof_size;
|
||||||
|
|
||||||
Some(MinedBlockInformation {
|
Some(MinedBlockInformation {
|
||||||
ethereum_block_information: EthereumMinedBlockInformation {
|
block_number: substrate_block.number() as _,
|
||||||
block_number: revive_block.number(),
|
|
||||||
block_timestamp: revive_block.header.timestamp,
|
block_timestamp: revive_block.header.timestamp,
|
||||||
mined_gas: revive_block.header.gas_used as _,
|
mined_gas: revive_block.header.gas_used as _,
|
||||||
block_gas_limit: revive_block.header.gas_limit as _,
|
block_gas_limit: revive_block.header.gas_limit as _,
|
||||||
@@ -591,14 +567,10 @@ impl EthereumNode for SubstrateNode {
|
|||||||
.as_hashes()
|
.as_hashes()
|
||||||
.expect("Must be hashes")
|
.expect("Must be hashes")
|
||||||
.to_vec(),
|
.to_vec(),
|
||||||
},
|
|
||||||
substrate_block_information: Some(SubstrateMinedBlockInformation {
|
|
||||||
ref_time: block_ref_time,
|
ref_time: block_ref_time,
|
||||||
max_ref_time,
|
max_ref_time,
|
||||||
proof_size: block_proof_size,
|
proof_size: block_proof_size,
|
||||||
max_proof_size,
|
max_proof_size,
|
||||||
}),
|
|
||||||
tx_counts: Default::default(),
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
@@ -820,8 +792,8 @@ mod tests {
|
|||||||
|
|
||||||
let context = test_config();
|
let context = test_config();
|
||||||
let mut node = SubstrateNode::new(
|
let mut node = SubstrateNode::new(
|
||||||
context.revive_dev_node_configuration.path.clone(),
|
context.kitchensink_configuration.path.clone(),
|
||||||
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
|
||||||
None,
|
None,
|
||||||
&context,
|
&context,
|
||||||
&[],
|
&[],
|
||||||
@@ -843,7 +815,6 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "Ignored since it takes a long time to run"]
|
|
||||||
async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
|
async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let (context, node) = shared_state();
|
let (context, node) = shared_state();
|
||||||
@@ -860,14 +831,11 @@ mod tests {
|
|||||||
.value(U256::from(100_000_000_000_000u128));
|
.value(U256::from(100_000_000_000_000u128));
|
||||||
|
|
||||||
// Act
|
// Act
|
||||||
let mut pending_transaction = provider
|
let receipt = provider.send_transaction(transaction).await;
|
||||||
.send_transaction(transaction)
|
|
||||||
.await
|
|
||||||
.expect("Submission failed");
|
|
||||||
pending_transaction.set_timeout(Some(Duration::from_secs(60)));
|
|
||||||
|
|
||||||
// Assert
|
// Assert
|
||||||
let _ = pending_transaction
|
let _ = receipt
|
||||||
|
.expect("Failed to send the transfer transaction")
|
||||||
.get_receipt()
|
.get_receipt()
|
||||||
.await
|
.await
|
||||||
.expect("Failed to get the receipt for the transfer");
|
.expect("Failed to get the receipt for the transfer");
|
||||||
@@ -891,8 +859,8 @@ mod tests {
|
|||||||
|
|
||||||
let context = test_config();
|
let context = test_config();
|
||||||
let mut dummy_node = SubstrateNode::new(
|
let mut dummy_node = SubstrateNode::new(
|
||||||
context.revive_dev_node_configuration.path.clone(),
|
context.kitchensink_configuration.path.clone(),
|
||||||
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
|
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
|
||||||
None,
|
None,
|
||||||
&context,
|
&context,
|
||||||
&[],
|
&[],
|
||||||
@@ -985,7 +953,7 @@ mod tests {
|
|||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
version.starts_with("substrate-node"),
|
version.starts_with("substrate-node"),
|
||||||
"Expected substrate-node version string, got: {version}"
|
"Expected Substrate-node version string, got: {version}"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -60,10 +60,7 @@ use revive_common::EVMVersion;
|
|||||||
use revive_dt_common::fs::clear_directory;
|
use revive_dt_common::fs::clear_directory;
|
||||||
use revive_dt_config::*;
|
use revive_dt_config::*;
|
||||||
use revive_dt_format::traits::ResolverApi;
|
use revive_dt_format::traits::ResolverApi;
|
||||||
use revive_dt_node_interaction::*;
|
use revive_dt_node_interaction::{EthereumNode, MinedBlockInformation};
|
||||||
use revive_dt_report::{
|
|
||||||
EthereumMinedBlockInformation, MinedBlockInformation, SubstrateMinedBlockInformation,
|
|
||||||
};
|
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
use sp_core::crypto::Ss58Codec;
|
use sp_core::crypto::Ss58Codec;
|
||||||
use sp_runtime::AccountId32;
|
use sp_runtime::AccountId32;
|
||||||
@@ -210,7 +207,6 @@ impl ZombienetNode {
|
|||||||
.with_args(vec![
|
.with_args(vec![
|
||||||
("--pool-limit", u32::MAX.to_string().as_str()).into(),
|
("--pool-limit", u32::MAX.to_string().as_str()).into(),
|
||||||
("--pool-kbytes", u32::MAX.to_string().as_str()).into(),
|
("--pool-kbytes", u32::MAX.to_string().as_str()).into(),
|
||||||
("--dev-block-time", 12000u16.to_string().as_str()).into(),
|
|
||||||
])
|
])
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
@@ -356,7 +352,7 @@ impl ZombienetNode {
|
|||||||
|
|
||||||
if !output.status.success() {
|
if !output.status.success() {
|
||||||
anyhow::bail!(
|
anyhow::bail!(
|
||||||
"substrate-node export-chain-spec failed: {}",
|
"Substrate-node export-chain-spec failed: {}",
|
||||||
String::from_utf8_lossy(&output.stderr)
|
String::from_utf8_lossy(&output.stderr)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
@@ -582,8 +578,7 @@ impl EthereumNode for ZombienetNode {
|
|||||||
let max_proof_size = limits.max_block.proof_size;
|
let max_proof_size = limits.max_block.proof_size;
|
||||||
|
|
||||||
Some(MinedBlockInformation {
|
Some(MinedBlockInformation {
|
||||||
ethereum_block_information: EthereumMinedBlockInformation {
|
block_number: substrate_block.number() as _,
|
||||||
block_number: revive_block.number(),
|
|
||||||
block_timestamp: revive_block.header.timestamp,
|
block_timestamp: revive_block.header.timestamp,
|
||||||
mined_gas: revive_block.header.gas_used as _,
|
mined_gas: revive_block.header.gas_used as _,
|
||||||
block_gas_limit: revive_block.header.gas_limit as _,
|
block_gas_limit: revive_block.header.gas_limit as _,
|
||||||
@@ -593,14 +588,10 @@ impl EthereumNode for ZombienetNode {
|
|||||||
.as_hashes()
|
.as_hashes()
|
||||||
.expect("Must be hashes")
|
.expect("Must be hashes")
|
||||||
.to_vec(),
|
.to_vec(),
|
||||||
},
|
|
||||||
substrate_block_information: Some(SubstrateMinedBlockInformation {
|
|
||||||
ref_time: block_ref_time,
|
ref_time: block_ref_time,
|
||||||
max_ref_time,
|
max_ref_time,
|
||||||
proof_size: block_proof_size,
|
proof_size: block_proof_size,
|
||||||
max_proof_size,
|
max_proof_size,
|
||||||
}),
|
|
||||||
tx_counts: Default::default(),
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
@@ -856,9 +847,8 @@ mod tests {
|
|||||||
use utils::{new_node, test_config};
|
use utils::{new_node, test_config};
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "Ignored since CI doesn't have zombienet installed"]
|
#[ignore = "Ignored for the time being"]
|
||||||
async fn test_transfer_transaction_should_return_receipt() {
|
async fn test_transfer_transaction_should_return_receipt() {
|
||||||
// Arrange
|
|
||||||
let (ctx, node) = new_node().await;
|
let (ctx, node) = new_node().await;
|
||||||
|
|
||||||
let provider = node.provider().await.expect("Failed to create provider");
|
let provider = node.provider().await.expect("Failed to create provider");
|
||||||
@@ -867,22 +857,15 @@ mod tests {
|
|||||||
.to(account_address)
|
.to(account_address)
|
||||||
.value(U256::from(100_000_000_000_000u128));
|
.value(U256::from(100_000_000_000_000u128));
|
||||||
|
|
||||||
// Act
|
let receipt = provider.send_transaction(transaction).await;
|
||||||
let mut pending_transaction = provider
|
let _ = receipt
|
||||||
.send_transaction(transaction)
|
.expect("Failed to send the transfer transaction")
|
||||||
.await
|
|
||||||
.expect("Submission failed");
|
|
||||||
pending_transaction.set_timeout(Some(Duration::from_secs(60)));
|
|
||||||
|
|
||||||
// Assert
|
|
||||||
let _ = pending_transaction
|
|
||||||
.get_receipt()
|
.get_receipt()
|
||||||
.await
|
.await
|
||||||
.expect("Failed to get the receipt for the transfer");
|
.expect("Failed to get the receipt for the transfer");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[ignore = "Ignored since CI doesn't have zombienet installed"]
|
|
||||||
fn print_eth_to_polkadot_mappings() {
|
fn print_eth_to_polkadot_mappings() {
|
||||||
let eth_addresses = vec![
|
let eth_addresses = vec![
|
||||||
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1",
|
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1",
|
||||||
@@ -898,7 +881,6 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[ignore = "Ignored since CI doesn't have zombienet installed"]
|
|
||||||
fn test_eth_to_polkadot_address() {
|
fn test_eth_to_polkadot_address() {
|
||||||
let cases = vec![
|
let cases = vec![
|
||||||
(
|
(
|
||||||
@@ -929,7 +911,6 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[ignore = "Ignored since CI doesn't have zombienet installed"]
|
|
||||||
fn eth_rpc_version_works() {
|
fn eth_rpc_version_works() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let context = test_config();
|
let context = test_config();
|
||||||
@@ -949,7 +930,6 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[ignore = "Ignored since CI doesn't have zombienet installed"]
|
|
||||||
fn version_works() {
|
fn version_works() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let context = test_config();
|
let context = test_config();
|
||||||
@@ -969,7 +949,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "Ignored since CI doesn't have zombienet installed"]
|
#[ignore = "Ignored since they take a long time to run"]
|
||||||
async fn get_chain_id_from_node_should_succeed() {
|
async fn get_chain_id_from_node_should_succeed() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let node = shared_node().await;
|
let node = shared_node().await;
|
||||||
@@ -988,7 +968,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "Ignored since CI doesn't have zombienet installed"]
|
#[ignore = "Ignored since they take a long time to run"]
|
||||||
async fn can_get_gas_limit_from_node() {
|
async fn can_get_gas_limit_from_node() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let node = shared_node().await;
|
let node = shared_node().await;
|
||||||
@@ -1006,7 +986,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "Ignored since CI doesn't have zombienet installed"]
|
#[ignore = "Ignored since they take a long time to run"]
|
||||||
async fn can_get_coinbase_from_node() {
|
async fn can_get_coinbase_from_node() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let node = shared_node().await;
|
let node = shared_node().await;
|
||||||
@@ -1024,7 +1004,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "Ignored since CI doesn't have zombienet installed"]
|
#[ignore = "Ignored since they take a long time to run"]
|
||||||
async fn can_get_block_difficulty_from_node() {
|
async fn can_get_block_difficulty_from_node() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let node = shared_node().await;
|
let node = shared_node().await;
|
||||||
@@ -1042,7 +1022,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "Ignored since CI doesn't have zombienet installed"]
|
#[ignore = "Ignored since they take a long time to run"]
|
||||||
async fn can_get_block_hash_from_node() {
|
async fn can_get_block_hash_from_node() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let node = shared_node().await;
|
let node = shared_node().await;
|
||||||
@@ -1060,7 +1040,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "Ignored since CI doesn't have zombienet installed"]
|
#[ignore = "Ignored since they take a long time to run"]
|
||||||
async fn can_get_block_timestamp_from_node() {
|
async fn can_get_block_timestamp_from_node() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let node = shared_node().await;
|
let node = shared_node().await;
|
||||||
@@ -1078,7 +1058,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "Ignored since CI doesn't have zombienet installed"]
|
#[ignore = "Ignored since they take a long time to run"]
|
||||||
async fn can_get_block_number_from_node() {
|
async fn can_get_block_number_from_node() {
|
||||||
// Arrange
|
// Arrange
|
||||||
let node = shared_node().await;
|
let node = shared_node().await;
|
||||||
|
|||||||
@@ -62,10 +62,7 @@ where
|
|||||||
) -> TransportResult<Self::Fillable> {
|
) -> TransportResult<Self::Fillable> {
|
||||||
match self.inner.prepare(provider, tx).await {
|
match self.inner.prepare(provider, tx).await {
|
||||||
Ok(fill) => Ok(Some(fill)),
|
Ok(fill) => Ok(Some(fill)),
|
||||||
Err(err) => {
|
Err(_) => Ok(None),
|
||||||
tracing::debug!(error = ?err, "Gas Provider Estimation Failed, using fallback");
|
|
||||||
Ok(None)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -17,7 +17,6 @@ alloy = { workspace = true }
|
|||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
paste = { workspace = true }
|
paste = { workspace = true }
|
||||||
indexmap = { workspace = true, features = ["serde"] }
|
indexmap = { workspace = true, features = ["serde"] }
|
||||||
itertools = { workspace = true }
|
|
||||||
semver = { workspace = true }
|
semver = { workspace = true }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
|
|||||||
+37
-447
@@ -4,19 +4,17 @@
|
|||||||
use std::{
|
use std::{
|
||||||
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
|
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
|
||||||
fs::OpenOptions,
|
fs::OpenOptions,
|
||||||
ops::{Add, Div},
|
|
||||||
path::PathBuf,
|
path::PathBuf,
|
||||||
time::{SystemTime, UNIX_EPOCH},
|
time::{SystemTime, UNIX_EPOCH},
|
||||||
};
|
};
|
||||||
|
|
||||||
use alloy::primitives::{Address, BlockNumber, BlockTimestamp, TxHash};
|
use alloy::primitives::Address;
|
||||||
use anyhow::{Context as _, Result};
|
use anyhow::{Context as _, Result};
|
||||||
use indexmap::IndexMap;
|
use indexmap::IndexMap;
|
||||||
use itertools::Itertools;
|
use revive_dt_common::types::{ParsedTestSpecifier, PlatformIdentifier};
|
||||||
use revive_dt_common::types::PlatformIdentifier;
|
|
||||||
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
|
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
|
||||||
use revive_dt_config::Context;
|
use revive_dt_config::Context;
|
||||||
use revive_dt_format::{case::CaseIdx, metadata::ContractInstance, steps::StepPath};
|
use revive_dt_format::{case::CaseIdx, metadata::ContractInstance};
|
||||||
use semver::Version;
|
use semver::Version;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use serde_with::{DisplayFromStr, serde_as};
|
use serde_with::{DisplayFromStr, serde_as};
|
||||||
@@ -41,7 +39,7 @@ pub struct ReportAggregator {
|
|||||||
impl ReportAggregator {
|
impl ReportAggregator {
|
||||||
pub fn new(context: Context) -> Self {
|
pub fn new(context: Context) -> Self {
|
||||||
let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
|
let (runner_tx, runner_rx) = unbounded_channel::<RunnerEvent>();
|
||||||
let (listener_tx, _) = channel::<ReporterEvent>(0xFFFF);
|
let (listener_tx, _) = channel::<ReporterEvent>(1024);
|
||||||
Self {
|
Self {
|
||||||
report: Report::new(context),
|
report: Report::new(context),
|
||||||
remaining_cases: Default::default(),
|
remaining_cases: Default::default(),
|
||||||
@@ -64,11 +62,14 @@ impl ReportAggregator {
|
|||||||
debug!("Starting to aggregate report");
|
debug!("Starting to aggregate report");
|
||||||
|
|
||||||
while let Some(event) = self.runner_rx.recv().await {
|
while let Some(event) = self.runner_rx.recv().await {
|
||||||
debug!(event = event.variant_name(), "Received Event");
|
debug!(?event, "Received Event");
|
||||||
match event {
|
match event {
|
||||||
RunnerEvent::SubscribeToEvents(event) => {
|
RunnerEvent::SubscribeToEvents(event) => {
|
||||||
self.handle_subscribe_to_events_event(*event);
|
self.handle_subscribe_to_events_event(*event);
|
||||||
}
|
}
|
||||||
|
RunnerEvent::CorpusDiscovery(event) => {
|
||||||
|
self.handle_corpus_file_discovered_event(*event)
|
||||||
|
}
|
||||||
RunnerEvent::MetadataFileDiscovery(event) => {
|
RunnerEvent::MetadataFileDiscovery(event) => {
|
||||||
self.handle_metadata_file_discovery_event(*event);
|
self.handle_metadata_file_discovery_event(*event);
|
||||||
}
|
}
|
||||||
@@ -105,20 +106,12 @@ impl ReportAggregator {
|
|||||||
RunnerEvent::ContractDeployed(event) => {
|
RunnerEvent::ContractDeployed(event) => {
|
||||||
self.handle_contract_deployed_event(*event);
|
self.handle_contract_deployed_event(*event);
|
||||||
}
|
}
|
||||||
RunnerEvent::Completion(_) => {
|
RunnerEvent::Completion(event) => {
|
||||||
|
self.handle_completion(*event);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
/* Benchmarks Events */
|
|
||||||
RunnerEvent::StepTransactionInformation(event) => {
|
|
||||||
self.handle_step_transaction_information(*event)
|
|
||||||
}
|
|
||||||
RunnerEvent::ContractInformation(event) => {
|
|
||||||
self.handle_contract_information(*event);
|
|
||||||
}
|
|
||||||
RunnerEvent::BlockMined(event) => self.handle_block_mined(*event),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
self.handle_completion(CompletionEvent {});
|
|
||||||
debug!("Report aggregation completed");
|
debug!("Report aggregation completed");
|
||||||
|
|
||||||
let file_name = {
|
let file_name = {
|
||||||
@@ -159,6 +152,10 @@ impl ReportAggregator {
|
|||||||
let _ = event.tx.send(self.listener_tx.subscribe());
|
let _ = event.tx.send(self.listener_tx.subscribe());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn handle_corpus_file_discovered_event(&mut self, event: CorpusDiscoveryEvent) {
|
||||||
|
self.report.corpora.extend(event.test_specifiers);
|
||||||
|
}
|
||||||
|
|
||||||
fn handle_metadata_file_discovery_event(&mut self, event: MetadataFileDiscoveryEvent) {
|
fn handle_metadata_file_discovery_event(&mut self, event: MetadataFileDiscoveryEvent) {
|
||||||
self.report.metadata_files.insert(event.path.clone());
|
self.report.metadata_files.insert(event.path.clone());
|
||||||
}
|
}
|
||||||
@@ -237,19 +234,17 @@ impl ReportAggregator {
|
|||||||
|
|
||||||
let case_status = self
|
let case_status = self
|
||||||
.report
|
.report
|
||||||
.execution_information
|
.test_case_information
|
||||||
.entry(specifier.metadata_file_path.clone().into())
|
.entry(specifier.metadata_file_path.clone().into())
|
||||||
.or_default()
|
.or_default()
|
||||||
.case_reports
|
.entry(specifier.solc_mode.clone())
|
||||||
|
.or_default()
|
||||||
.iter()
|
.iter()
|
||||||
.flat_map(|(case_idx, mode_to_execution_map)| {
|
.map(|(case_idx, case_report)| {
|
||||||
let case_status = mode_to_execution_map
|
(
|
||||||
.mode_execution_reports
|
*case_idx,
|
||||||
.get(&specifier.solc_mode)?
|
case_report.status.clone().expect("Can't be uninitialized"),
|
||||||
.status
|
)
|
||||||
.clone()
|
|
||||||
.expect("Can't be uninitialized");
|
|
||||||
Some((*case_idx, case_status))
|
|
||||||
})
|
})
|
||||||
.collect::<BTreeMap<_, _>>();
|
.collect::<BTreeMap<_, _>>();
|
||||||
let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
|
let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
|
||||||
@@ -388,157 +383,22 @@ impl ReportAggregator {
|
|||||||
self.execution_information(&event.execution_specifier)
|
self.execution_information(&event.execution_specifier)
|
||||||
.deployed_contracts
|
.deployed_contracts
|
||||||
.get_or_insert_default()
|
.get_or_insert_default()
|
||||||
.insert(event.contract_instance.clone(), event.address);
|
.insert(event.contract_instance, event.address);
|
||||||
self.test_case_report(&event.execution_specifier.test_specifier)
|
|
||||||
.contract_addresses
|
|
||||||
.entry(event.contract_instance)
|
|
||||||
.or_default()
|
|
||||||
.entry(event.execution_specifier.platform_identifier)
|
|
||||||
.or_default()
|
|
||||||
.push(event.address);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn handle_completion(&mut self, _: CompletionEvent) {
|
fn handle_completion(&mut self, _: CompletionEvent) {
|
||||||
self.runner_rx.close();
|
self.runner_rx.close();
|
||||||
self.handle_metrics_computation();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn handle_metrics_computation(&mut self) {
|
fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut TestCaseReport {
|
||||||
for report in self.report.execution_information.values_mut() {
|
|
||||||
for report in report.case_reports.values_mut() {
|
|
||||||
for report in report.mode_execution_reports.values_mut() {
|
|
||||||
for (platform_identifier, block_information) in
|
|
||||||
report.mined_block_information.iter_mut()
|
|
||||||
{
|
|
||||||
block_information.sort_by(|a, b| {
|
|
||||||
a.ethereum_block_information
|
|
||||||
.block_number
|
|
||||||
.cmp(&b.ethereum_block_information.block_number)
|
|
||||||
});
|
|
||||||
|
|
||||||
// Computing the TPS.
|
|
||||||
let tps = block_information
|
|
||||||
.iter()
|
|
||||||
.tuple_windows::<(_, _)>()
|
|
||||||
.map(|(block1, block2)| {
|
|
||||||
block2.ethereum_block_information.transaction_hashes.len() as u64
|
|
||||||
/ (block2.ethereum_block_information.block_timestamp
|
|
||||||
- block1.ethereum_block_information.block_timestamp)
|
|
||||||
})
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
report
|
|
||||||
.metrics
|
|
||||||
.get_or_insert_default()
|
|
||||||
.transaction_per_second
|
|
||||||
.with_list(*platform_identifier, tps);
|
|
||||||
|
|
||||||
// Computing the GPS.
|
|
||||||
let gps = block_information
|
|
||||||
.iter()
|
|
||||||
.tuple_windows::<(_, _)>()
|
|
||||||
.map(|(block1, block2)| {
|
|
||||||
block2.ethereum_block_information.mined_gas as u64
|
|
||||||
/ (block2.ethereum_block_information.block_timestamp
|
|
||||||
- block1.ethereum_block_information.block_timestamp)
|
|
||||||
})
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
report
|
|
||||||
.metrics
|
|
||||||
.get_or_insert_default()
|
|
||||||
.gas_per_second
|
|
||||||
.with_list(*platform_identifier, gps);
|
|
||||||
|
|
||||||
// Computing the gas block fullness
|
|
||||||
let gas_block_fullness = block_information
|
|
||||||
.iter()
|
|
||||||
.map(|block| block.gas_block_fullness_percentage())
|
|
||||||
.map(|v| v as u64)
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
report
|
|
||||||
.metrics
|
|
||||||
.get_or_insert_default()
|
|
||||||
.gas_block_fullness
|
|
||||||
.with_list(*platform_identifier, gas_block_fullness);
|
|
||||||
|
|
||||||
// Computing the ref-time block fullness
|
|
||||||
let reftime_block_fullness = block_information
|
|
||||||
.iter()
|
|
||||||
.filter_map(|block| block.ref_time_block_fullness_percentage())
|
|
||||||
.map(|v| v as u64)
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
if !reftime_block_fullness.is_empty() {
|
|
||||||
report
|
|
||||||
.metrics
|
|
||||||
.get_or_insert_default()
|
|
||||||
.ref_time_block_fullness
|
|
||||||
.get_or_insert_default()
|
|
||||||
.with_list(*platform_identifier, reftime_block_fullness);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Computing the proof size block fullness
|
|
||||||
let proof_size_block_fullness = block_information
|
|
||||||
.iter()
|
|
||||||
.filter_map(|block| block.proof_size_block_fullness_percentage())
|
|
||||||
.map(|v| v as u64)
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
if !proof_size_block_fullness.is_empty() {
|
|
||||||
report
|
|
||||||
.metrics
|
|
||||||
.get_or_insert_default()
|
|
||||||
.proof_size_block_fullness
|
|
||||||
.get_or_insert_default()
|
|
||||||
.with_list(*platform_identifier, proof_size_block_fullness);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn handle_step_transaction_information(&mut self, event: StepTransactionInformationEvent) {
|
|
||||||
self.test_case_report(&event.execution_specifier.test_specifier)
|
|
||||||
.steps
|
|
||||||
.entry(event.step_path)
|
|
||||||
.or_default()
|
|
||||||
.transactions
|
|
||||||
.entry(event.execution_specifier.platform_identifier)
|
|
||||||
.or_default()
|
|
||||||
.push(event.transaction_information);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn handle_contract_information(&mut self, event: ContractInformationEvent) {
|
|
||||||
self.test_case_report(&event.execution_specifier.test_specifier)
|
|
||||||
.compiled_contracts
|
|
||||||
.entry(event.source_code_path)
|
|
||||||
.or_default()
|
|
||||||
.entry(event.contract_name)
|
|
||||||
.or_default()
|
|
||||||
.contract_size
|
|
||||||
.insert(
|
|
||||||
event.execution_specifier.platform_identifier,
|
|
||||||
event.contract_size,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn handle_block_mined(&mut self, event: BlockMinedEvent) {
|
|
||||||
self.test_case_report(&event.execution_specifier.test_specifier)
|
|
||||||
.mined_block_information
|
|
||||||
.entry(event.execution_specifier.platform_identifier)
|
|
||||||
.or_default()
|
|
||||||
.push(event.mined_block_information);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut ExecutionReport {
|
|
||||||
self.report
|
self.report
|
||||||
.execution_information
|
.test_case_information
|
||||||
.entry(specifier.metadata_file_path.clone().into())
|
.entry(specifier.metadata_file_path.clone().into())
|
||||||
.or_default()
|
.or_default()
|
||||||
.case_reports
|
|
||||||
.entry(specifier.case_idx)
|
|
||||||
.or_default()
|
|
||||||
.mode_execution_reports
|
|
||||||
.entry(specifier.solc_mode.clone())
|
.entry(specifier.solc_mode.clone())
|
||||||
.or_default()
|
.or_default()
|
||||||
|
.entry(specifier.case_idx)
|
||||||
|
.or_default()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn execution_information(
|
fn execution_information(
|
||||||
@@ -559,69 +419,35 @@ impl ReportAggregator {
|
|||||||
pub struct Report {
|
pub struct Report {
|
||||||
/// The context that the tool was started up with.
|
/// The context that the tool was started up with.
|
||||||
pub context: Context,
|
pub context: Context,
|
||||||
|
/// The list of corpus files that the tool found.
|
||||||
|
#[serde_as(as = "Vec<DisplayFromStr>")]
|
||||||
|
pub corpora: Vec<ParsedTestSpecifier>,
|
||||||
/// The list of metadata files that were found by the tool.
|
/// The list of metadata files that were found by the tool.
|
||||||
pub metadata_files: BTreeSet<MetadataFilePath>,
|
pub metadata_files: BTreeSet<MetadataFilePath>,
|
||||||
/// Metrics from the execution.
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub metrics: Option<Metrics>,
|
|
||||||
/// Information relating to each test case.
|
/// Information relating to each test case.
|
||||||
pub execution_information: BTreeMap<MetadataFilePath, MetadataFileReport>,
|
#[serde_as(as = "BTreeMap<_, HashMap<DisplayFromStr, BTreeMap<DisplayFromStr, _>>>")]
|
||||||
|
pub test_case_information:
|
||||||
|
BTreeMap<MetadataFilePath, HashMap<Mode, BTreeMap<CaseIdx, TestCaseReport>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Report {
|
impl Report {
|
||||||
pub fn new(context: Context) -> Self {
|
pub fn new(context: Context) -> Self {
|
||||||
Self {
|
Self {
|
||||||
context,
|
context,
|
||||||
metrics: Default::default(),
|
corpora: Default::default(),
|
||||||
metadata_files: Default::default(),
|
metadata_files: Default::default(),
|
||||||
execution_information: Default::default(),
|
test_case_information: Default::default(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
|
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
|
||||||
pub struct MetadataFileReport {
|
pub struct TestCaseReport {
|
||||||
/// Metrics from the execution.
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub metrics: Option<Metrics>,
|
|
||||||
/// The report of each case keyed by the case idx.
|
|
||||||
pub case_reports: BTreeMap<CaseIdx, CaseReport>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[serde_as]
|
|
||||||
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
|
|
||||||
pub struct CaseReport {
|
|
||||||
/// Metrics from the execution.
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub metrics: Option<Metrics>,
|
|
||||||
/// The [`ExecutionReport`] for each one of the [`Mode`]s.
|
|
||||||
#[serde_as(as = "HashMap<DisplayFromStr, _>")]
|
|
||||||
pub mode_execution_reports: HashMap<Mode, ExecutionReport>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
|
|
||||||
pub struct ExecutionReport {
|
|
||||||
/// Information on the status of the test case and whether it succeeded, failed, or was ignored.
|
/// Information on the status of the test case and whether it succeeded, failed, or was ignored.
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
pub status: Option<TestCaseStatus>,
|
pub status: Option<TestCaseStatus>,
|
||||||
/// Metrics from the execution.
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub metrics: Option<Metrics>,
|
|
||||||
/// Information related to the execution on one of the platforms.
|
/// Information related to the execution on one of the platforms.
|
||||||
#[serde(skip_serializing_if = "BTreeMap::is_empty")]
|
pub platform_execution: BTreeMap<PlatformIdentifier, Option<ExecutionInformation>>,
|
||||||
pub platform_execution: PlatformKeyedInformation<Option<ExecutionInformation>>,
|
|
||||||
/// Information on the compiled contracts.
|
|
||||||
#[serde(skip_serializing_if = "BTreeMap::is_empty")]
|
|
||||||
pub compiled_contracts: BTreeMap<PathBuf, BTreeMap<String, ContractInformation>>,
|
|
||||||
/// The addresses of the deployed contracts
|
|
||||||
#[serde(skip_serializing_if = "BTreeMap::is_empty")]
|
|
||||||
pub contract_addresses: BTreeMap<ContractInstance, PlatformKeyedInformation<Vec<Address>>>,
|
|
||||||
/// Information on the mined blocks as part of this execution.
|
|
||||||
#[serde(skip_serializing_if = "BTreeMap::is_empty")]
|
|
||||||
pub mined_block_information: PlatformKeyedInformation<Vec<MinedBlockInformation>>,
|
|
||||||
/// Information tracked for each step that was executed.
|
|
||||||
#[serde(skip_serializing_if = "BTreeMap::is_empty")]
|
|
||||||
pub steps: BTreeMap<StepPath, StepReport>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Information related to the status of the test. Could be that the test succeeded, failed, or that
|
/// Information related to the status of the test. Could be that the test succeeded, failed, or that
|
||||||
@@ -719,239 +545,3 @@ pub enum CompilationStatus {
|
|||||||
compiler_input: Option<CompilerInput>,
|
compiler_input: Option<CompilerInput>,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Information on each step in the execution.
|
|
||||||
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
|
||||||
pub struct StepReport {
|
|
||||||
/// Information on the transactions submitted as part of this step.
|
|
||||||
transactions: PlatformKeyedInformation<Vec<TransactionInformation>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
|
||||||
pub struct TransactionInformation {
|
|
||||||
/// The hash of the transaction
|
|
||||||
pub transaction_hash: TxHash,
|
|
||||||
pub submission_timestamp: u64,
|
|
||||||
pub block_timestamp: u64,
|
|
||||||
pub block_number: BlockNumber,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The metrics we collect for our benchmarks.
|
|
||||||
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
|
||||||
pub struct Metrics {
|
|
||||||
pub transaction_per_second: Metric<u64>,
|
|
||||||
pub gas_per_second: Metric<u64>,
|
|
||||||
/* Block Fullness */
|
|
||||||
pub gas_block_fullness: Metric<u64>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub ref_time_block_fullness: Option<Metric<u64>>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub proof_size_block_fullness: Option<Metric<u64>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The data that we store for a given metric (e.g., TPS).
|
|
||||||
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
|
||||||
pub struct Metric<T> {
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub minimum: Option<PlatformKeyedInformation<T>>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub maximum: Option<PlatformKeyedInformation<T>>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub mean: Option<PlatformKeyedInformation<T>>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub median: Option<PlatformKeyedInformation<T>>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub raw: Option<PlatformKeyedInformation<Vec<T>>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Metric<T>
|
|
||||||
where
|
|
||||||
T: Default
|
|
||||||
+ Copy
|
|
||||||
+ Ord
|
|
||||||
+ PartialOrd
|
|
||||||
+ Add<Output = T>
|
|
||||||
+ Div<Output = T>
|
|
||||||
+ TryFrom<usize, Error: std::fmt::Debug>,
|
|
||||||
{
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Default::default()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn platform_identifiers(&self) -> BTreeSet<PlatformIdentifier> {
|
|
||||||
self.minimum
|
|
||||||
.as_ref()
|
|
||||||
.map(|m| m.keys())
|
|
||||||
.into_iter()
|
|
||||||
.flatten()
|
|
||||||
.chain(
|
|
||||||
self.maximum
|
|
||||||
.as_ref()
|
|
||||||
.map(|m| m.keys())
|
|
||||||
.into_iter()
|
|
||||||
.flatten(),
|
|
||||||
)
|
|
||||||
.chain(self.mean.as_ref().map(|m| m.keys()).into_iter().flatten())
|
|
||||||
.chain(self.median.as_ref().map(|m| m.keys()).into_iter().flatten())
|
|
||||||
.chain(self.raw.as_ref().map(|m| m.keys()).into_iter().flatten())
|
|
||||||
.copied()
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn with_list(
|
|
||||||
&mut self,
|
|
||||||
platform_identifier: PlatformIdentifier,
|
|
||||||
original_list: Vec<T>,
|
|
||||||
) -> &mut Self {
|
|
||||||
let mut list = original_list.clone();
|
|
||||||
list.sort();
|
|
||||||
let Some(min) = list.first().copied() else {
|
|
||||||
return self;
|
|
||||||
};
|
|
||||||
let Some(max) = list.last().copied() else {
|
|
||||||
return self;
|
|
||||||
};
|
|
||||||
let sum = list.iter().fold(T::default(), |acc, num| acc + *num);
|
|
||||||
let mean = sum / TryInto::<T>::try_into(list.len()).unwrap();
|
|
||||||
|
|
||||||
let median = match list.len().is_multiple_of(2) {
|
|
||||||
true => {
|
|
||||||
let idx = list.len() / 2;
|
|
||||||
let val1 = *list.get(idx - 1).unwrap();
|
|
||||||
let val2 = *list.get(idx).unwrap();
|
|
||||||
(val1 + val2) / TryInto::<T>::try_into(2usize).unwrap()
|
|
||||||
}
|
|
||||||
false => {
|
|
||||||
let idx = list.len() / 2;
|
|
||||||
*list.get(idx).unwrap()
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
self.minimum
|
|
||||||
.get_or_insert_default()
|
|
||||||
.insert(platform_identifier, min);
|
|
||||||
self.maximum
|
|
||||||
.get_or_insert_default()
|
|
||||||
.insert(platform_identifier, max);
|
|
||||||
self.mean
|
|
||||||
.get_or_insert_default()
|
|
||||||
.insert(platform_identifier, mean);
|
|
||||||
self.median
|
|
||||||
.get_or_insert_default()
|
|
||||||
.insert(platform_identifier, median);
|
|
||||||
self.raw
|
|
||||||
.get_or_insert_default()
|
|
||||||
.insert(platform_identifier, original_list);
|
|
||||||
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn combine(&self, other: &Self) -> Self {
|
|
||||||
let mut platform_identifiers = self.platform_identifiers();
|
|
||||||
platform_identifiers.extend(other.platform_identifiers());
|
|
||||||
|
|
||||||
let mut this = Self::new();
|
|
||||||
for platform_identifier in platform_identifiers {
|
|
||||||
let mut l1 = self
|
|
||||||
.raw
|
|
||||||
.as_ref()
|
|
||||||
.and_then(|m| m.get(&platform_identifier))
|
|
||||||
.cloned()
|
|
||||||
.unwrap_or_default();
|
|
||||||
let l2 = other
|
|
||||||
.raw
|
|
||||||
.as_ref()
|
|
||||||
.and_then(|m| m.get(&platform_identifier))
|
|
||||||
.cloned()
|
|
||||||
.unwrap_or_default();
|
|
||||||
l1.extend(l2);
|
|
||||||
this.with_list(platform_identifier, l1);
|
|
||||||
}
|
|
||||||
|
|
||||||
this
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
|
|
||||||
pub struct ContractInformation {
|
|
||||||
/// The size of the contract on the various platforms.
|
|
||||||
pub contract_size: PlatformKeyedInformation<usize>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
|
||||||
pub struct MinedBlockInformation {
|
|
||||||
pub ethereum_block_information: EthereumMinedBlockInformation,
|
|
||||||
pub substrate_block_information: Option<SubstrateMinedBlockInformation>,
|
|
||||||
pub tx_counts: BTreeMap<StepPath, usize>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl MinedBlockInformation {
|
|
||||||
pub fn gas_block_fullness_percentage(&self) -> u8 {
|
|
||||||
self.ethereum_block_information
|
|
||||||
.gas_block_fullness_percentage()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn ref_time_block_fullness_percentage(&self) -> Option<u8> {
|
|
||||||
self.substrate_block_information
|
|
||||||
.as_ref()
|
|
||||||
.map(|block| block.ref_time_block_fullness_percentage())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn proof_size_block_fullness_percentage(&self) -> Option<u8> {
|
|
||||||
self.substrate_block_information
|
|
||||||
.as_ref()
|
|
||||||
.map(|block| block.proof_size_block_fullness_percentage())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
|
||||||
pub struct EthereumMinedBlockInformation {
|
|
||||||
/// The block number.
|
|
||||||
pub block_number: BlockNumber,
|
|
||||||
|
|
||||||
/// The block timestamp.
|
|
||||||
pub block_timestamp: BlockTimestamp,
|
|
||||||
|
|
||||||
/// The amount of gas mined in the block.
|
|
||||||
pub mined_gas: u128,
|
|
||||||
|
|
||||||
/// The gas limit of the block.
|
|
||||||
pub block_gas_limit: u128,
|
|
||||||
|
|
||||||
/// The hashes of the transactions that were mined as part of the block.
|
|
||||||
pub transaction_hashes: Vec<TxHash>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl EthereumMinedBlockInformation {
|
|
||||||
pub fn gas_block_fullness_percentage(&self) -> u8 {
|
|
||||||
(self.mined_gas * 100 / self.block_gas_limit) as u8
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
|
||||||
pub struct SubstrateMinedBlockInformation {
|
|
||||||
/// The ref time for substrate based chains.
|
|
||||||
pub ref_time: u128,
|
|
||||||
|
|
||||||
/// The max ref time for substrate based chains.
|
|
||||||
pub max_ref_time: u64,
|
|
||||||
|
|
||||||
/// The proof size for substrate based chains.
|
|
||||||
pub proof_size: u128,
|
|
||||||
|
|
||||||
/// The max proof size for substrate based chains.
|
|
||||||
pub max_proof_size: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SubstrateMinedBlockInformation {
|
|
||||||
pub fn ref_time_block_fullness_percentage(&self) -> u8 {
|
|
||||||
(self.ref_time * 100 / self.max_ref_time as u128) as u8
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn proof_size_block_fullness_percentage(&self) -> u8 {
|
|
||||||
(self.proof_size * 100 / self.max_proof_size as u128) as u8
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Information keyed by the platform identifier.
|
|
||||||
pub type PlatformKeyedInformation<T> = BTreeMap<PlatformIdentifier, T>;
|
|
||||||
|
|||||||
@@ -6,16 +6,14 @@ use std::{collections::BTreeMap, path::PathBuf, sync::Arc};
|
|||||||
use alloy::primitives::Address;
|
use alloy::primitives::Address;
|
||||||
use anyhow::Context as _;
|
use anyhow::Context as _;
|
||||||
use indexmap::IndexMap;
|
use indexmap::IndexMap;
|
||||||
|
use revive_dt_common::types::ParsedTestSpecifier;
|
||||||
use revive_dt_common::types::PlatformIdentifier;
|
use revive_dt_common::types::PlatformIdentifier;
|
||||||
use revive_dt_compiler::{CompilerInput, CompilerOutput};
|
use revive_dt_compiler::{CompilerInput, CompilerOutput};
|
||||||
use revive_dt_format::metadata::ContractInstance;
|
use revive_dt_format::metadata::ContractInstance;
|
||||||
use revive_dt_format::metadata::Metadata;
|
use revive_dt_format::metadata::Metadata;
|
||||||
use revive_dt_format::steps::StepPath;
|
|
||||||
use semver::Version;
|
use semver::Version;
|
||||||
use tokio::sync::{broadcast, oneshot};
|
use tokio::sync::{broadcast, oneshot};
|
||||||
|
|
||||||
use crate::MinedBlockInformation;
|
|
||||||
use crate::TransactionInformation;
|
|
||||||
use crate::{ExecutionSpecifier, ReporterEvent, TestSpecifier, common::MetadataFilePath};
|
use crate::{ExecutionSpecifier, ReporterEvent, TestSpecifier, common::MetadataFilePath};
|
||||||
|
|
||||||
macro_rules! __report_gen_emit_test_specific {
|
macro_rules! __report_gen_emit_test_specific {
|
||||||
@@ -347,16 +345,6 @@ macro_rules! define_event {
|
|||||||
),*
|
),*
|
||||||
}
|
}
|
||||||
|
|
||||||
impl $ident {
|
|
||||||
pub fn variant_name(&self) -> &'static str {
|
|
||||||
match self {
|
|
||||||
$(
|
|
||||||
Self::$variant_ident { .. } => stringify!($variant_ident)
|
|
||||||
),*
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
$(
|
$(
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
$(#[$variant_meta])*
|
$(#[$variant_meta])*
|
||||||
@@ -493,6 +481,11 @@ define_event! {
|
|||||||
/// The channel that the aggregator is to send the receive side of the channel on.
|
/// The channel that the aggregator is to send the receive side of the channel on.
|
||||||
tx: oneshot::Sender<broadcast::Receiver<ReporterEvent>>
|
tx: oneshot::Sender<broadcast::Receiver<ReporterEvent>>
|
||||||
},
|
},
|
||||||
|
/// An event emitted by runners when they've discovered a corpus file.
|
||||||
|
CorpusDiscovery {
|
||||||
|
/// The contents of the corpus file.
|
||||||
|
test_specifiers: Vec<ParsedTestSpecifier>
|
||||||
|
},
|
||||||
/// An event emitted by runners when they've discovered a metadata file.
|
/// An event emitted by runners when they've discovered a metadata file.
|
||||||
MetadataFileDiscovery {
|
MetadataFileDiscovery {
|
||||||
/// The path of the metadata file discovered.
|
/// The path of the metadata file discovered.
|
||||||
@@ -622,35 +615,7 @@ define_event! {
|
|||||||
address: Address
|
address: Address
|
||||||
},
|
},
|
||||||
/// Reports the completion of the run.
|
/// Reports the completion of the run.
|
||||||
Completion {},
|
Completion {}
|
||||||
|
|
||||||
/* Benchmarks Events */
|
|
||||||
/// An event emitted with information on a transaction that was submitted for a certain step
|
|
||||||
/// of the execution.
|
|
||||||
StepTransactionInformation {
|
|
||||||
/// A specifier for the execution that's taking place.
|
|
||||||
execution_specifier: Arc<ExecutionSpecifier>,
|
|
||||||
/// The path of the step that this transaction belongs to.
|
|
||||||
step_path: StepPath,
|
|
||||||
/// Information about the transaction
|
|
||||||
transaction_information: TransactionInformation
|
|
||||||
},
|
|
||||||
ContractInformation {
|
|
||||||
/// A specifier for the execution that's taking place.
|
|
||||||
execution_specifier: Arc<ExecutionSpecifier>,
|
|
||||||
/// The path of the solidity source code that contains the contract.
|
|
||||||
source_code_path: PathBuf,
|
|
||||||
/// The name of the contract
|
|
||||||
contract_name: String,
|
|
||||||
/// The size of the contract
|
|
||||||
contract_size: usize
|
|
||||||
},
|
|
||||||
BlockMined {
|
|
||||||
/// A specifier for the execution that's taking place.
|
|
||||||
execution_specifier: Arc<ExecutionSpecifier>,
|
|
||||||
/// Information on the mined block,
|
|
||||||
mined_block_information: MinedBlockInformation
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -2,13 +2,12 @@
|
|||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
collections::HashMap,
|
collections::HashMap,
|
||||||
str::FromStr,
|
|
||||||
sync::{LazyLock, Mutex},
|
sync::{LazyLock, Mutex},
|
||||||
};
|
};
|
||||||
|
|
||||||
use revive_dt_common::types::VersionOrRequirement;
|
use revive_dt_common::types::VersionOrRequirement;
|
||||||
|
|
||||||
use semver::{Version, VersionReq};
|
use semver::Version;
|
||||||
use sha2::{Digest, Sha256};
|
use sha2::{Digest, Sha256};
|
||||||
|
|
||||||
use crate::list::List;
|
use crate::list::List;
|
||||||
@@ -66,9 +65,6 @@ impl SolcDownloader {
|
|||||||
target: &'static str,
|
target: &'static str,
|
||||||
list: &'static str,
|
list: &'static str,
|
||||||
) -> anyhow::Result<Self> {
|
) -> anyhow::Result<Self> {
|
||||||
static MAXIMUM_COMPILER_VERSION_REQUIREMENT: LazyLock<VersionReq> =
|
|
||||||
LazyLock::new(|| VersionReq::from_str("<=0.8.30").unwrap());
|
|
||||||
|
|
||||||
let version_or_requirement = version.into();
|
let version_or_requirement = version.into();
|
||||||
match version_or_requirement {
|
match version_or_requirement {
|
||||||
VersionOrRequirement::Version(version) => Ok(Self {
|
VersionOrRequirement::Version(version) => Ok(Self {
|
||||||
@@ -83,10 +79,7 @@ impl SolcDownloader {
|
|||||||
.builds
|
.builds
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|build| build.version)
|
.map(|build| build.version)
|
||||||
.filter(|version| {
|
.filter(|version| requirement.matches(version))
|
||||||
MAXIMUM_COMPILER_VERSION_REQUIREMENT.matches(version)
|
|
||||||
&& requirement.matches(version)
|
|
||||||
})
|
|
||||||
.max()
|
.max()
|
||||||
else {
|
else {
|
||||||
anyhow::bail!("Failed to find a version that satisfies {requirement:?}");
|
anyhow::bail!("Failed to find a version that satisfies {requirement:?}");
|
||||||
|
|||||||
Submodule
+1
Submodule polkadot-sdk added at dc3d0e5ab7
+1
-1
Submodule resolc-compiler-tests updated: 7bc445491e...5dcd1cdb87
@@ -22,6 +22,7 @@ POLKADOT_SDK_DIR="${1:-}"
|
|||||||
# Binary paths (default to names in $PATH)
|
# Binary paths (default to names in $PATH)
|
||||||
REVIVE_DEV_NODE_BIN="revive-dev-node"
|
REVIVE_DEV_NODE_BIN="revive-dev-node"
|
||||||
ETH_RPC_BIN="eth-rpc"
|
ETH_RPC_BIN="eth-rpc"
|
||||||
|
SUBSTRATE_NODE_BIN="substrate-node"
|
||||||
|
|
||||||
echo -e "${GREEN}=== Revive Differential Tests Quick Start ===${NC}"
|
echo -e "${GREEN}=== Revive Differential Tests Quick Start ===${NC}"
|
||||||
echo ""
|
echo ""
|
||||||
@@ -49,13 +50,14 @@ if [ -n "$POLKADOT_SDK_DIR" ]; then
|
|||||||
|
|
||||||
REVIVE_DEV_NODE_BIN="$POLKADOT_SDK_DIR/target/release/revive-dev-node"
|
REVIVE_DEV_NODE_BIN="$POLKADOT_SDK_DIR/target/release/revive-dev-node"
|
||||||
ETH_RPC_BIN="$POLKADOT_SDK_DIR/target/release/eth-rpc"
|
ETH_RPC_BIN="$POLKADOT_SDK_DIR/target/release/eth-rpc"
|
||||||
|
SUBSTRATE_NODE_BIN="$POLKADOT_SDK_DIR/target/release/substrate-node"
|
||||||
|
|
||||||
if [ ! -x "$REVIVE_DEV_NODE_BIN" ] || [ ! -x "$ETH_RPC_BIN" ]; then
|
if [ ! -x "$REVIVE_DEV_NODE_BIN" ] || [ ! -x "$ETH_RPC_BIN" ] || [ ! -x "$SUBSTRATE_NODE_BIN" ]; then
|
||||||
echo -e "${YELLOW}Required binaries not found in release target. Building...${NC}"
|
echo -e "${YELLOW}Required binaries not found in release target. Building...${NC}"
|
||||||
(cd "$POLKADOT_SDK_DIR" && cargo build --release --package staging-node-cli --package pallet-revive-eth-rpc --package revive-dev-node)
|
(cd "$POLKADOT_SDK_DIR" && cargo build --release --package staging-node-cli --package pallet-revive-eth-rpc --package revive-dev-node)
|
||||||
fi
|
fi
|
||||||
|
|
||||||
for bin in "$REVIVE_DEV_NODE_BIN" "$ETH_RPC_BIN"; do
|
for bin in "$REVIVE_DEV_NODE_BIN" "$ETH_RPC_BIN" "$SUBSTRATE_NODE_BIN"; do
|
||||||
if [ ! -x "$bin" ]; then
|
if [ ! -x "$bin" ]; then
|
||||||
echo -e "${RED}Expected binary not found after build: $bin${NC}"
|
echo -e "${RED}Expected binary not found after build: $bin${NC}"
|
||||||
exit 1
|
exit 1
|
||||||
@@ -82,6 +84,7 @@ RUST_LOG="info,alloy_pubsub::service=error" ./target/release/retester test \
|
|||||||
--concurrency.number-of-threads 5 \
|
--concurrency.number-of-threads 5 \
|
||||||
--concurrency.number-of-concurrent-tasks 500 \
|
--concurrency.number-of-concurrent-tasks 500 \
|
||||||
--wallet.additional-keys 100000 \
|
--wallet.additional-keys 100000 \
|
||||||
|
--kitchensink.path "$SUBSTRATE_NODE_BIN" \
|
||||||
--revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
|
--revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
|
||||||
--eth-rpc.path "$ETH_RPC_BIN" \
|
--eth-rpc.path "$ETH_RPC_BIN" \
|
||||||
> logs.log \
|
> logs.log \
|
||||||
@@ -1,246 +0,0 @@
|
|||||||
"""
|
|
||||||
Utilities to print benchmark metrics from a report JSON into CSV.
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
python scripts/print_benchmark_metrics_csv.py /absolute/path/to/report.json
|
|
||||||
|
|
||||||
The script prints, for each metadata path, case index, and mode combination,
|
|
||||||
CSV rows aligned to mined blocks with the following columns:
|
|
||||||
- block_number
|
|
||||||
- number_of_txs
|
|
||||||
- tps (transaction_per_second)
|
|
||||||
- gps (gas_per_second)
|
|
||||||
- gas_block_fullness
|
|
||||||
- ref_time (if available)
|
|
||||||
- max_ref_time (if available)
|
|
||||||
- proof_size (if available)
|
|
||||||
- max_proof_size (if available)
|
|
||||||
- ref_time_block_fullness (if available)
|
|
||||||
- proof_size_block_fullness (if available)
|
|
||||||
|
|
||||||
Important nuance: TPS and GPS arrays have (number_of_blocks - 1) items. The
|
|
||||||
first block row has no TPS/GPS; the CSV leaves those cells empty for the first
|
|
||||||
row and aligns subsequent values to their corresponding next block.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import json
|
|
||||||
import sys
|
|
||||||
import csv
|
|
||||||
from typing import List, Mapping, TypedDict
|
|
||||||
|
|
||||||
|
|
||||||
class EthereumMinedBlockInformation(TypedDict):
|
|
||||||
"""EVM block information extracted from the report.
|
|
||||||
|
|
||||||
Attributes:
|
|
||||||
block_number: The block height.
|
|
||||||
block_timestamp: The UNIX timestamp of the block.
|
|
||||||
mined_gas: Total gas used (mined) in the block.
|
|
||||||
block_gas_limit: The gas limit of the block.
|
|
||||||
transaction_hashes: List of transaction hashes included in the block.
|
|
||||||
"""
|
|
||||||
|
|
||||||
block_number: int
|
|
||||||
block_timestamp: int
|
|
||||||
mined_gas: int
|
|
||||||
block_gas_limit: int
|
|
||||||
transaction_hashes: List[str]
|
|
||||||
|
|
||||||
|
|
||||||
class SubstrateMinedBlockInformation(TypedDict):
|
|
||||||
"""Substrate-specific block resource usage fields.
|
|
||||||
|
|
||||||
Attributes:
|
|
||||||
ref_time: The consumed ref time in the block.
|
|
||||||
max_ref_time: The maximum ref time allowed for the block.
|
|
||||||
proof_size: The consumed proof size in the block.
|
|
||||||
max_proof_size: The maximum proof size allowed for the block.
|
|
||||||
"""
|
|
||||||
|
|
||||||
ref_time: int
|
|
||||||
max_ref_time: int
|
|
||||||
proof_size: int
|
|
||||||
max_proof_size: int
|
|
||||||
|
|
||||||
|
|
||||||
class MinedBlockInformation(TypedDict):
|
|
||||||
"""Block-level information for a mined block with both EVM and optional Substrate fields."""
|
|
||||||
|
|
||||||
ethereum_block_information: EthereumMinedBlockInformation
|
|
||||||
substrate_block_information: SubstrateMinedBlockInformation
|
|
||||||
|
|
||||||
|
|
||||||
class Metric(TypedDict):
|
|
||||||
"""Metric data of integer values keyed by platform identifier.
|
|
||||||
|
|
||||||
Attributes:
|
|
||||||
minimum: Single scalar minimum per platform.
|
|
||||||
maximum: Single scalar maximum per platform.
|
|
||||||
mean: Single scalar mean per platform.
|
|
||||||
median: Single scalar median per platform.
|
|
||||||
raw: Time-series (or list) of values per platform.
|
|
||||||
"""
|
|
||||||
|
|
||||||
minimum: Mapping[str, int]
|
|
||||||
maximum: Mapping[str, int]
|
|
||||||
mean: Mapping[str, int]
|
|
||||||
median: Mapping[str, int]
|
|
||||||
raw: Mapping[str, List[int]]
|
|
||||||
|
|
||||||
|
|
||||||
class Metrics(TypedDict):
|
|
||||||
"""All metrics that may be present for a given execution report.
|
|
||||||
|
|
||||||
Note that some metrics are optional and present only for specific platforms
|
|
||||||
or execution modes.
|
|
||||||
"""
|
|
||||||
|
|
||||||
transaction_per_second: Metric
|
|
||||||
gas_per_second: Metric
|
|
||||||
gas_block_fullness: Metric
|
|
||||||
ref_time_block_fullness: Metric
|
|
||||||
proof_size_block_fullness: Metric
|
|
||||||
|
|
||||||
|
|
||||||
class ExecutionReport(TypedDict):
|
|
||||||
"""Execution report for a mode containing mined blocks and metrics.
|
|
||||||
|
|
||||||
Attributes:
|
|
||||||
mined_block_information: Mapping from platform identifier to the list of
|
|
||||||
mined blocks observed for that platform.
|
|
||||||
metrics: The computed metrics for the execution.
|
|
||||||
"""
|
|
||||||
|
|
||||||
mined_block_information: Mapping[str, List[MinedBlockInformation]]
|
|
||||||
metrics: Metrics
|
|
||||||
|
|
||||||
|
|
||||||
class CaseReport(TypedDict):
|
|
||||||
"""Report for a single case, keyed by mode string."""
|
|
||||||
|
|
||||||
mode_execution_reports: Mapping[str, ExecutionReport]
|
|
||||||
|
|
||||||
|
|
||||||
class MetadataFileReport(TypedDict):
|
|
||||||
"""Report subtree keyed by case indices for a metadata file path."""
|
|
||||||
|
|
||||||
case_reports: Mapping[str, CaseReport]
|
|
||||||
|
|
||||||
|
|
||||||
class ReportRoot(TypedDict):
|
|
||||||
"""Top-level report schema with execution information keyed by metadata path."""
|
|
||||||
|
|
||||||
execution_information: Mapping[str, MetadataFileReport]
|
|
||||||
|
|
||||||
|
|
||||||
BlockInformation = TypedDict(
|
|
||||||
"BlockInformation",
|
|
||||||
{
|
|
||||||
"Block Number": int,
|
|
||||||
"Timestamp": int,
|
|
||||||
"Datetime": None,
|
|
||||||
"Transaction Count": int,
|
|
||||||
"TPS": int | None,
|
|
||||||
"GPS": int | None,
|
|
||||||
"Ref Time": int,
|
|
||||||
"Max Ref Time": int,
|
|
||||||
"Block Fullness Ref Time": int,
|
|
||||||
"Proof Size": int,
|
|
||||||
"Max Proof Size": int,
|
|
||||||
"Block Fullness Proof Size": int,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
"""A typed dictionary used to hold all of the block information"""
|
|
||||||
|
|
||||||
|
|
||||||
def load_report(path: str) -> ReportRoot:
|
|
||||||
"""Load the report JSON from disk.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
path: Absolute or relative filesystem path to the JSON report file.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
The parsed report as a typed dictionary structure.
|
|
||||||
"""
|
|
||||||
|
|
||||||
with open(path, "r", encoding="utf-8") as f:
|
|
||||||
data: ReportRoot = json.load(f)
|
|
||||||
return data
|
|
||||||
|
|
||||||
|
|
||||||
def main() -> None:
|
|
||||||
report_path: str = sys.argv[1]
|
|
||||||
report: ReportRoot = load_report(report_path)
|
|
||||||
|
|
||||||
# TODO: Remove this in the future, but for now, the target is fixed.
|
|
||||||
target: str = "revive-dev-node-revm-solc"
|
|
||||||
|
|
||||||
csv_writer = csv.writer(sys.stdout)
|
|
||||||
|
|
||||||
for _, metadata_file_report in report["execution_information"].items():
|
|
||||||
for _, case_report in metadata_file_report["case_reports"].items():
|
|
||||||
for _, execution_report in case_report["mode_execution_reports"].items():
|
|
||||||
blocks_information: list[MinedBlockInformation] = execution_report[
|
|
||||||
"mined_block_information"
|
|
||||||
][target]
|
|
||||||
|
|
||||||
resolved_blocks: list[BlockInformation] = []
|
|
||||||
for i, block_information in enumerate(blocks_information):
|
|
||||||
resolved_blocks.append(
|
|
||||||
{
|
|
||||||
"Block Number": block_information[
|
|
||||||
"ethereum_block_information"
|
|
||||||
]["block_number"],
|
|
||||||
"Timestamp": block_information[
|
|
||||||
"ethereum_block_information"
|
|
||||||
]["block_timestamp"],
|
|
||||||
"Datetime": None,
|
|
||||||
"Transaction Count": len(
|
|
||||||
block_information["ethereum_block_information"][
|
|
||||||
"transaction_hashes"
|
|
||||||
]
|
|
||||||
),
|
|
||||||
"TPS": (
|
|
||||||
None
|
|
||||||
if i == 0
|
|
||||||
else execution_report["metrics"][
|
|
||||||
"transaction_per_second"
|
|
||||||
]["raw"][target][i - 1]
|
|
||||||
),
|
|
||||||
"GPS": (
|
|
||||||
None
|
|
||||||
if i == 0
|
|
||||||
else execution_report["metrics"]["gas_per_second"][
|
|
||||||
"raw"
|
|
||||||
][target][i - 1]
|
|
||||||
),
|
|
||||||
"Ref Time": block_information[
|
|
||||||
"substrate_block_information"
|
|
||||||
]["ref_time"],
|
|
||||||
"Max Ref Time": block_information[
|
|
||||||
"substrate_block_information"
|
|
||||||
]["max_ref_time"],
|
|
||||||
"Block Fullness Ref Time": execution_report["metrics"][
|
|
||||||
"ref_time_block_fullness"
|
|
||||||
]["raw"][target][i],
|
|
||||||
"Proof Size": block_information[
|
|
||||||
"substrate_block_information"
|
|
||||||
]["proof_size"],
|
|
||||||
"Max Proof Size": block_information[
|
|
||||||
"substrate_block_information"
|
|
||||||
]["max_proof_size"],
|
|
||||||
"Block Fullness Proof Size": execution_report["metrics"][
|
|
||||||
"proof_size_block_fullness"
|
|
||||||
]["raw"][target][i],
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
csv_writer = csv.DictWriter(sys.stdout, resolved_blocks[0].keys())
|
|
||||||
csv_writer.writeheader()
|
|
||||||
csv_writer.writerows(resolved_blocks)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main()
|
|
||||||
@@ -1,226 +0,0 @@
|
|||||||
"""
|
|
||||||
This script is used to turn the JSON report produced by the revive differential tests tool into an
|
|
||||||
easy to consume markdown document for the purpose of reporting this information in the Polkadot SDK
|
|
||||||
CI. The full models used in the JSON report can be found in the revive differential tests repo and
|
|
||||||
the models used in this script are just a partial reproduction of the full report models.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import TypedDict, Literal, Union
|
|
||||||
|
|
||||||
import json, io
|
|
||||||
|
|
||||||
|
|
||||||
class Report(TypedDict):
|
|
||||||
context: "Context"
|
|
||||||
execution_information: dict[
|
|
||||||
"MetadataFilePathString",
|
|
||||||
dict["ModeString", dict["CaseIdxString", "CaseReport"]],
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
class Context(TypedDict):
|
|
||||||
Test: "TestContext"
|
|
||||||
|
|
||||||
|
|
||||||
class TestContext(TypedDict):
|
|
||||||
corpus_configuration: "CorpusConfiguration"
|
|
||||||
|
|
||||||
|
|
||||||
class CorpusConfiguration(TypedDict):
|
|
||||||
test_specifiers: list["TestSpecifier"]
|
|
||||||
|
|
||||||
|
|
||||||
class CaseReport(TypedDict):
|
|
||||||
status: "CaseStatus"
|
|
||||||
|
|
||||||
|
|
||||||
class CaseStatusSuccess(TypedDict):
|
|
||||||
status: Literal["Succeeded"]
|
|
||||||
steps_executed: int
|
|
||||||
|
|
||||||
|
|
||||||
class CaseStatusFailure(TypedDict):
    """The status variant reported for a case that failed."""

    status: Literal["Failed"]
    # A human-readable description of why the case failed.
    reason: str
|
|
||||||
|
|
||||||
|
|
||||||
class CaseStatusIgnored(TypedDict):
    """The status variant reported for a case that was ignored (not run)."""

    status: Literal["Ignored"]
    # A human-readable description of why the case was ignored.
    reason: str
|
|
||||||
|
|
||||||
|
|
||||||
CaseStatus = Union[CaseStatusSuccess, CaseStatusFailure, CaseStatusIgnored]
"""A union of all of the possible statuses that could be reported for a case."""

TestSpecifier = str
"""A test specifier string. For example resolc-compiler-tests/fixtures/solidity/test.json::0::Y+"""

ModeString = str
"""The mode string. For example Y+ >=0.8.13"""

MetadataFilePathString = str
"""The path to a metadata file. For example resolc-compiler-tests/fixtures/solidity/test.json"""

CaseIdxString = str
"""The index of a case encoded as a string. For example '0'"""
|
|
||||||
|
|
||||||
|
|
||||||
def path_relative_to_resolc_compiler_test_directory(path: str) -> str:
    """
    Return *path* relative to the resolc-compiler-tests solidity fixtures directory.

    Example:
        Input:  ~/polkadot-sdk/revive-differential-tests/resolc-compiler-tests/fixtures/solidity/test.json
        Output: test.json

    A path that does not contain the marker directory is returned unchanged apart
    from having any leading/trailing slashes stripped.
    """
    # Take everything after the last occurrence of the marker. `str.split` returns
    # the whole path as a single element when the marker is absent, which gives us
    # the fallback behavior for free. (The result is already a `str`, so no
    # f-string wrapper is needed.)
    return path.split("resolc-compiler-tests/fixtures/solidity")[-1].strip("/")
|
|
||||||
|
|
||||||
|
|
||||||
def main() -> None:
    """
    Read the JSON report from `report.json` and render it as a markdown document in
    `report.md` for consumption in the Polkadot SDK CI.

    The generated document contains:
    * The list of test specifiers the run was configured with.
    * Aggregate counts of test cases by status.
    * A collapsible table of all failures, each annotated with the compilation
      modes (if any) in which the same case succeeded.
    """
    with open("report.json", "r") as file:
        report: Report = json.load(file)

    # `with` guarantees the document is flushed and closed even if rendering fails.
    with open("report.md", "w") as markdown_document:
        print("# Differential Tests Results", file=markdown_document)

        # All of the test specifiers from the report, made relative to the tests dir.
        test_specifiers: list[str] = [
            path_relative_to_resolc_compiler_test_directory(specifier)
            for specifier in report["context"]["Test"]["corpus_configuration"][
                "test_specifiers"
            ]
        ]
        print("## Specified Tests", file=markdown_document)
        for test_specifier in test_specifiers:
            print(f"* `{test_specifier}`", file=markdown_document)

        # Tally the test cases by their reported status.
        total_number_of_cases: int = 0
        total_number_of_successes: int = 0
        total_number_of_failures: int = 0
        total_number_of_ignores: int = 0
        for mode_to_case_mapping in report["execution_information"].values():
            for case_idx_to_report_mapping in mode_to_case_mapping.values():
                for case_report in case_idx_to_report_mapping.values():
                    status: CaseStatus = case_report["status"]

                    total_number_of_cases += 1
                    if status["status"] == "Succeeded":
                        total_number_of_successes += 1
                    elif status["status"] == "Failed":
                        total_number_of_failures += 1
                    elif status["status"] == "Ignored":
                        total_number_of_ignores += 1
                    else:
                        raise Exception(
                            f"Encountered a status that's unknown to the script: {status}"
                        )

        print("## Counts", file=markdown_document)
        print(
            f"* **Total Number of Test Cases:** {total_number_of_cases}",
            file=markdown_document,
        )
        print(
            f"* **Total Number of Successes:** {total_number_of_successes}",
            file=markdown_document,
        )
        print(
            f"* **Total Number of Failures:** {total_number_of_failures}",
            file=markdown_document,
        )
        print(
            f"* **Total Number of Ignores:** {total_number_of_ignores}",
            file=markdown_document,
        )

        # Group the successful cases by (relative metadata path, case index) so that
        # each failure below can be annotated with the modes in which the same case
        # passed.
        successful_cases: dict[
            MetadataFilePathString, dict[CaseIdxString, set[ModeString]]
        ] = {}
        for metadata_file_path, mode_to_case_mapping in report[
            "execution_information"
        ].items():
            relative_path: str = path_relative_to_resolc_compiler_test_directory(
                metadata_file_path
            )
            for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
                # Normalize the optimization-level suffix into the short +/- notation.
                normalized_mode: str = mode_string.replace(" M3", "+").replace(
                    " M0", "-"
                )
                for case_idx_string, case_report in case_idx_to_report_mapping.items():
                    if case_report["status"]["status"] == "Succeeded":
                        successful_cases.setdefault(relative_path, {}).setdefault(
                            case_idx_string, set()
                        ).add(normalized_mode)

        print("## Failures", file=markdown_document)
        # Implicit string-literal concatenation (not backslash continuation) keeps the
        # source indentation out of the emitted markdown.
        print(
            "The test specifiers seen in this section have the format "
            "'path::case_idx::compilation_mode' and they're compatible with the revive "
            "differential tests framework and can be specified to it directly in the "
            "same way that they're provided through the `--test` argument of the "
            "framework.\n",
            file=markdown_document,
        )
        print(
            "The failures are provided in an expandable section to ensure that the PR "
            "does not get polluted with information. Please click on the section below "
            "for more information",
            file=markdown_document,
        )
        print(
            "<details><summary>Detailed Differential Tests Failure Information</summary>\n\n",
            file=markdown_document,
        )
        print("| Test Specifier | Failure Reason | Note |", file=markdown_document)
        print("| -- | -- | -- |", file=markdown_document)

        for metadata_file_path, mode_to_case_mapping in report[
            "execution_information"
        ].items():
            relative_path: str = path_relative_to_resolc_compiler_test_directory(
                metadata_file_path
            )
            for mode_string, case_idx_to_report_mapping in mode_to_case_mapping.items():
                normalized_mode: str = mode_string.replace(" M3", "+").replace(
                    " M0", "-"
                )
                for case_idx_string, case_report in case_idx_to_report_mapping.items():
                    status: CaseStatus = case_report["status"]
                    if status["status"] != "Failed":
                        continue

                    # Newlines would break the markdown table layout.
                    failure_reason: str = status["reason"].replace("\n", " ")

                    note: str = ""
                    modes_where_this_case_succeeded: set[ModeString] = (
                        successful_cases.get(relative_path, {}).get(
                            case_idx_string, set()
                        )
                    )
                    if modes_where_this_case_succeeded:
                        note = (
                            "This test case succeeded with other compilation modes: "
                            f"{modes_where_this_case_succeeded}"
                        )

                    test_specifier: str = (
                        f"{relative_path}::{case_idx_string}::{normalized_mode}"
                    )
                    print(
                        f"| `{test_specifier}` | `{failure_reason}` | {note} |",
                        file=markdown_document,
                    )
        print("\n\n</details>", file=markdown_document)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
    # Run only when executed as a script, not when imported as a module.
    main()
|
|
||||||
Reference in New Issue
Block a user