Compare commits

...

11 Commits

Author SHA1 Message Date
Omar Abdulla 05b609dd1b Increase substrate's timeout 2025-10-20 16:44:38 +03:00
Omar Abdulla 1b43b0dd3f Drop the read lock in the remaining tasks logger 2025-10-20 16:36:10 +03:00
Omar 260ac5d98e Add support for profiles (#192)
* Add support for profiles

* Set a default workdir for debug profile
2025-10-20 10:27:51 +00:00
Omar 94f116f843 Make tests a submodule of the repo (#191) 2025-10-16 00:11:06 +00:00
Omar 0d7a87a728 Get rid of corpus files (#190)
* Get rid of corpus files

* Update the readme
2025-10-15 22:40:09 +00:00
Omar 29bf5304ec User Managed Nodes (#189)
* Allow for genesis to be exported by the tool

* Allow for substrate-based nodes to be managed by the user

* Rename the commandline argument

* Rename the commandline argument

* Move existing rpc option to revive-dev-node

* Remove unneeded test

* Remove un-required function in cached compiler

* Change the default concurrency limit

* Update the default number of threads

* Update readme

* Remove accidentally committed dir

* Update the readme

* Update the readme
2025-10-15 16:32:20 +00:00
Omar 491c23efb3 Remove the revive network (#188)
* Remove the revive network

* Add a provider method to the `EthereumNode`

* Report the ref time and proof size for substrate chains in block information

* Remove un-needed dependency
2025-10-14 13:50:36 +00:00
Omar 3c86cbb7ef Make output format deserializable (#187)
* Make output format deserializable

* Flush the buffer after writing the entire file output
2025-10-09 15:41:26 +00:00
Omar fde07b7c0d Allow for succeeding tests to be ignored (#186) 2025-10-09 14:35:09 +00:00
Omar ebc24a588b Add different output formats (#185)
* Add different output formats

* Add the mode to the output
2025-10-09 14:24:14 +00:00
Omar 21e25f09e6 Zombienet & Benchmarks Cleanups (#184)
* Minor zombienet cleanups

* Remove un-necessary trace call from the benchmark driver

* Improve the benchmarks driver

* Ignore the lighthouse tests

* Allow for the consensus to be specified for the revive dev node

* Ignore the zombienet tests for the time being
2025-10-09 11:41:01 +00:00
43 changed files with 2178 additions and 1882 deletions
-1
@@ -9,7 +9,6 @@ node_modules
*.log
profile.json.gz
resolc-compiler-tests
workdir
!/schema.json
+3
@@ -1,3 +1,6 @@
[submodule "polkadot-sdk"]
path = polkadot-sdk
url = https://github.com/paritytech/polkadot-sdk.git
[submodule "resolc-compiler-tests"]
path = resolc-compiler-tests
url = https://github.com/paritytech/resolc-compiler-tests
+217 -20
@@ -782,6 +782,15 @@ dependencies = [
"libc",
]
[[package]]
name = "ansi_term"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
dependencies = [
"winapi",
]
[[package]]
name = "anstream"
version = "0.6.18"
@@ -2328,7 +2337,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976"
dependencies = [
"data-encoding",
"syn 1.0.109",
"syn 2.0.101",
]
[[package]]
@@ -2927,6 +2936,22 @@ dependencies = [
"sp-crypto-hashing",
]
[[package]]
name = "frame-decode"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c470df86cf28818dd3cd2fc4667b80dbefe2236c722c3dc1d09e7c6c82d6dfcd"
dependencies = [
"frame-metadata",
"parity-scale-codec",
"scale-decode",
"scale-encode",
"scale-info",
"scale-type-resolver",
"sp-crypto-hashing",
"thiserror 2.0.12",
]
[[package]]
name = "frame-metadata"
version = "23.0.0"
@@ -5564,6 +5589,7 @@ dependencies = [
"clap",
"moka",
"once_cell",
"regex",
"schemars 1.0.4",
"semver 1.0.26",
"serde",
@@ -5602,6 +5628,7 @@ dependencies = [
"semver 1.0.26",
"serde",
"serde_json",
"serde_with",
"strum",
"temp-dir",
]
@@ -5611,6 +5638,7 @@ name = "revive-dt-core"
version = "0.1.0"
dependencies = [
"alloy",
"ansi_term",
"anyhow",
"bson",
"cacache",
@@ -5641,7 +5669,7 @@ dependencies = [
"alloy",
"anyhow",
"futures",
"regex",
"itertools 0.14.0",
"revive-common",
"revive-dt-common",
"schemars 1.0.4",
@@ -5670,6 +5698,7 @@ dependencies = [
"serde_yaml_ng",
"sp-core",
"sp-runtime",
"subxt 0.44.0",
"temp-dir",
"tokio",
"tower 0.5.2",
@@ -5942,7 +5971,7 @@ dependencies = [
"security-framework 3.3.0",
"security-framework-sys",
"webpki-root-certs 0.26.11",
"windows-sys 0.52.0",
"windows-sys 0.59.0",
]
[[package]]
@@ -7339,11 +7368,48 @@ dependencies = [
"serde",
"serde_json",
"sp-crypto-hashing",
"subxt-core",
"subxt-lightclient",
"subxt-macro",
"subxt-metadata",
"subxt-rpcs",
"subxt-core 0.43.0",
"subxt-lightclient 0.43.0",
"subxt-macro 0.43.0",
"subxt-metadata 0.43.0",
"subxt-rpcs 0.43.0",
"thiserror 2.0.12",
"tokio",
"tokio-util",
"tracing",
"url",
"wasm-bindgen-futures",
"web-time",
]
[[package]]
name = "subxt"
version = "0.44.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ddbf938ac1d86a361a84709a71cdbae5d87f370770b563651d1ec052eed9d0b4"
dependencies = [
"async-trait",
"derive-where",
"either",
"frame-metadata",
"futures",
"hex",
"jsonrpsee",
"parity-scale-codec",
"primitive-types 0.13.1",
"scale-bits",
"scale-decode",
"scale-encode",
"scale-info",
"scale-value",
"serde",
"serde_json",
"sp-crypto-hashing",
"subxt-core 0.44.0",
"subxt-lightclient 0.44.0",
"subxt-macro 0.44.0",
"subxt-metadata 0.44.0",
"subxt-rpcs 0.44.0",
"thiserror 2.0.12",
"tokio",
"tokio-util",
@@ -7365,7 +7431,24 @@ dependencies = [
"quote",
"scale-info",
"scale-typegen",
"subxt-metadata",
"subxt-metadata 0.43.0",
"syn 2.0.101",
"thiserror 2.0.12",
]
[[package]]
name = "subxt-codegen"
version = "0.44.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c250ad8cd102d40ae47977b03295a2ff791375f30ddc7474d399fb56efb793b"
dependencies = [
"heck",
"parity-scale-codec",
"proc-macro2",
"quote",
"scale-info",
"scale-typegen",
"subxt-metadata 0.44.0",
"syn 2.0.101",
"thiserror 2.0.12",
]
@@ -7379,7 +7462,7 @@ dependencies = [
"base58",
"blake2",
"derive-where",
"frame-decode",
"frame-decode 0.8.3",
"frame-metadata",
"hashbrown 0.14.5",
"hex",
@@ -7395,7 +7478,37 @@ dependencies = [
"serde",
"serde_json",
"sp-crypto-hashing",
"subxt-metadata",
"subxt-metadata 0.43.0",
"thiserror 2.0.12",
"tracing",
]
[[package]]
name = "subxt-core"
version = "0.44.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5705c5b420294524e41349bf23c6b11aa474ce731de7317f4153390e1927f702"
dependencies = [
"base58",
"blake2",
"derive-where",
"frame-decode 0.9.0",
"frame-metadata",
"hashbrown 0.14.5",
"hex",
"impl-serde",
"keccak-hash",
"parity-scale-codec",
"primitive-types 0.13.1",
"scale-bits",
"scale-decode",
"scale-encode",
"scale-info",
"scale-value",
"serde",
"serde_json",
"sp-crypto-hashing",
"subxt-metadata 0.44.0",
"thiserror 2.0.12",
"tracing",
]
@@ -7417,6 +7530,23 @@ dependencies = [
"tracing",
]
[[package]]
name = "subxt-lightclient"
version = "0.44.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "64e02732a6c9ae46bc282c1a741b3d3e494021b3e87e7e92cfb3620116d92911"
dependencies = [
"futures",
"futures-util",
"serde",
"serde_json",
"smoldot-light",
"thiserror 2.0.12",
"tokio",
"tokio-stream",
"tracing",
]
[[package]]
name = "subxt-macro"
version = "0.43.0"
@@ -7428,9 +7558,26 @@ dependencies = [
"proc-macro-error2",
"quote",
"scale-typegen",
"subxt-codegen",
"subxt-metadata",
"subxt-utils-fetchmetadata",
"subxt-codegen 0.43.0",
"subxt-metadata 0.43.0",
"subxt-utils-fetchmetadata 0.43.0",
"syn 2.0.101",
]
[[package]]
name = "subxt-macro"
version = "0.44.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "501bf358698f5ab02a6199a1fcd3f1b482e2f5b6eb5d185411e6a74a175ec8e8"
dependencies = [
"darling 0.20.11",
"parity-scale-codec",
"proc-macro-error2",
"quote",
"scale-typegen",
"subxt-codegen 0.44.0",
"subxt-metadata 0.44.0",
"subxt-utils-fetchmetadata 0.44.0",
"syn 2.0.101",
]
@@ -7440,7 +7587,22 @@ version = "0.43.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c134068711c0c46906abc0e6e4911204420331530738e18ca903a5469364d9f"
dependencies = [
"frame-decode",
"frame-decode 0.8.3",
"frame-metadata",
"hashbrown 0.14.5",
"parity-scale-codec",
"scale-info",
"sp-crypto-hashing",
"thiserror 2.0.12",
]
[[package]]
name = "subxt-metadata"
version = "0.44.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01fb7c0bfafad78dda7084c6a2444444744af3bbf7b2502399198b9b4c20eddf"
dependencies = [
"frame-decode 0.9.0",
"frame-metadata",
"hashbrown 0.14.5",
"parity-scale-codec",
@@ -7465,8 +7627,32 @@ dependencies = [
"primitive-types 0.13.1",
"serde",
"serde_json",
"subxt-core",
"subxt-lightclient",
"subxt-core 0.43.0",
"subxt-lightclient 0.43.0",
"thiserror 2.0.12",
"tokio-util",
"tracing",
"url",
]
[[package]]
name = "subxt-rpcs"
version = "0.44.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab68a9c20ecedb0cb7d62d64f884e6add91bb70485783bf40aa8eac5c389c6e0"
dependencies = [
"derive-where",
"frame-metadata",
"futures",
"hex",
"impl-serde",
"jsonrpsee",
"parity-scale-codec",
"primitive-types 0.13.1",
"serde",
"serde_json",
"subxt-core 0.44.0",
"subxt-lightclient 0.44.0",
"thiserror 2.0.12",
"tokio-util",
"tracing",
@@ -7496,7 +7682,7 @@ dependencies = [
"serde_json",
"sha2 0.10.9",
"sp-crypto-hashing",
"subxt-core",
"subxt-core 0.43.0",
"thiserror 2.0.12",
"zeroize",
]
@@ -7512,6 +7698,17 @@ dependencies = [
"thiserror 2.0.12",
]
[[package]]
name = "subxt-utils-fetchmetadata"
version = "0.44.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e450f6812a653c5a3e63a079aa3b60a3f4c362722753c3222286eaa1800f9002"
dependencies = [
"hex",
"parity-scale-codec",
"thiserror 2.0.12",
]
[[package]]
name = "syn"
version = "1.0.109"
@@ -9305,7 +9502,7 @@ dependencies = [
"serde_json",
"sha2 0.10.9",
"sp-core",
"subxt",
"subxt 0.43.0",
"subxt-signer",
"thiserror 1.0.69",
"tokio",
@@ -9365,7 +9562,7 @@ dependencies = [
"async-trait",
"futures",
"lazy_static",
"subxt",
"subxt 0.43.0",
"subxt-signer",
"tokio",
"zombienet-configuration",
+4 -1
@@ -22,6 +22,7 @@ revive-dt-node-pool = { version = "0.1.0", path = "crates/node-pool" }
revive-dt-report = { version = "0.1.0", path = "crates/report" }
revive-dt-solc-binaries = { version = "0.1.0", path = "crates/solc-binaries" }
ansi_term = "0.12.1"
anyhow = "1.0"
bson = { version = "2.15.0" }
cacache = { version = "13.1.0" }
@@ -49,6 +50,7 @@ sha2 = { version = "0.10.9" }
sp-core = "36.1.0"
sp-runtime = "41.1.0"
strum = { version = "0.27.2", features = ["derive"] }
subxt = { version = "0.44.0" }
temp-dir = { version = "0.1.16" }
tempfile = "3.3"
thiserror = "2"
@@ -67,13 +69,14 @@ tracing-subscriber = { version = "0.3.19", default-features = false, features =
"env-filter",
] }
indexmap = { version = "2.10.0", default-features = false }
itertools = { version = "0.14.0" }
# revive compiler
revive-solc-json-interface = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
revive-common = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
revive-differential = { git = "https://github.com/paritytech/revive", rev = "3389865af7c3ff6f29a586d82157e8bc573c1a8e" }
zombienet-sdk = { git = "https://github.com/paritytech/zombienet-sdk.git", rev ="891f6554354ce466abd496366dbf8b4f82141241" }
zombienet-sdk = { git = "https://github.com/paritytech/zombienet-sdk.git", rev = "891f6554354ce466abd496366dbf8b4f82141241" }
[workspace.dependencies.alloy]
version = "1.0.37"
+51 -174
@@ -52,192 +52,69 @@ All of the above need to be installed and available in the path in order for the
This tool is being updated quite frequently. Therefore, it's recommended that you don't install the tool and then run it, but rather that you run it from the root of the directory using `cargo run --release`. The help command of the tool gives you all of the information you need to know about each of the options and flags that the tool offers.
```bash
$ cargo run --release -- execute-tests --help
Error: Executes tests in the MatterLabs format differentially on multiple targets concurrently
Usage: retester execute-tests [OPTIONS]
Options:
-w, --working-directory <WORKING_DIRECTORY>
The working directory that the program will use for all of the temporary artifacts needed at runtime.
If not specified, then a temporary directory will be created and used by the program for all temporary artifacts.
[default: ]
-p, --platform <PLATFORMS>
The set of platforms that the differential tests should run on
[default: geth-evm-solc,revive-dev-node-polkavm-resolc]
Possible values:
- geth-evm-solc: The Go-ethereum reference full node EVM implementation with the solc compiler
- kitchensink-polkavm-resolc: The kitchensink node with the PolkaVM backend with the resolc compiler
- kitchensink-revm-solc: The kitchensink node with the REVM backend with the solc compiler
- revive-dev-node-polkavm-resolc: The revive dev node with the PolkaVM backend with the resolc compiler
- revive-dev-node-revm-solc: The revive dev node with the REVM backend with the solc compiler
-c, --corpus <CORPUS>
A list of test corpus JSON files to be tested
-h, --help
Print help (see a summary with '-h')
Solc Configuration:
--solc.version <VERSION>
Specifies the default version of the Solc compiler that should be used if there is no override specified by one of the test cases
[default: 0.8.29]
Resolc Configuration:
--resolc.path <resolc.path>
Specifies the path of the resolc compiler to be used by the tool.
If this is not specified, then the tool assumes that it should use the resolc binary that's provided in the user's $PATH.
[default: resolc]
Geth Configuration:
--geth.path <geth.path>
Specifies the path of the geth node to be used by the tool.
If this is not specified, then the tool assumes that it should use the geth binary that's provided in the user's $PATH.
[default: geth]
--geth.start-timeout-ms <geth.start-timeout-ms>
The amount of time to wait upon startup before considering that the node timed out
[default: 5000]
Kitchensink Configuration:
--kitchensink.path <kitchensink.path>
Specifies the path of the kitchensink node to be used by the tool.
If this is not specified, then the tool assumes that it should use the kitchensink binary that's provided in the user's $PATH.
[default: substrate-node]
--kitchensink.start-timeout-ms <kitchensink.start-timeout-ms>
The amount of time to wait upon startup before considering that the node timed out
[default: 5000]
--kitchensink.dont-use-dev-node
This configures the tool to use Kitchensink instead of using the revive-dev-node
Revive Dev Node Configuration:
--revive-dev-node.path <revive-dev-node.path>
Specifies the path of the revive dev node to be used by the tool.
If this is not specified, then the tool assumes that it should use the revive dev node binary that's provided in the user's $PATH.
[default: revive-dev-node]
--revive-dev-node.start-timeout-ms <revive-dev-node.start-timeout-ms>
The amount of time to wait upon startup before considering that the node timed out
[default: 5000]
Eth RPC Configuration:
--eth-rpc.path <eth-rpc.path>
Specifies the path of the ETH RPC to be used by the tool.
If this is not specified, then the tool assumes that it should use the ETH RPC binary that's provided in the user's $PATH.
[default: eth-rpc]
--eth-rpc.start-timeout-ms <eth-rpc.start-timeout-ms>
The amount of time to wait upon startup before considering that the node timed out
[default: 5000]
Genesis Configuration:
--genesis.path <genesis.path>
Specifies the path of the genesis file to use for the nodes that are started.
This is expected to be the path of a JSON geth genesis file.
Wallet Configuration:
--wallet.default-private-key <DEFAULT_KEY>
The private key of the default signer
[default: 0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d]
--wallet.additional-keys <ADDITIONAL_KEYS>
This argument controls which private keys the nodes should have access to and be added to its wallet signers. With a value of N, private keys (0, N] will be added to the signer set of the node
[default: 100000]
Concurrency Configuration:
--concurrency.number-of-nodes <NUMBER_OF_NODES>
Determines the amount of nodes that will be spawned for each chain
[default: 5]
--concurrency.number-of-threads <NUMBER_OF_THREADS>
Determines the amount of tokio worker threads that will be used
[default: 16]
--concurrency.number-of-concurrent-tasks <NUMBER_CONCURRENT_TASKS>
Determines the amount of concurrent tasks that will be spawned to run tests.
Defaults to 10 x the number of nodes.
--concurrency.ignore-concurrency-limit
Determines if the concurrency limit should be ignored or not
Compilation Configuration:
--compilation.invalidate-cache
Controls if the compilation cache should be invalidated or not
Report Configuration:
--report.include-compiler-input
Controls if the compiler input is included in the final report
--report.include-compiler-output
Controls if the compiler output is included in the final report
```
To run tests with this tool you need a corpus JSON file that defines the tests included in the corpus. The simplest corpus file looks like the following:
```json
{
"name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
"path": "resolc-compiler-tests/fixtures/solidity"
}
```
> [!NOTE]
> Note that the tests can be found in the [`resolc-compiler-tests`](https://github.com/paritytech/resolc-compiler-tests) repository.
The above corpus file instructs the tool to look for all of the test cases contained within all of the metadata files of the specified directory.
The simplest command to run this tool is the following:
```bash
RUST_LOG="info" cargo run --release -- execute-tests \
RUST_LOG="info" cargo run --release -- test \
--test ./resolc-compiler-tests/fixtures/solidity \
--platform geth-evm-solc \
--corpus corp.json \
--working-directory workdir \
--concurrency.number-of-nodes 5 \
--concurrency.ignore-concurrency-limit \
> logs.log \
2> output.log
```
The above command will run the tool executing every one of the tests discovered in the path specified in the corpus file. All of the logs from the execution will be persisted in the `logs.log` file and all of the output of the tool will be persisted to the `output.log` file. If all that you're looking for is to run the tool and check which tests succeeded and failed, then the `output.log` file is what you need to be looking at. However, if you're contributing to the tool then the `logs.log` file will be very valuable.
The above command will run the tool executing every one of the tests discovered in the path provided to the tool. All of the logs from the execution will be persisted in the `logs.log` file and all of the output of the tool will be persisted to the `output.log` file. If all that you're looking for is to run the tool and check which tests succeeded and failed, then the `output.log` file is what you need to be looking at. However, if you're contributing to the tool then the `logs.log` file will be very valuable.
If you only want to run a subset of tests, then you can specify that in your corpus file. The following is an example:
<details>
<summary>User Managed Nodes</summary>
```json
{
"name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
"paths": [
"path/to/a/single/metadata/file/I/want/to/run.json",
"path/to/a/directory/to/find/all/metadata/files/within"
]
}
This section describes how the user can make use of nodes that they manage rather than allowing the tool to spawn and manage the nodes on the user's behalf.
> ⚠️ This is an advanced feature of the tool and could lead to test successes or failures not being reproducible. Please use this feature with caution and only if you understand the implications of running your own node instead of having the framework manage your nodes. ⚠️
If you're an advanced user and you'd like to manage your own nodes instead of having the tool initialize, spawn, and manage them, then you can run your own nodes and provide them to the tool as follows:
```bash
#!/usr/bin/env bash
set -euo pipefail
PLATFORM="revive-dev-node-revm-solc"
retester export-genesis "$PLATFORM" > chainspec.json
# Start revive-dev-node in a detached tmux session
tmux new-session -d -s revive-dev-node \
'RUST_LOG="error,evm=debug,sc_rpc_server=info,runtime::revive=debug" revive-dev-node \
--dev \
--chain chainspec.json \
--force-authoring \
--rpc-methods Unsafe \
--rpc-cors all \
--rpc-max-connections 4294967295 \
--pool-limit 4294967295 \
--pool-kbytes 4294967295'
sleep 5
# Start eth-rpc in a detached tmux session
tmux new-session -d -s eth-rpc \
'RUST_LOG="info,eth-rpc=debug" eth-rpc \
--dev \
--node-rpc-url ws://127.0.0.1:9944 \
--rpc-max-connections 4294967295'
sleep 5
# Run the tests (logs to files as before)
RUST_LOG="info" retester test \
--platform "$PLATFORM" \
--corpus ./revive-differential-tests/fixtures/solidity \
--working-directory ./workdir \
--concurrency.number-of-nodes 1 \
--concurrency.number-of-concurrent-tasks 5 \
--revive-dev-node.existing-rpc-url "http://localhost:8545" \
> logs.log
```
</details>
Binary file not shown.
+1
@@ -14,6 +14,7 @@ anyhow = { workspace = true }
clap = { workspace = true }
moka = { workspace = true, features = ["sync"] }
once_cell = { workspace = true }
regex = { workspace = true }
semver = { workspace = true }
serde = { workspace = true }
schemars = { workspace = true }
+2
@@ -1,11 +1,13 @@
mod identifiers;
mod mode;
mod parsed_test_specifier;
mod private_key_allocator;
mod round_robin_pool;
mod version_or_requirement;
pub use identifiers::*;
pub use mode::*;
pub use parsed_test_specifier::*;
pub use private_key_allocator::*;
pub use round_robin_pool::*;
pub use version_or_requirement::*;
+265
@@ -1,6 +1,11 @@
use crate::iterators::EitherIter;
use crate::types::VersionOrRequirement;
use anyhow::{Context as _, bail};
use regex::Regex;
use schemars::JsonSchema;
use semver::Version;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::fmt::Display;
use std::str::FromStr;
use std::sync::LazyLock;
@@ -33,6 +38,19 @@ impl Display for Mode {
}
}
impl FromStr for Mode {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parsed_mode = ParsedMode::from_str(s)?;
let mut iter = parsed_mode.to_modes();
let (Some(mode), None) = (iter.next(), iter.next()) else {
bail!("Failed to parse the mode")
};
Ok(mode)
}
}
impl Mode {
/// Return all of the available mode combinations.
pub fn all() -> impl Iterator<Item = &'static Mode> {
@@ -171,3 +189,250 @@ impl ModeOptimizerSetting {
!matches!(self, ModeOptimizerSetting::M0)
}
}
/// This represents a mode that has been parsed from test metadata.
///
/// Mode strings can take the following form (in pseudo-regex):
///
/// ```text
/// [YEILV][+-]? (M[0123sz])? <semver>?
/// ```
///
/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
#[serde(try_from = "String", into = "String")]
pub struct ParsedMode {
pub pipeline: Option<ModePipeline>,
pub optimize_flag: Option<bool>,
pub optimize_setting: Option<ModeOptimizerSetting>,
pub version: Option<semver::VersionReq>,
}
impl FromStr for ParsedMode {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
static REGEX: LazyLock<Regex> = LazyLock::new(|| {
Regex::new(r"(?x)
^
(?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use eg Y, E+, E-
\s*
(?P<optimize_setting>M[a-zA-Z0-9])? # Optimize setting eg M0, Ms, Mz
\s*
(?P<version>[>=<^]*\d+(?:\.\d+)*)? # Optional semver version eg >=0.8.0, 0.7, <0.8
$
").unwrap()
});
let Some(caps) = REGEX.captures(s) else {
anyhow::bail!("Cannot parse mode '{s}' from string");
};
let pipeline = match caps.name("pipeline") {
Some(m) => Some(
ModePipeline::from_str(m.as_str())
.context("Failed to parse mode pipeline from string")?,
),
None => None,
};
let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
let optimize_setting = match caps.name("optimize_setting") {
Some(m) => Some(
ModeOptimizerSetting::from_str(m.as_str())
.context("Failed to parse optimizer setting from string")?,
),
None => None,
};
let version = match caps.name("version") {
Some(m) => Some(
semver::VersionReq::parse(m.as_str())
.map_err(|e| {
anyhow::anyhow!(
"Cannot parse the version requirement '{}': {e}",
m.as_str()
)
})
.context("Failed to parse semver requirement from mode string")?,
),
None => None,
};
Ok(ParsedMode {
pipeline,
optimize_flag,
optimize_setting,
version,
})
}
}
impl Display for ParsedMode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut has_written = false;
if let Some(pipeline) = self.pipeline {
pipeline.fmt(f)?;
if let Some(optimize_flag) = self.optimize_flag {
f.write_str(if optimize_flag { "+" } else { "-" })?;
}
has_written = true;
}
if let Some(optimize_setting) = self.optimize_setting {
if has_written {
f.write_str(" ")?;
}
optimize_setting.fmt(f)?;
has_written = true;
}
if let Some(version) = &self.version {
if has_written {
f.write_str(" ")?;
}
version.fmt(f)?;
}
Ok(())
}
}
impl From<ParsedMode> for String {
fn from(parsed_mode: ParsedMode) -> Self {
parsed_mode.to_string()
}
}
impl TryFrom<String> for ParsedMode {
type Error = anyhow::Error;
fn try_from(value: String) -> Result<Self, Self::Error> {
ParsedMode::from_str(&value)
}
}
impl ParsedMode {
/// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
let pipeline_iter = self.pipeline.as_ref().map_or_else(
|| EitherIter::A(ModePipeline::test_cases()),
|p| EitherIter::B(std::iter::once(*p)),
);
let optimize_flag_setting = self.optimize_flag.map(|flag| {
if flag {
ModeOptimizerSetting::M3
} else {
ModeOptimizerSetting::M0
}
});
let optimize_flag_iter = match optimize_flag_setting {
Some(setting) => EitherIter::A(std::iter::once(setting)),
None => EitherIter::B(ModeOptimizerSetting::test_cases()),
};
let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
|| EitherIter::A(optimize_flag_iter),
|s| EitherIter::B(std::iter::once(*s)),
);
pipeline_iter.flat_map(move |pipeline| {
optimize_settings_iter
.clone()
.map(move |optimize_setting| Mode {
pipeline,
optimize_setting,
version: self.version.clone(),
})
})
}
/// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
/// This avoids any duplicate entries.
pub fn many_to_modes<'a>(
parsed: impl Iterator<Item = &'a ParsedMode>,
) -> impl Iterator<Item = Mode> {
let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
modes.into_iter()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parsed_mode_from_str() {
let strings = vec![
("Mz", "Mz"),
("Y", "Y"),
("Y+", "Y+"),
("Y-", "Y-"),
("E", "E"),
("E+", "E+"),
("E-", "E-"),
("Y M0", "Y M0"),
("Y M1", "Y M1"),
("Y M2", "Y M2"),
("Y M3", "Y M3"),
("Y Ms", "Y Ms"),
("Y Mz", "Y Mz"),
("E M0", "E M0"),
("E M1", "E M1"),
("E M2", "E M2"),
("E M3", "E M3"),
("E Ms", "E Ms"),
("E Mz", "E Mz"),
// When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
("Y 0.8.0", "Y ^0.8.0"),
("E+ 0.8.0", "E+ ^0.8.0"),
("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
("E Mz <0.7.0", "E Mz <0.7.0"),
// We can parse +- _and_ M1/M2 but the latter takes priority.
("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
("E- M2 0.7.0", "E- M2 ^0.7.0"),
// We don't see this in the wild but it is parsed.
("<=0.8", "<=0.8"),
];
for (actual, expected) in strings {
let parsed = ParsedMode::from_str(actual)
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
assert_eq!(
expected,
parsed.to_string(),
"Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
);
}
}
#[test]
fn test_parsed_mode_to_test_modes() {
let strings = vec![
("Mz", vec!["Y Mz", "E Mz"]),
("Y", vec!["Y M0", "Y M3"]),
("E", vec!["E M0", "E M3"]),
("Y+", vec!["Y M3"]),
("Y-", vec!["Y M0"]),
("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
(
"<=0.8",
vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
),
];
for (actual, expected) in strings {
let parsed = ParsedMode::from_str(actual)
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();
assert_eq!(
expected_set, actual_set,
"Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
);
}
}
}
@@ -0,0 +1,133 @@
use std::{fmt::Display, path::PathBuf, str::FromStr};
use anyhow::{Context as _, bail};
use crate::types::Mode;
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum ParsedTestSpecifier {
/// All of the test cases in the file should be run across all of the specified modes
FileOrDirectory {
/// The path of the metadata file containing the test cases.
metadata_or_directory_file_path: PathBuf,
},
/// Only a specific case within the metadata file should be run across all of the modes in the
/// file.
Case {
/// The path of the metadata file containing the test cases.
metadata_file_path: PathBuf,
/// The index of the specific case to run.
case_idx: usize,
},
/// A specific case and a specific mode should be run. This is the most specific out of all of
/// the specifier types.
CaseWithMode {
/// The path of the metadata file containing the test cases.
metadata_file_path: PathBuf,
/// The index of the specific case to run.
case_idx: usize,
/// The parsed mode that the test should be run in.
mode: Mode,
},
}
impl Display for ParsedTestSpecifier {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ParsedTestSpecifier::FileOrDirectory {
metadata_or_directory_file_path,
} => {
write!(f, "{}", metadata_or_directory_file_path.display())
}
ParsedTestSpecifier::Case {
metadata_file_path,
case_idx,
} => {
write!(f, "{}::{}", metadata_file_path.display(), case_idx)
}
ParsedTestSpecifier::CaseWithMode {
metadata_file_path,
case_idx,
mode,
} => {
write!(
f,
"{}::{}::{}",
metadata_file_path.display(),
case_idx,
mode
)
}
}
}
}
impl FromStr for ParsedTestSpecifier {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut split_iter = s.split("::");
let Some(path_string) = split_iter.next() else {
bail!("Could not find the path in the test specifier")
};
let path = PathBuf::from(path_string)
.canonicalize()
.context("Failed to canonicalize the path of the test")?;
let Some(case_idx_string) = split_iter.next() else {
return Ok(Self::FileOrDirectory {
metadata_or_directory_file_path: path,
});
};
let case_idx = usize::from_str(case_idx_string)
.context("Failed to parse the case idx of the test specifier from string")?;
// At this point the provided path must be a file.
if !path.is_file() {
bail!(
"Test specifier with a path and case idx must point to a file and not a directory"
)
}
let Some(mode_string) = split_iter.next() else {
return Ok(Self::Case {
metadata_file_path: path,
case_idx,
});
};
let mode = Mode::from_str(mode_string)
.context("Failed to parse the mode string in the parsed test specifier")?;
Ok(Self::CaseWithMode {
metadata_file_path: path,
case_idx,
mode,
})
}
}
impl From<ParsedTestSpecifier> for String {
fn from(value: ParsedTestSpecifier) -> Self {
value.to_string()
}
}
impl TryFrom<String> for ParsedTestSpecifier {
type Error = anyhow::Error;
fn try_from(value: String) -> Result<Self, Self::Error> {
value.parse()
}
}
impl TryFrom<&str> for ParsedTestSpecifier {
type Error = anyhow::Error;
fn try_from(value: &str) -> Result<Self, Self::Error> {
value.parse()
}
}
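A minimal usage sketch, assuming the types above are in scope (the path is illustrative and must exist on disk, since `from_str` canonicalizes it):

```rust
use std::str::FromStr;

fn parse_specifiers() -> anyhow::Result<()> {
    // Directory or file: run every case of every discovered metadata file.
    let _all = ParsedTestSpecifier::from_str("fixtures/solidity")?;
    // File plus case index: run only case 2 of that metadata file.
    let _case = ParsedTestSpecifier::from_str("fixtures/solidity/call.json::2")?;
    // File, case index, and mode: additionally pin the mode ("Y+" expands to
    // exactly one mode, so Mode::from_str accepts it).
    let pinned = ParsedTestSpecifier::from_str("fixtures/solidity/call.json::2::Y+")?;
    assert!(matches!(pinned, ParsedTestSpecifier::CaseWithMode { case_idx: 2, .. }));
    Ok(())
}
```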
@@ -7,7 +7,10 @@ pragma solidity >=0.6.9;
import "./callable.sol";
contract Main {
function main(uint[1] calldata p1, Callable callable) public returns(uint) {
function main(
uint[1] calldata p1,
Callable callable
) public pure returns (uint) {
return callable.f(p1);
}
}
+1
@@ -18,6 +18,7 @@ semver = { workspace = true }
temp-dir = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_with = { workspace = true }
strum = { workspace = true }
[lints]
+379 -77
@@ -12,19 +12,18 @@ use std::{
use alloy::{
genesis::Genesis,
hex::ToHexExt,
network::EthereumWallet,
primitives::{FixedBytes, U256},
primitives::{B256, FixedBytes, U256},
signers::local::PrivateKeySigner,
};
use clap::{Parser, ValueEnum, ValueHint};
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_common::types::{ParsedTestSpecifier, PlatformIdentifier};
use semver::Version;
use serde::{Serialize, Serializer};
use serde::{Deserialize, Serialize, Serializer};
use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
use temp_dir::TempDir;
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
#[command(name = "retester")]
pub enum Context {
/// Executes tests in the MatterLabs format differentially on multiple targets concurrently.
@@ -35,6 +34,9 @@ pub enum Context {
/// Exports the JSON schema of the MatterLabs test format used by the tool.
ExportJsonSchema,
/// Exports the genesis file of the desired platform.
ExportGenesis(Box<ExportGenesisContext>),
}
impl Context {
@@ -45,6 +47,15 @@ impl Context {
pub fn report_configuration(&self) -> &ReportConfiguration {
self.as_ref()
}
pub fn update_for_profile(&mut self) {
match self {
Context::Test(ctx) => ctx.update_for_profile(),
Context::Benchmark(ctx) => ctx.update_for_profile(),
Context::ExportJsonSchema => {}
Context::ExportGenesis(..) => {}
}
}
}
impl AsRef<WorkingDirectoryConfiguration> for Context {
@@ -52,7 +63,7 @@ impl AsRef<WorkingDirectoryConfiguration> for Context {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
}
}
}
@@ -62,7 +73,7 @@ impl AsRef<CorpusConfiguration> for Context {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
}
}
}
@@ -72,7 +83,7 @@ impl AsRef<SolcConfiguration> for Context {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
}
}
}
@@ -82,7 +93,7 @@ impl AsRef<ResolcConfiguration> for Context {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
}
}
}
@@ -92,6 +103,7 @@ impl AsRef<GethConfiguration> for Context {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportGenesis(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -102,6 +114,7 @@ impl AsRef<KurtosisConfiguration> for Context {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportGenesis(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -112,6 +125,7 @@ impl AsRef<PolkadotParachainConfiguration> for Context {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportGenesis(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -122,6 +136,7 @@ impl AsRef<KitchensinkConfiguration> for Context {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportGenesis(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -132,6 +147,7 @@ impl AsRef<ReviveDevNodeConfiguration> for Context {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportGenesis(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -142,7 +158,7 @@ impl AsRef<EthRpcConfiguration> for Context {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
}
}
}
@@ -151,7 +167,7 @@ impl AsRef<GenesisConfiguration> for Context {
fn as_ref(&self) -> &GenesisConfiguration {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(..) => {
Self::Benchmark(..) | Self::ExportGenesis(..) => {
static GENESIS: LazyLock<GenesisConfiguration> = LazyLock::new(Default::default);
&GENESIS
}
@@ -165,6 +181,7 @@ impl AsRef<WalletConfiguration> for Context {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportGenesis(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
}
}
@@ -175,7 +192,7 @@ impl AsRef<ConcurrencyConfiguration> for Context {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
}
}
}
@@ -185,7 +202,7 @@ impl AsRef<CompilationConfiguration> for Context {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
}
}
}
@@ -195,13 +212,40 @@ impl AsRef<ReportConfiguration> for Context {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(context) => context.as_ref().as_ref(),
Self::ExportJsonSchema => unreachable!(),
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
}
}
}
#[derive(Clone, Debug, Parser, Serialize)]
impl AsRef<IgnoreSuccessConfiguration> for Context {
fn as_ref(&self) -> &IgnoreSuccessConfiguration {
match self {
Self::Test(context) => context.as_ref().as_ref(),
Self::Benchmark(..) => unreachable!(),
Self::ExportJsonSchema | Self::ExportGenesis(..) => unreachable!(),
}
}
}
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct TestExecutionContext {
/// The commandline profile to use. Different profiles change the defaults of the various cli
/// arguments.
#[arg(long = "profile", default_value_t = Profile::Default)]
pub profile: Profile,
/// The set of platforms that the differential tests should run on.
#[arg(
short = 'p',
long = "platform",
default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"]
)]
pub platforms: Vec<PlatformIdentifier>,
/// The output format to use for the tool's output.
#[arg(short, long, default_value_t = OutputFormat::CargoTestLike)]
pub output_format: OutputFormat,
/// The working directory that the program will use for all of the temporary artifacts needed at
/// runtime.
///
@@ -215,14 +259,6 @@ pub struct TestExecutionContext {
)]
pub working_directory: WorkingDirectoryConfiguration,
/// The set of platforms that the differential tests should run on.
#[arg(
short = 'p',
long = "platform",
default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"]
)]
pub platforms: Vec<PlatformIdentifier>,
/// Configuration parameters for the corpus files to use.
#[clap(flatten, next_help_heading = "Corpus Configuration")]
pub corpus_configuration: CorpusConfiguration,
@@ -278,10 +314,55 @@ pub struct TestExecutionContext {
/// Configuration parameters for the report.
#[clap(flatten, next_help_heading = "Report Configuration")]
pub report_configuration: ReportConfiguration,
/// Configuration parameters for ignoring certain test cases based on the report
#[clap(flatten, next_help_heading = "Ignore Success Configuration")]
pub ignore_success_configuration: IgnoreSuccessConfiguration,
}
#[derive(Clone, Debug, Parser, Serialize)]
impl TestExecutionContext {
pub fn update_for_profile(&mut self) {
match self.profile {
Profile::Default => {}
Profile::Debug => {
let default_concurrency_config =
ConcurrencyConfiguration::parse_from(["concurrency-configuration"]);
let working_directory_config = WorkingDirectoryConfiguration::default();
if self.concurrency_configuration.number_of_nodes
== default_concurrency_config.number_of_nodes
{
self.concurrency_configuration.number_of_nodes = 1;
}
if self.concurrency_configuration.number_of_threads
== default_concurrency_config.number_of_threads
{
self.concurrency_configuration.number_of_threads = 5;
}
if self.concurrency_configuration.number_concurrent_tasks
== default_concurrency_config.number_concurrent_tasks
{
self.concurrency_configuration.number_concurrent_tasks = 1;
}
if working_directory_config == self.working_directory {
let home_directory =
PathBuf::from(std::env::var("HOME").expect("Home dir not found"));
let working_directory = home_directory.join(".retester-workdir");
self.working_directory = WorkingDirectoryConfiguration::Path(working_directory)
}
}
}
}
}
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct BenchmarkingContext {
/// The commandline profile to use. Different profiles change the defaults of the various cli
/// arguments.
#[arg(long = "profile", default_value_t = Profile::Default)]
pub profile: Profile,
/// The working directory that the program will use for all of the temporary artifacts needed at
/// runtime.
///
@@ -361,6 +442,72 @@ pub struct BenchmarkingContext {
pub report_configuration: ReportConfiguration,
}
impl BenchmarkingContext {
pub fn update_for_profile(&mut self) {
match self.profile {
Profile::Default => {}
Profile::Debug => {
let default_concurrency_config =
ConcurrencyConfiguration::parse_from(["concurrency-configuration"]);
let working_directory_config = WorkingDirectoryConfiguration::default();
if self.concurrency_configuration.number_of_nodes
== default_concurrency_config.number_of_nodes
{
self.concurrency_configuration.number_of_nodes = 1;
}
if self.concurrency_configuration.number_of_threads
== default_concurrency_config.number_of_threads
{
self.concurrency_configuration.number_of_threads = 5;
}
if self.concurrency_configuration.number_concurrent_tasks
== default_concurrency_config.number_concurrent_tasks
{
self.concurrency_configuration.number_concurrent_tasks = 1;
}
if working_directory_config == self.working_directory {
let home_directory =
PathBuf::from(std::env::var("HOME").expect("Home dir not found"));
let working_directory = home_directory.join(".retester-workdir");
self.working_directory = WorkingDirectoryConfiguration::Path(working_directory)
}
}
}
}
}
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct ExportGenesisContext {
/// The platform of choice to export the genesis for.
pub platform: PlatformIdentifier,
/// Configuration parameters for the geth node.
#[clap(flatten, next_help_heading = "Geth Configuration")]
pub geth_configuration: GethConfiguration,
/// Configuration parameters for the lighthouse node.
#[clap(flatten, next_help_heading = "Lighthouse Configuration")]
pub lighthouse_configuration: KurtosisConfiguration,
/// Configuration parameters for the Kitchensink.
#[clap(flatten, next_help_heading = "Kitchensink Configuration")]
pub kitchensink_configuration: KitchensinkConfiguration,
/// Configuration parameters for the Polkadot Parachain.
#[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")]
pub polkadot_parachain_configuration: PolkadotParachainConfiguration,
/// Configuration parameters for the Revive Dev Node.
#[clap(flatten, next_help_heading = "Revive Dev Node Configuration")]
pub revive_dev_node_configuration: ReviveDevNodeConfiguration,
/// Configuration parameters for the wallet.
#[clap(flatten, next_help_heading = "Wallet Configuration")]
pub wallet_configuration: WalletConfiguration,
}
impl Default for TestExecutionContext {
fn default() -> Self {
Self::parse_from(["execution-context"])
@@ -457,9 +604,15 @@ impl AsRef<ReportConfiguration> for TestExecutionContext {
}
}
impl AsRef<IgnoreSuccessConfiguration> for TestExecutionContext {
fn as_ref(&self) -> &IgnoreSuccessConfiguration {
&self.ignore_success_configuration
}
}
impl Default for BenchmarkingContext {
fn default() -> Self {
Self::parse_from(["execution-context"])
Self::parse_from(["benchmarking-context"])
}
}
@@ -547,16 +700,71 @@ impl AsRef<ReportConfiguration> for BenchmarkingContext {
}
}
impl Default for ExportGenesisContext {
fn default() -> Self {
Self::parse_from(["export-genesis-context"])
}
}
impl AsRef<GethConfiguration> for ExportGenesisContext {
fn as_ref(&self) -> &GethConfiguration {
&self.geth_configuration
}
}
impl AsRef<KurtosisConfiguration> for ExportGenesisContext {
fn as_ref(&self) -> &KurtosisConfiguration {
&self.lighthouse_configuration
}
}
impl AsRef<KitchensinkConfiguration> for ExportGenesisContext {
fn as_ref(&self) -> &KitchensinkConfiguration {
&self.kitchensink_configuration
}
}
impl AsRef<PolkadotParachainConfiguration> for ExportGenesisContext {
fn as_ref(&self) -> &PolkadotParachainConfiguration {
&self.polkadot_parachain_configuration
}
}
impl AsRef<ReviveDevNodeConfiguration> for ExportGenesisContext {
fn as_ref(&self) -> &ReviveDevNodeConfiguration {
&self.revive_dev_node_configuration
}
}
impl AsRef<WalletConfiguration> for ExportGenesisContext {
fn as_ref(&self) -> &WalletConfiguration {
&self.wallet_configuration
}
}
/// A set of configuration parameters for the corpus files to use for the execution.
#[derive(Clone, Debug, Parser, Serialize)]
#[serde_with::serde_as]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct CorpusConfiguration {
/// A list of test corpus JSON files to be tested.
#[arg(short = 'c', long = "corpus")]
pub paths: Vec<PathBuf>,
/// A list of test specifiers for the tests that the tool should run.
///
/// Test specifiers follow the following format:
///
/// - `{directory_path|metadata_file_path}`: A path to a metadata file where all of the cases
/// live and should be run. Alternatively, it points to a directory instructing the framework
/// to discover all of the metadata files that live there and execute them.
/// - `{metadata_file_path}::{case_idx}`: The path to a metadata file and then a case idx
/// separated by two colons. This specifies that only this specific test case within the
/// metadata file should be executed.
/// - `{metadata_file_path}::{case_idx}::{mode}`: This is very similar to the above specifier
/// with the exception that in this case the mode is also specified and will be used in the
/// test (see the example after this struct).
#[serde_as(as = "Vec<serde_with::DisplayFromStr>")]
#[arg(short = 't', long = "test")]
pub test_specifiers: Vec<ParsedTestSpecifier>,
}
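A sketch of the three specifier forms on the command line (binary name as in the README; paths and case indices are illustrative, and modes containing spaces need shell quoting):

```bash
# Run everything discovered under a directory (or in a single metadata file)
retester test --test fixtures/solidity
# Run only case 3 of one metadata file
retester test --test fixtures/solidity/call.json::3
# Run case 3 pinned to a single mode
retester test --test "fixtures/solidity/call.json::3::Y+ M3"
```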
/// A set of configuration parameters for Solc.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct SolcConfiguration {
/// Specifies the default version of the Solc compiler that should be used if there is no
/// override specified by one of the test cases.
@@ -565,7 +773,7 @@ pub struct SolcConfiguration {
}
/// A set of configuration parameters for Resolc.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct ResolcConfiguration {
/// Specifies the path of the resolc compiler to be used by the tool.
///
@@ -576,7 +784,7 @@ pub struct ResolcConfiguration {
}
/// A set of configuration parameters for Polkadot Parachain.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct PolkadotParachainConfiguration {
/// Specifies the path of the polkadot-parachain node to be used by the tool.
///
@@ -600,7 +808,7 @@ pub struct PolkadotParachainConfiguration {
}
/// A set of configuration parameters for Geth.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct GethConfiguration {
/// Specifies the path of the geth node to be used by the tool.
///
@@ -620,7 +828,7 @@ pub struct GethConfiguration {
}
/// A set of configuration parameters for kurtosis.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct KurtosisConfiguration {
/// Specifies the path of the kurtosis node to be used by the tool.
///
@@ -635,7 +843,7 @@ pub struct KurtosisConfiguration {
}
/// A set of configuration parameters for Kitchensink.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct KitchensinkConfiguration {
/// Specifies the path of the kitchensink node to be used by the tool.
///
@@ -659,7 +867,7 @@ pub struct KitchensinkConfiguration {
}
/// A set of configuration parameters for the revive dev node.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct ReviveDevNodeConfiguration {
/// Specifies the path of the revive dev node to be used by the tool.
///
@@ -680,10 +888,36 @@ pub struct ReviveDevNodeConfiguration {
value_parser = parse_duration
)]
pub start_timeout_ms: Duration,
/// The consensus to use for the spawned revive-dev-node.
#[clap(
id = "revive-dev-node.consensus",
long = "revive-dev-node.consensus",
default_value = "instant-seal"
)]
pub consensus: String,
/// Specifies the connection string of an existing node that's not managed by the framework.
///
/// If this argument is specified then the framework will not spawn certain nodes itself but
/// will instead use the existing nodes through their provided connection strings.
///
/// This means that if `ConcurrencyConfiguration.number_of_nodes` is 10 and we only specify the
/// connection strings of 2 nodes here, then nodes 0 and 1 will use the provided connection
/// strings and nodes 2 through 10 (exclusive) will all be spawned and managed by the framework.
///
/// Thus, if you want all of the transactions and tests to happen against the node that you
/// spawned and manage then you need to specify a `ConcurrencyConfiguration.number_of_nodes` of
/// 1.
#[clap(
id = "revive-dev-node.existing-rpc-url",
long = "revive-dev-node.existing-rpc-url"
)]
pub existing_rpc_url: Vec<String>,
}
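For example, a sketch of mixing user-managed and framework-managed nodes under the numbering rule described above (URLs illustrative):

```bash
# Nodes 0 and 1 use the user-managed endpoints; nodes 2 through 4 are
# spawned and managed by the framework.
retester test --test fixtures/solidity \
  --concurrency.number-of-nodes 5 \
  --revive-dev-node.existing-rpc-url http://localhost:8545 \
  --revive-dev-node.existing-rpc-url http://localhost:8546
```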
/// A set of configuration parameters for the ETH RPC.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct EthRpcConfiguration {
/// Specifies the path of the ETH RPC to be used by the tool.
///
@@ -703,7 +937,7 @@ pub struct EthRpcConfiguration {
}
/// A set of configuration parameters for the genesis.
#[derive(Clone, Debug, Default, Parser, Serialize)]
#[derive(Clone, Debug, Default, Parser, Serialize, Deserialize)]
pub struct GenesisConfiguration {
/// Specifies the path of the genesis file to use for the nodes that are started.
///
@@ -741,15 +975,14 @@ impl GenesisConfiguration {
}
/// A set of configuration parameters for the wallet.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct WalletConfiguration {
/// The private key of the default signer.
#[clap(
long = "wallet.default-private-key",
default_value = "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d"
)]
#[serde(serialize_with = "serialize_private_key")]
default_key: PrivateKeySigner,
default_key: B256,
/// This argument controls which private keys the nodes should have access to and be added to
/// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set
@@ -767,7 +1000,8 @@ impl WalletConfiguration {
pub fn wallet(&self) -> Arc<EthereumWallet> {
self.wallet
.get_or_init(|| {
let mut wallet = EthereumWallet::new(self.default_key.clone());
let mut wallet =
EthereumWallet::new(PrivateKeySigner::from_bytes(&self.default_key).unwrap());
for signer in (1..=self.additional_keys)
.map(|id| U256::from(id))
.map(|id| id.to_be_bytes::<32>())
@@ -785,15 +1019,8 @@ impl WalletConfiguration {
}
}
fn serialize_private_key<S>(value: &PrivateKeySigner, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
value.to_bytes().encode_hex().serialize(serializer)
}
/// A set of configuration for concurrency.
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct ConcurrencyConfiguration {
/// Determines the amount of nodes that will be spawned for each chain.
#[clap(long = "concurrency.number-of-nodes", default_value_t = 5)]
@@ -803,42 +1030,38 @@ pub struct ConcurrencyConfiguration {
#[arg(
long = "concurrency.number-of-threads",
default_value_t = std::thread::available_parallelism()
.map(|n| n.get())
.map(|n| n.get() * 4 / 6)
.unwrap_or(1)
)]
pub number_of_threads: usize,
/// Determines the amount of concurrent tasks that will be spawned to run tests.
/// Determines the amount of concurrent tasks that will be spawned to run tests. This means that
/// at any given time there are at most `concurrency.number-of-concurrent-tasks` tests executing
/// concurrently.
///
/// Defaults to 10 x the number of nodes.
#[arg(long = "concurrency.number-of-concurrent-tasks")]
number_concurrent_tasks: Option<usize>,
/// Determines if the concurrency limit should be ignored or not.
#[arg(long = "concurrency.ignore-concurrency-limit")]
ignore_concurrency_limit: bool,
/// Note that a task limit of `0` means no limit on the number of concurrent tasks.
#[arg(long = "concurrency.number-of-concurrent-tasks", default_value_t = 500)]
number_concurrent_tasks: usize,
}
impl ConcurrencyConfiguration {
pub fn concurrency_limit(&self) -> Option<usize> {
match self.ignore_concurrency_limit {
true => None,
false => Some(
self.number_concurrent_tasks
.unwrap_or(20 * self.number_of_nodes),
),
if self.number_concurrent_tasks == 0 {
None
} else {
Some(self.number_concurrent_tasks)
}
}
}
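A brief illustration of these semantics, with illustrative paths:

```bash
# At most 50 tests execute concurrently
retester test --test fixtures/solidity --concurrency.number-of-concurrent-tasks 50
# A value of 0 lifts the limit entirely (concurrency_limit() returns None)
retester test --test fixtures/solidity --concurrency.number-of-concurrent-tasks 0
```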
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct CompilationConfiguration {
/// Controls if the compilation cache should be invalidated or not.
#[arg(long = "compilation.invalidate-cache")]
pub invalidate_compilation_cache: bool,
}
#[derive(Clone, Debug, Parser, Serialize)]
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct ReportConfiguration {
/// Controls if the compiler input is included in the final report.
#[clap(long = "report.include-compiler-input")]
@@ -849,8 +1072,15 @@ pub struct ReportConfiguration {
pub include_compiler_output: bool,
}
#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
pub struct IgnoreSuccessConfiguration {
/// The path of a report previously generated by the tool, used to ignore the cases that succeeded.
#[clap(long = "ignore-success.report-path")]
pub path: Option<PathBuf>,
}
/// Represents the working directory that the program uses.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum WorkingDirectoryConfiguration {
/// A temporary directory is used as the working directory. This will be removed when dropped.
TemporaryDirectory(Arc<TempDir>),
@@ -858,6 +1088,24 @@ pub enum WorkingDirectoryConfiguration {
Path(PathBuf),
}
impl Serialize for WorkingDirectoryConfiguration {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.as_path().serialize(serializer)
}
}
impl<'a> Deserialize<'a> for WorkingDirectoryConfiguration {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'a>,
{
PathBuf::deserialize(deserializer).map(Self::Path)
}
}
impl WorkingDirectoryConfiguration {
pub fn as_path(&self) -> &Path {
self.as_ref()
@@ -907,15 +1155,6 @@ impl Display for WorkingDirectoryConfiguration {
}
}
impl Serialize for WorkingDirectoryConfiguration {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.as_path().serialize(serializer)
}
}
fn parse_duration(s: &str) -> anyhow::Result<Duration> {
u64::from_str(s)
.map(Duration::from_millis)
@@ -950,3 +1189,66 @@ pub enum TestingPlatform {
/// A polkadot/Substrate based network
Zombienet,
}
/// The output format to use for the test execution output.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Serialize,
Deserialize,
ValueEnum,
EnumString,
Display,
AsRefStr,
IntoStaticStr,
)]
#[strum(serialize_all = "kebab-case")]
pub enum OutputFormat {
/// The legacy format that was used in the past for the output.
Legacy,
/// An output format that heavily resembles the output from `cargo test`.
CargoTestLike,
}
/// Command line profiles used to override the default values provided for the commands.
#[derive(
Clone,
Copy,
Debug,
Default,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Serialize,
Deserialize,
ValueEnum,
EnumString,
Display,
AsRefStr,
IntoStaticStr,
)]
#[strum(serialize_all = "kebab-case")]
pub enum Profile {
/// The default profile used by the framework. This profile is optimized to make test
/// and workload execution happen as fast as possible.
#[default]
Default,
/// A debug profile optimized for debugging specific tests. This profile
/// sets up the framework with the following:
///
/// * `concurrency.number-of-nodes` set to 1 node.
/// * `concurrency.number-of-concurrent-tasks` set to 1 such that tests execute sequentially.
/// * `concurrency.number-of-threads` set to 5.
/// * `working-directory` set to ~/.retester-workdir
Debug,
}
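`main` applies these overrides via `context.update_for_profile()` (see the binary's diff further down); a hypothetical sketch of the `Debug` case, with field names assumed for illustration only:

```rust
// Hypothetical sketch; the real update_for_profile and field names may differ.
fn apply_debug_profile(concurrency: &mut ConcurrencyConfiguration) {
    concurrency.number_of_nodes = 1; // a single node
    concurrency.number_concurrent_tasks = 1; // tests execute sequentially
    concurrency.number_of_threads = 5;
    // the working directory is set to ~/.retester-workdir separately
}
```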
+1
View File
@@ -21,6 +21,7 @@ revive-dt-node = { workspace = true }
revive-dt-node-interaction = { workspace = true }
revive-dt-report = { workspace = true }
ansi_term = { workspace = true }
alloy = { workspace = true }
anyhow = { workspace = true }
bson = { workspace = true }
+57 -116
View File
@@ -22,6 +22,7 @@ use alloy::{
},
};
use anyhow::{Context as _, Result, bail};
use futures::TryFutureExt;
use indexmap::IndexMap;
use revive_dt_common::{
futures::{PollingWaitBehavior, poll},
@@ -30,12 +31,12 @@ use revive_dt_common::{
use revive_dt_format::{
metadata::{ContractInstance, ContractPathAndIdent},
steps::{
AllocateAccountStep, BalanceAssertionStep, Calldata, EtherValue, FunctionCallStep, Method,
RepeatStep, Step, StepAddress, StepIdx, StepPath, StorageEmptyAssertionStep,
AllocateAccountStep, Calldata, EtherValue, FunctionCallStep, Method, RepeatStep, Step,
StepIdx, StepPath,
},
traits::{ResolutionContext, ResolverApi},
};
use tokio::sync::{Mutex, mpsc::UnboundedSender};
use tokio::sync::{Mutex, OnceCell, mpsc::UnboundedSender};
use tracing::{Instrument, Span, debug, error, field::display, info, info_span, instrument};
use crate::{
@@ -123,13 +124,7 @@ where
&self.platform_information.reporter,
)
.await
.inspect_err(|err| {
error!(
?err,
platform_identifier = %self.platform_information.platform.platform_identifier(),
"Pre-linking compilation failed"
)
})
.inspect_err(|err| error!(?err, "Pre-linking compilation failed"))
.context("Failed to produce the pre-linking compiled contracts")?;
let mut deployed_libraries = None::<HashMap<_, _>>;
@@ -137,13 +132,7 @@ where
.test_definition
.metadata
.contract_sources()
.inspect_err(|err| {
error!(
?err,
platform_identifier = %self.platform_information.platform.platform_identifier(),
"Failed to retrieve contract sources from metadata"
)
})
.inspect_err(|err| error!(?err, "Failed to retrieve contract sources from metadata"))
.context("Failed to get the contract instances from the metadata file")?;
for library_instance in self
.test_definition
@@ -191,20 +180,19 @@ where
TransactionRequest::default().from(deployer_address),
code,
);
let receipt = self.execute_transaction(tx).await.inspect_err(|err| {
error!(
?err,
%library_instance,
platform_identifier = %self.platform_information.platform.platform_identifier(),
"Failed to deploy the library"
)
})?;
let receipt = self
.execute_transaction(tx)
.and_then(|(_, receipt_fut)| receipt_fut)
.await
.inspect_err(|err| {
error!(
?err,
%library_instance,
"Failed to deploy the library"
)
})?;
debug!(
?library_instance,
platform_identifier = %self.platform_information.platform.platform_identifier(),
"Deployed library"
);
debug!(?library_instance, "Deployed library");
let library_address = receipt
.contract_address
@@ -227,13 +215,7 @@ where
&self.platform_information.reporter,
)
.await
.inspect_err(|err| {
error!(
?err,
platform_identifier = %self.platform_information.platform.platform_identifier(),
"Post-linking compilation failed"
)
})
.inspect_err(|err| error!(?err, "Post-linking compilation failed"))
.context("Failed to compile the post-link contracts")?;
self.execution_state = ExecutionState::new(
@@ -269,7 +251,6 @@ where
skip_all,
fields(
driver_id = self.driver_id,
platform_identifier = %self.platform_information.platform.platform_identifier(),
%step_path,
),
err(Debug),
@@ -305,15 +286,11 @@ where
.handle_function_call_contract_deployment(step)
.await
.context("Failed to deploy contracts for the function call step")?;
let execution_receipt = self
let transaction_hash = self
.handle_function_call_execution(step, deployment_receipts)
.await
.context("Failed to handle the function call execution")?;
let tracing_result = self
.handle_function_call_call_frame_tracing(execution_receipt.transaction_hash)
.await
.context("Failed to handle the function call call frame tracing")?;
self.handle_function_call_variable_assignment(step, &tracing_result)
self.handle_function_call_variable_assignment(step, transaction_hash)
.await
.context("Failed to handle function call variable assignment")?;
Ok(1)
@@ -367,18 +344,19 @@ where
&mut self,
step: &FunctionCallStep,
mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
) -> Result<TransactionReceipt> {
) -> Result<TxHash> {
match step.method {
// This step was already executed when `handle_step` was called. We just need to
// lookup the transaction receipt in this case and continue on.
Method::Deployer => deployment_receipts
.remove(&step.instance)
.context("Failed to find deployment receipt for constructor call"),
.context("Failed to find deployment receipt for constructor call")
.map(|receipt| receipt.transaction_hash),
Method::Fallback | Method::FunctionName(_) => {
let tx = step
.as_transaction(self.resolver.as_ref(), self.default_resolution_context())
.await?;
self.execute_transaction(tx).await
Ok(self.execute_transaction(tx).await?.0)
}
}
}
@@ -417,15 +395,19 @@ where
async fn handle_function_call_variable_assignment(
&mut self,
step: &FunctionCallStep,
tracing_result: &CallFrame,
tx_hash: TxHash,
) -> Result<()> {
let Some(ref assignments) = step.variable_assignments else {
return Ok(());
};
// Handling the return data variable assignments.
let callframe = OnceCell::new();
for (variable_name, output_word) in assignments.return_data.iter().zip(
tracing_result
callframe
.get_or_try_init(|| self.handle_function_call_call_frame_tracing(tx_hash))
.await
.context("Failed to get the callframe trace for transaction")?
.output
.as_ref()
.unwrap_or_default()
@@ -446,26 +428,6 @@ where
Ok(())
}
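The `OnceCell` makes the call-frame trace lazy: it is fetched at most once, and only when a return-data assignment actually needs it. A minimal sketch of the pattern, with `trace_transaction` as a hypothetical stand-in for the real tracing call:

```rust
use tokio::sync::OnceCell;

// trace_transaction is a hypothetical stand-in returning anyhow::Result<CallFrame>.
async fn assign_lazily(tx_hash: TxHash) -> anyhow::Result<()> {
    let callframe: OnceCell<CallFrame> = OnceCell::new();
    for _assignment in 0..3 {
        let frame = callframe
            .get_or_try_init(|| trace_transaction(tx_hash)) // runs at most once
            .await?;
        let _ = frame.output.as_ref(); // read what the assignment needs
    }
    Ok(())
}
```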
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id))]
pub async fn execute_balance_assertion(
&mut self,
_: &StepPath,
_: &BalanceAssertionStep,
) -> anyhow::Result<usize> {
// Kept empty intentionally for the benchmark driver.
Ok(1)
}
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id), err(Debug))]
async fn execute_storage_empty_assertion_step(
&mut self,
_: &StepPath,
_: &StorageEmptyAssertionStep,
) -> Result<usize> {
// Kept empty intentionally for the benchmark driver.
Ok(1)
}
#[instrument(level = "info", skip_all, fields(driver_id = self.driver_id), err(Debug))]
async fn execute_repeat_step(
&mut self,
@@ -547,7 +509,6 @@ where
skip_all,
fields(
driver_id = self.driver_id,
platform_identifier = %self.platform_information.platform.platform_identifier(),
%contract_instance,
%deployer
),
@@ -590,7 +551,6 @@ where
skip_all,
fields(
driver_id = self.driver_id,
platform_identifier = %self.platform_information.platform.platform_identifier(),
%contract_instance,
%deployer
),
@@ -660,7 +620,11 @@ where
TransactionBuilder::<Ethereum>::with_deploy_code(tx, code)
};
let receipt = match self.execute_transaction(tx).await {
let receipt = match self
.execute_transaction(tx)
.and_then(|(_, receipt_fut)| receipt_fut)
.await
{
Ok(receipt) => receipt,
Err(error) => {
tracing::error!(?error, "Contract deployment transaction failed.");
@@ -687,33 +651,6 @@ where
Ok((address, abi, receipt))
}
#[instrument(level = "info", fields(driver_id = self.driver_id), skip_all)]
async fn step_address_auto_deployment(
&mut self,
step_address: &StepAddress,
) -> Result<Address> {
match step_address {
StepAddress::Address(address) => Ok(*address),
StepAddress::ResolvableAddress(resolvable) => {
let Some(instance) = resolvable
.strip_suffix(".address")
.map(ContractInstance::new)
else {
bail!("Not an address variable");
};
self.get_or_deploy_contract_instance(
&instance,
FunctionCallStep::default_caller_address(),
None,
None,
)
.await
.map(|v| v.0)
}
}
}
// endregion:Contract Deployment
// region:Resolution & Resolver
@@ -734,7 +671,7 @@ where
async fn execute_transaction(
&self,
transaction: TransactionRequest,
) -> anyhow::Result<TransactionReceipt> {
) -> anyhow::Result<(TxHash, impl Future<Output = Result<TransactionReceipt>>)> {
let node = self.platform_information.node;
let transaction_hash = node
.submit_transaction(transaction)
@@ -747,24 +684,28 @@ where
.send(WatcherEvent::SubmittedTransaction { transaction_hash })
.context("Failed to send the transaction hash to the watcher")?;
info!("Starting to poll for transaction receipt");
poll(
Duration::from_secs(30 * 60),
PollingWaitBehavior::Constant(Duration::from_secs(1)),
|| {
async move {
match node.get_receipt(transaction_hash).await {
Ok(receipt) => {
info!("Polling succeeded, receipt found");
Ok(ControlFlow::Break(receipt))
Ok((transaction_hash, async move {
info!("Starting to poll for transaction receipt");
poll(
Duration::from_secs(30 * 60),
PollingWaitBehavior::Constant(Duration::from_secs(1)),
|| {
async move {
match node.get_receipt(transaction_hash).await {
Ok(receipt) => {
info!("Polling succeeded, receipt found");
Ok(ControlFlow::Break(receipt))
}
Err(_) => Ok(ControlFlow::Continue(())),
}
Err(_) => Ok(ControlFlow::Continue(())),
}
}
.instrument(info_span!("Polling for receipt"))
},
)
.await
.instrument(info_span!("Polling for receipt"))
},
)
.instrument(info_span!("Polling for receipt", %transaction_hash))
.await
.inspect(|_| info!("Found the transaction receipt"))
}))
}
// endregion:Transaction Execution
}
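With this change, submission and receipt polling are decoupled; the caller gets the hash as soon as the transaction is submitted and awaits the receipt only when it needs it. A hedged usage sketch:

```rust
// Hedged usage sketch of the new return shape.
let (tx_hash, receipt_fut) = self.execute_transaction(tx).await?;
// The hash is available immediately, e.g. for logging or the watcher.
info!(%tx_hash, "Transaction submitted");
// Awaiting the future polls get_receipt once per second, for up to 30 minutes.
let receipt = receipt_fut.await?;
```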
@@ -6,16 +6,19 @@ use anyhow::Context as _;
use futures::{FutureExt, StreamExt};
use revive_dt_common::types::PrivateKeyAllocator;
use revive_dt_core::Platform;
use revive_dt_format::steps::{Step, StepIdx, StepPath};
use revive_dt_format::{
corpus::Corpus,
steps::{Step, StepIdx, StepPath},
};
use tokio::sync::Mutex;
use tracing::{error, info, info_span, instrument, warn};
use tracing::{Instrument, error, info, info_span, instrument, warn};
use revive_dt_config::{BenchmarkingContext, Context};
use revive_dt_report::Reporter;
use crate::{
differential_benchmarks::{Driver, Watcher, WatcherEvent},
helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream},
helpers::{CachedCompiler, NodePool, create_test_definitions_stream},
};
/// Handles the differential testing, executing it according to the information defined in the
@@ -39,9 +42,17 @@ pub async fn handle_differential_benchmarks(
let full_context = Context::Benchmark(Box::new(context.clone()));
// Discover all of the metadata files that are defined in the context.
let metadata_files = collect_metadata_files(&context)
.context("Failed to collect metadata files for differential testing")?;
info!(len = metadata_files.len(), "Discovered metadata files");
let corpus = context
.corpus_configuration
.test_specifiers
.clone()
.into_iter()
.try_fold(Corpus::default(), Corpus::with_test_specifier)
.context("Failed to parse the test corpus")?;
info!(
len = corpus.metadata_file_count(),
"Discovered metadata files"
);
// Discover the list of platforms that the tests should run on based on the context.
let platforms = context
@@ -84,8 +95,9 @@ pub async fn handle_differential_benchmarks(
// Preparing test definitions for the execution.
let test_definitions = create_test_definitions_stream(
&full_context,
metadata_files.iter(),
&corpus,
&platforms_and_nodes,
None,
reporter.clone(),
)
.await
@@ -159,12 +171,15 @@ pub async fn handle_differential_benchmarks(
futures::future::try_join(
watcher.run(),
driver.execute_all().inspect(|_| {
info!("All transactions submitted - driver completed execution");
watcher_tx
.send(WatcherEvent::AllTransactionsSubmitted)
.unwrap()
}),
driver
.execute_all()
.instrument(info_span!("Executing Benchmarks", %platform_identifier))
.inspect(|_| {
info!("All transactions submitted - driver completed execution");
watcher_tx
.send(WatcherEvent::AllTransactionsSubmitted)
.unwrap()
}),
)
.await
.context("Failed to run the driver and executor")
@@ -104,6 +104,12 @@ impl Watcher {
async move {
let mut mined_blocks_information = Vec::new();
// region:TEMPORARY
eprintln!("Watcher information for {}", self.platform_identifier);
eprintln!(
"block_number,block_timestamp,mined_gas,block_gas_limit,tx_count,ref_time,max_ref_time,proof_size,max_proof_size"
);
// endregion:TEMPORARY
while let Some(block) = blocks_information_stream.next().await {
// If the block number is equal to or less than the last block before the
// repetition, then we ignore it and continue on to the next block.
@@ -118,8 +124,9 @@ impl Watcher {
}
info!(
remaining_transactions = watch_for_transaction_hashes.read().await.len(),
block_number = block.block_number,
block_tx_count = block.transaction_hashes.len(),
remaining_transactions = watch_for_transaction_hashes.read().await.len(),
"Observed a block"
);
@@ -131,6 +138,24 @@ impl Watcher {
watch_for_transaction_hashes.remove(tx_hash);
}
// region:TEMPORARY
// TODO: The following code is TEMPORARY and will be removed once we have proper
// reporting in place. It serves as a way of doing some very simple reporting
// for the time being.
eprintln!(
"\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\",\"{}\"",
block.block_number,
block.block_timestamp,
block.mined_gas,
block.block_gas_limit,
block.transaction_hashes.len(),
block.ref_time,
block.max_ref_time,
block.proof_size,
block.max_proof_size,
);
// endregion:TEMPORARY
mined_blocks_information.push(block);
}
@@ -139,41 +164,10 @@ impl Watcher {
}
};
let (_, mined_blocks_information) =
let (_, _) =
futures::future::join(watcher_event_watching_task, block_information_watching_task)
.await;
// region:TEMPORARY
{
// TODO: The following code is TEMPORARY and will be removed once we have proper
// reporting in place. It serves as a way of doing some very simple reporting
// for the time being.
use std::io::Write;
let mut stderr = std::io::stderr().lock();
writeln!(
stderr,
"Watcher information for {}",
self.platform_identifier
)?;
writeln!(
stderr,
"block_number,block_timestamp,mined_gas,block_gas_limit,tx_count"
)?;
for block in mined_blocks_information {
writeln!(
stderr,
"{},{},{},{},{}",
block.block_number,
block.block_timestamp,
block.mined_gas,
block.block_gas_limit,
block.transaction_hashes.len()
)?
}
}
// endregion:TEMPORARY
Ok(())
}
}
+154 -63
View File
@@ -7,19 +7,21 @@ use std::{
time::{Duration, Instant},
};
use ansi_term::{ANSIStrings, Color};
use anyhow::Context as _;
use futures::{FutureExt, StreamExt};
use revive_dt_common::types::PrivateKeyAllocator;
use revive_dt_common::{cached_fs::read_to_string, types::PrivateKeyAllocator};
use revive_dt_core::Platform;
use revive_dt_format::corpus::Corpus;
use tokio::sync::{Mutex, RwLock, Semaphore};
use tracing::{Instrument, error, info, info_span, instrument};
use revive_dt_config::{Context, TestExecutionContext};
use revive_dt_config::{Context, OutputFormat, TestExecutionContext};
use revive_dt_report::{Reporter, ReporterEvent, TestCaseStatus};
use crate::{
differential_tests::Driver,
helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream},
helpers::{CachedCompiler, NodePool, create_test_definitions_stream},
};
/// Handles the differential testing, executing it according to the information defined in the
@@ -32,9 +34,17 @@ pub async fn handle_differential_tests(
let reporter_clone = reporter.clone();
// Discover all of the metadata files that are defined in the context.
let metadata_files = collect_metadata_files(&context)
.context("Failed to collect metadata files for differential testing")?;
info!(len = metadata_files.len(), "Discovered metadata files");
let corpus = context
.corpus_configuration
.test_specifiers
.clone()
.into_iter()
.try_fold(Corpus::default(), Corpus::with_test_specifier)
.context("Failed to parse the test corpus")?;
info!(
len = corpus.metadata_file_count(),
"Discovered metadata files"
);
// Discover the list of platforms that the tests should run on based on the context.
let platforms = context
@@ -71,11 +81,20 @@ pub async fn handle_differential_tests(
info!("Spawned the platform nodes");
// Preparing test definitions.
let only_execute_failed_tests = match context.ignore_success_configuration.path.as_ref() {
Some(path) => {
let report = read_to_string(path)
.context("Failed to read the report file to ignore the succeeding test cases")?;
Some(serde_json::from_str(&report).context("Failed to deserialize report")?)
}
None => None,
};
let full_context = Context::Test(Box::new(context.clone()));
let test_definitions = create_test_definitions_stream(
&full_context,
metadata_files.iter(),
&corpus,
&platforms_and_nodes,
only_execute_failed_tests.as_ref(),
reporter.clone(),
)
.await
@@ -176,7 +195,7 @@ pub async fn handle_differential_tests(
.report_completion_event()
.expect("Can't fail")
});
let cli_reporting_task = start_cli_reporting_task(reporter);
let cli_reporting_task = start_cli_reporting_task(context.output_format, reporter);
tokio::task::spawn(async move {
loop {
@@ -186,6 +205,7 @@ pub async fn handle_differential_tests(
?remaining_tasks,
"Remaining Tests"
);
drop(remaining_tasks);
tokio::time::sleep(Duration::from_secs(10)).await
}
});
@@ -196,21 +216,15 @@ pub async fn handle_differential_tests(
}
#[allow(irrefutable_let_patterns, clippy::uninlined_format_args)]
async fn start_cli_reporting_task(reporter: Reporter) {
async fn start_cli_reporting_task(output_format: OutputFormat, reporter: Reporter) {
let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail");
drop(reporter);
let start = Instant::now();
const GREEN: &str = "\x1B[32m";
const RED: &str = "\x1B[31m";
const GREY: &str = "\x1B[90m";
const COLOR_RESET: &str = "\x1B[0m";
const BOLD: &str = "\x1B[1m";
const BOLD_RESET: &str = "\x1B[22m";
let mut number_of_successes = 0;
let mut number_of_failures = 0;
let mut global_success_count = 0;
let mut global_failure_count = 0;
let mut global_ignore_count = 0;
let mut buf = BufWriter::new(stderr());
while let Ok(event) = aggregator_events_rx.recv().await {
@@ -223,55 +237,132 @@ async fn start_cli_reporting_task(reporter: Reporter) {
continue;
};
let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
for (case_idx, case_status) in case_status.into_iter() {
let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
let _ = match case_status {
TestCaseStatus::Succeeded { steps_executed } => {
number_of_successes += 1;
writeln!(
buf,
"{}{}Case Succeeded{} - Steps Executed: {}{}",
GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET
)
match output_format {
OutputFormat::Legacy => {
let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display());
for (case_idx, case_status) in case_status.into_iter() {
let _ = write!(buf, "\tCase Index {case_idx:>3}: ");
let _ = match case_status {
TestCaseStatus::Succeeded { steps_executed } => {
global_success_count += 1;
writeln!(
buf,
"{}",
ANSIStrings(&[
Color::Green.bold().paint("Case Succeeded"),
Color::Green
.paint(format!(" - Steps Executed: {steps_executed}")),
])
)
}
TestCaseStatus::Failed { reason } => {
global_failure_count += 1;
writeln!(
buf,
"{}",
ANSIStrings(&[
Color::Red.bold().paint("Case Failed"),
Color::Red.paint(format!(" - Reason: {}", reason.trim())),
])
)
}
TestCaseStatus::Ignored { reason, .. } => {
global_ignore_count += 1;
writeln!(
buf,
"{}",
ANSIStrings(&[
Color::Yellow.bold().paint("Case Ignored"),
Color::Yellow.paint(format!(" - Reason: {}", reason.trim())),
])
)
}
};
}
TestCaseStatus::Failed { reason } => {
number_of_failures += 1;
writeln!(
buf,
"{}{}Case Failed{} - Reason: {}{}",
RED,
BOLD,
BOLD_RESET,
reason.trim(),
COLOR_RESET,
)
}
TestCaseStatus::Ignored { reason, .. } => writeln!(
let _ = writeln!(buf);
}
OutputFormat::CargoTestLike => {
writeln!(
buf,
"{}{}Case Ignored{} - Reason: {}{}",
GREY,
BOLD,
BOLD_RESET,
reason.trim(),
COLOR_RESET,
),
};
"\t{} {} - {}\n",
Color::Green.paint("Running"),
metadata_file_path.display(),
mode
)
.unwrap();
let mut success_count = 0;
let mut failure_count = 0;
let mut ignored_count = 0;
writeln!(buf, "running {} tests", case_status.len()).unwrap();
for (case_idx, case_result) in case_status.iter() {
let status = match case_result {
TestCaseStatus::Succeeded { .. } => {
success_count += 1;
global_success_count += 1;
Color::Green.paint("ok")
}
TestCaseStatus::Failed { reason } => {
failure_count += 1;
global_failure_count += 1;
Color::Red.paint(format!("FAILED, {reason}"))
}
TestCaseStatus::Ignored { reason, .. } => {
ignored_count += 1;
global_ignore_count += 1;
Color::Yellow.paint(format!("ignored, {reason:?}"))
}
};
writeln!(buf, "test case_idx_{} ... {}", case_idx, status).unwrap();
}
writeln!(buf).unwrap();
let status = if failure_count > 0 {
Color::Red.paint("FAILED")
} else {
Color::Green.paint("ok")
};
writeln!(
buf,
"test result: {}. {} passed; {} failed; {} ignored",
status, success_count, failure_count, ignored_count,
)
.unwrap();
writeln!(buf).unwrap();
buf = tokio::task::spawn_blocking(move || {
buf.flush().unwrap();
buf
})
.await
.unwrap();
}
}
let _ = writeln!(buf);
}
// Summary at the end.
let _ = writeln!(
buf,
"{} cases: {}{}{} cases succeeded, {}{}{} cases failed in {} seconds",
number_of_successes + number_of_failures,
GREEN,
number_of_successes,
COLOR_RESET,
RED,
number_of_failures,
COLOR_RESET,
start.elapsed().as_secs()
);
match output_format {
OutputFormat::Legacy => {
writeln!(
buf,
"{} cases: {} cases succeeded, {} cases failed in {} seconds",
global_success_count + global_failure_count + global_ignore_count,
Color::Green.paint(global_success_count.to_string()),
Color::Red.paint(global_failure_count.to_string()),
start.elapsed().as_secs()
)
.unwrap();
}
OutputFormat::CargoTestLike => {
writeln!(
buf,
"run finished. {} passed; {} failed; {} ignored; finished in {}s",
global_success_count,
global_failure_count,
global_ignore_count,
start.elapsed().as_secs()
)
.unwrap();
}
}
}
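For orientation, a hedged illustration of the shape the `CargoTestLike` branch writes per metadata file (all values invented):

```text
	Running tests/example.json - Y M3
running 2 tests
test case_idx_0 ... ok
test case_idx_1 ... FAILED, assertion mismatch

test result: FAILED. 1 passed; 1 failed; 0 ignored
```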
@@ -325,26 +325,6 @@ impl ArtifactsCache {
let value = bson::from_slice::<CacheValue>(&value).ok()?;
Some(value)
}
#[instrument(level = "debug", skip_all, err)]
pub async fn get_or_insert_with(
&self,
key: &CacheKey<'_>,
callback: impl AsyncFnOnce() -> Result<CacheValue>,
) -> Result<CacheValue> {
match self.get(key).await {
Some(value) => {
debug!("Cache hit");
Ok(value)
}
None => {
debug!("Cache miss");
let value = callback().await?;
self.insert(key, &value).await?;
Ok(value)
}
}
}
}
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
-33
View File
@@ -1,33 +0,0 @@
use revive_dt_config::CorpusConfiguration;
use revive_dt_format::{corpus::Corpus, metadata::MetadataFile};
use tracing::{info, info_span, instrument};
/// Given an object that implements [`AsRef<CorpusConfiguration>`], this function finds all of the
/// corpus files and produces a list containing all of the [`MetadataFile`]s discovered.
#[instrument(level = "debug", name = "Collecting Corpora", skip_all)]
pub fn collect_metadata_files(
context: impl AsRef<CorpusConfiguration>,
) -> anyhow::Result<Vec<MetadataFile>> {
let mut metadata_files = Vec::new();
let corpus_configuration = AsRef::<CorpusConfiguration>::as_ref(&context);
for path in &corpus_configuration.paths {
let span = info_span!("Processing corpus file", path = %path.display());
let _guard = span.enter();
let corpus = Corpus::try_from_path(path)?;
info!(
name = corpus.name(),
number_of_contained_paths = corpus.path_count(),
"Deserialized corpus file"
);
metadata_files.extend(corpus.enumerate_tests());
}
// There's a possibility that there are certain paths that all lead to the same metadata files
// and therefore it's important that we sort them and then deduplicate them.
metadata_files.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
metadata_files.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
Ok(metadata_files)
}
-2
View File
@@ -1,9 +1,7 @@
mod cached_compiler;
mod metadata;
mod pool;
mod test;
pub use cached_compiler::*;
pub use metadata::*;
pub use pool::*;
pub use test::*;
+54 -39
View File
@@ -4,10 +4,9 @@ use std::{borrow::Cow, path::Path};
use futures::{Stream, StreamExt, stream};
use indexmap::{IndexMap, indexmap};
use revive_dt_common::iterators::EitherIter;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_config::Context;
use revive_dt_format::mode::ParsedMode;
use revive_dt_format::corpus::Corpus;
use serde_json::{Value, json};
use revive_dt_compiler::Mode;
@@ -17,7 +16,7 @@ use revive_dt_format::{
metadata::MetadataFile,
};
use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{ExecutionSpecificReporter, Reporter};
use revive_dt_report::{ExecutionSpecificReporter, Report, Reporter, TestCaseStatus};
use revive_dt_report::{TestSpecificReporter, TestSpecifier};
use tracing::{debug, error, info};
@@ -28,46 +27,28 @@ pub async fn create_test_definitions_stream<'a>(
// This is only required for creating the compiler objects and is not used anywhere else in the
// function.
context: &Context,
metadata_files: impl IntoIterator<Item = &'a MetadataFile>,
corpus: &'a Corpus,
platforms_and_nodes: &'a BTreeMap<PlatformIdentifier, (&dyn Platform, NodePool)>,
only_execute_failed_tests: Option<&Report>,
reporter: Reporter,
) -> impl Stream<Item = TestDefinition<'a>> {
stream::iter(
metadata_files
.into_iter()
// Flatten over the cases.
.flat_map(|metadata_file| {
metadata_file
.cases
.iter()
.enumerate()
.map(move |(case_idx, case)| (metadata_file, case_idx, case))
})
// Flatten over the modes, prefer the case modes over the metadata file modes.
.flat_map(move |(metadata_file, case_idx, case)| {
corpus
.cases_iterator()
.map(move |(metadata_file, case_idx, case, mode)| {
let reporter = reporter.clone();
let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
let modes = match modes {
Some(modes) => EitherIter::A(
ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned),
),
None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
};
modes.into_iter().map(move |mode| {
(
metadata_file,
case_idx,
case,
mode.clone(),
reporter.test_specific_reporter(Arc::new(TestSpecifier {
solc_mode: mode.as_ref().clone(),
metadata_file_path: metadata_file.metadata_file_path.clone(),
case_idx: CaseIdx::new(case_idx),
})),
)
})
(
metadata_file,
case_idx,
case,
mode.clone(),
reporter.test_specific_reporter(Arc::new(TestSpecifier {
solc_mode: mode.as_ref().clone(),
metadata_file_path: metadata_file.metadata_file_path.clone(),
case_idx: CaseIdx::new(case_idx),
})),
)
})
// Inform the reporter of each one of the test cases that were discovered which we expect to
// run.
@@ -140,7 +121,7 @@ pub async fn create_test_definitions_stream<'a>(
)
// Filter out the test cases which are incompatible or that can't run in the current setup.
.filter_map(move |test| async move {
match test.check_compatibility() {
match test.check_compatibility(only_execute_failed_tests) {
Ok(()) => Some(test),
Err((reason, additional_information)) => {
debug!(
@@ -200,12 +181,16 @@ pub struct TestDefinition<'a> {
impl<'a> TestDefinition<'a> {
/// Checks if this test can be run with the current configuration.
pub fn check_compatibility(&self) -> TestCheckFunctionResult {
pub fn check_compatibility(
&self,
only_execute_failed_tests: Option<&Report>,
) -> TestCheckFunctionResult {
self.check_metadata_file_ignored()?;
self.check_case_file_ignored()?;
self.check_target_compatibility()?;
self.check_evm_version_compatibility()?;
self.check_compiler_compatibility()?;
self.check_ignore_succeeded(only_execute_failed_tests)?;
Ok(())
}
@@ -313,6 +298,36 @@ impl<'a> TestDefinition<'a> {
))
}
}
/// Checks if the test case should be executed or not based on the passed report and whether the
/// user has instructed the tool to ignore the already succeeding test cases.
fn check_ignore_succeeded(
&self,
only_execute_failed_tests: Option<&Report>,
) -> TestCheckFunctionResult {
let Some(report) = only_execute_failed_tests else {
return Ok(());
};
let test_case_status = report
.test_case_information
.get(&(self.metadata_file_path.to_path_buf().into()))
.and_then(|obj| obj.get(&self.mode))
.and_then(|obj| obj.get(&self.case_idx))
.and_then(|obj| obj.status.as_ref());
match test_case_status {
Some(TestCaseStatus::Failed { .. }) => Ok(()),
Some(TestCaseStatus::Ignored { .. }) => Err((
"Ignored since it was ignored in a previous run",
indexmap! {},
)),
Some(TestCaseStatus::Succeeded { .. }) => {
Err(("Ignored since it succeeded in a prior run", indexmap! {}))
}
None => Ok(()),
}
}
}
pub struct TestPlatformInformation<'a> {
+105 -10
View File
@@ -16,7 +16,7 @@ use revive_dt_config::*;
use revive_dt_node::{
Node, node_implementations::geth::GethNode,
node_implementations::lighthouse_geth::LighthouseGethNode,
node_implementations::substrate::SubstrateNode, node_implementations::zombienet::ZombieNode,
node_implementations::substrate::SubstrateNode, node_implementations::zombienet::ZombienetNode,
};
use revive_dt_node_interaction::EthereumNode;
use tracing::info;
@@ -59,6 +59,9 @@ pub trait Platform {
context: Context,
version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>>;
/// Exports the genesis/chainspec for the node.
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value>;
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
@@ -104,6 +107,15 @@ impl Platform for GethEvmSolcPlatform {
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
let genesis = AsRef::<GenesisConfiguration>::as_ref(&context).genesis()?;
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
let node_genesis = GethNode::node_genesis(genesis.clone(), &wallet);
serde_json::to_value(node_genesis)
.context("Failed to convert node genesis to a serde_value")
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
@@ -149,6 +161,15 @@ impl Platform for LighthouseGethEvmSolcPlatform {
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
let genesis = AsRef::<GenesisConfiguration>::as_ref(&context).genesis()?;
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
let node_genesis = LighthouseGethNode::node_genesis(genesis.clone(), &wallet);
serde_json::to_value(node_genesis)
.context("Failed to convert node genesis to a serde_value")
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
@@ -184,7 +205,9 @@ impl Platform for KitchensinkPolkavmResolcPlatform {
let node = SubstrateNode::new(
kitchensink_path,
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
None,
context,
&[],
);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
@@ -201,6 +224,16 @@ impl Platform for KitchensinkPolkavmResolcPlatform {
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
.path
.as_path();
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
let export_chainspec_command = SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND;
SubstrateNode::node_genesis(kitchensink_path, export_chainspec_command, &wallet)
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
@@ -236,7 +269,9 @@ impl Platform for KitchensinkRevmSolcPlatform {
let node = SubstrateNode::new(
kitchensink_path,
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
None,
context,
&[],
);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
@@ -253,6 +288,16 @@ impl Platform for KitchensinkRevmSolcPlatform {
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context)
.path
.as_path();
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
let export_chainspec_command = SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND;
SubstrateNode::node_genesis(kitchensink_path, export_chainspec_command, &wallet)
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
@@ -280,15 +325,21 @@ impl Platform for ReviveDevNodePolkavmResolcPlatform {
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
.path
.clone();
let revive_dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
let revive_dev_node_path = revive_dev_node_configuration.path.clone();
let revive_dev_node_consensus = revive_dev_node_configuration.consensus.clone();
let eth_rpc_connection_strings = revive_dev_node_configuration.existing_rpc_url.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = SubstrateNode::new(
revive_dev_node_path,
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
Some(revive_dev_node_consensus),
context,
&eth_rpc_connection_strings,
);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
@@ -305,6 +356,16 @@ impl Platform for ReviveDevNodePolkavmResolcPlatform {
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
.path
.as_path();
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
let export_chainspec_command = SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND;
SubstrateNode::node_genesis(revive_dev_node_path, export_chainspec_command, &wallet)
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
@@ -332,15 +393,21 @@ impl Platform for ReviveDevNodeRevmSolcPlatform {
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
.path
.clone();
let revive_dev_node_configuration = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context);
let revive_dev_node_path = revive_dev_node_configuration.path.clone();
let revive_dev_node_consensus = revive_dev_node_configuration.consensus.clone();
let eth_rpc_connection_strings = revive_dev_node_configuration.existing_rpc_url.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = SubstrateNode::new(
revive_dev_node_path,
SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND,
Some(revive_dev_node_consensus),
context,
&eth_rpc_connection_strings,
);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
@@ -357,6 +424,16 @@ impl Platform for ReviveDevNodeRevmSolcPlatform {
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context)
.path
.as_path();
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
let export_chainspec_command = SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND;
SubstrateNode::node_genesis(revive_dev_node_path, export_chainspec_command, &wallet)
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
@@ -389,7 +466,7 @@ impl Platform for ZombienetPolkavmResolcPlatform {
.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = ZombieNode::new(polkadot_parachain_path, context);
let node = ZombienetNode::new(polkadot_parachain_path, context);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
@@ -401,10 +478,19 @@ impl Platform for ZombienetPolkavmResolcPlatform {
version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move {
let compiler = Solc::new(context, version).await;
let compiler = Resolc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
.path
.as_path();
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
ZombienetNode::node_genesis(polkadot_parachain_path, &wallet)
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
@@ -437,7 +523,7 @@ impl Platform for ZombienetRevmSolcPlatform {
.clone();
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = ZombieNode::new(polkadot_parachain_path, context);
let node = ZombienetNode::new(polkadot_parachain_path, context);
let node = spawn_node(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
@@ -453,6 +539,15 @@ impl Platform for ZombienetRevmSolcPlatform {
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
fn export_genesis(&self, context: Context) -> anyhow::Result<serde_json::Value> {
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context)
.path
.as_path();
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
ZombienetNode::node_genesis(polkadot_parachain_path, &wallet)
}
}
impl From<PlatformIdentifier> for Box<dyn Platform> {
+18 -3
View File
@@ -2,10 +2,11 @@ mod differential_benchmarks;
mod differential_tests;
mod helpers;
use anyhow::Context as _;
use clap::Parser;
use revive_dt_report::ReportAggregator;
use schemars::schema_for;
use tracing::info;
use tracing::{info, level_filters::LevelFilter};
use tracing_subscriber::{EnvFilter, FmtSubscriber};
use revive_dt_config::Context;
@@ -30,14 +31,20 @@ fn main() -> anyhow::Result<()> {
.with_writer(writer)
.with_thread_ids(false)
.with_thread_names(false)
.with_env_filter(EnvFilter::from_default_env())
.with_env_filter(
EnvFilter::builder()
.with_default_directive(LevelFilter::OFF.into())
.from_env_lossy(),
)
.with_ansi(false)
.pretty()
.finish();
tracing::subscriber::set_global_default(subscriber)?;
info!("Differential testing tool is starting");
let context = Context::try_parse()?;
let mut context = Context::try_parse()?;
context.update_for_profile();
let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task();
match context {
@@ -72,6 +79,14 @@ fn main() -> anyhow::Result<()> {
Ok(())
}),
Context::ExportGenesis(ref export_genesis_context) => {
let platform = Into::<&dyn Platform>::into(export_genesis_context.platform);
let genesis = platform.export_genesis(context)?;
let genesis_json = serde_json::to_string_pretty(&genesis)
.context("Failed to serialize the genesis to JSON")?;
println!("{genesis_json}");
Ok(())
}
Context::ExportJsonSchema => {
let schema = schema_for!(Metadata);
println!("{}", serde_json::to_string_pretty(&schema).unwrap());
+1 -1
View File
@@ -16,12 +16,12 @@ revive-common = { workspace = true }
alloy = { workspace = true }
anyhow = { workspace = true }
futures = { workspace = true }
regex = { workspace = true }
tracing = { workspace = true }
schemars = { workspace = true }
semver = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
itertools = { workspace = true }
[dev-dependencies]
tokio = { workspace = true }
+5 -2
View File
@@ -1,9 +1,12 @@
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use revive_dt_common::{macros::define_wrapper_type, types::Mode};
use revive_dt_common::{
macros::define_wrapper_type,
types::{Mode, ParsedMode},
};
use crate::{mode::ParsedMode, steps::*};
use crate::steps::*;
#[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)]
pub struct Case {
+180 -111
View File
@@ -1,131 +1,200 @@
use std::{
fs::File,
borrow::Cow,
collections::HashMap,
path::{Path, PathBuf},
};
use revive_dt_common::iterators::FilesWithExtensionIterator;
use serde::{Deserialize, Serialize};
use tracing::{debug, info};
use itertools::Itertools;
use revive_dt_common::{
iterators::{EitherIter, FilesWithExtensionIterator},
types::{Mode, ParsedMode, ParsedTestSpecifier},
};
use tracing::{debug, warn};
use crate::metadata::{Metadata, MetadataFile};
use anyhow::Context as _;
use crate::{
case::{Case, CaseIdx},
metadata::{Metadata, MetadataFile},
};
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Corpus {
SinglePath { name: String, path: PathBuf },
MultiplePaths { name: String, paths: Vec<PathBuf> },
#[derive(Default)]
pub struct Corpus {
test_specifiers: HashMap<ParsedTestSpecifier, Vec<PathBuf>>,
metadata_files: HashMap<PathBuf, MetadataFile>,
}
impl Corpus {
pub fn try_from_path(file_path: impl AsRef<Path>) -> anyhow::Result<Self> {
let mut corpus = File::open(file_path.as_ref())
.map_err(anyhow::Error::from)
.and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))
.with_context(|| {
format!(
"Failed to open and deserialize corpus file at {}",
file_path.as_ref().display()
)
})?;
let corpus_directory = file_path
.as_ref()
.canonicalize()
.context("Failed to canonicalize the path to the corpus file")?
.parent()
.context("Corpus file has no parent")?
.to_path_buf();
for path in corpus.paths_iter_mut() {
*path = corpus_directory.join(path.as_path())
}
Ok(corpus)
pub fn new() -> Self {
Default::default()
}
pub fn enumerate_tests(&self) -> Vec<MetadataFile> {
let mut tests = self
.paths_iter()
.flat_map(|root_path| {
if !root_path.is_dir() {
Box::new(std::iter::once(root_path.to_path_buf()))
as Box<dyn Iterator<Item = _>>
} else {
Box::new(
FilesWithExtensionIterator::new(root_path)
.with_use_cached_fs(true)
.with_allowed_extension("sol")
.with_allowed_extension("json"),
)
pub fn with_test_specifier(
mut self,
test_specifier: ParsedTestSpecifier,
) -> anyhow::Result<Self> {
match &test_specifier {
ParsedTestSpecifier::FileOrDirectory {
metadata_or_directory_file_path: metadata_file_path,
}
| ParsedTestSpecifier::Case {
metadata_file_path, ..
}
| ParsedTestSpecifier::CaseWithMode {
metadata_file_path, ..
} => {
let metadata_files = enumerate_metadata_files(metadata_file_path);
self.test_specifiers.insert(
test_specifier,
metadata_files
.iter()
.map(|metadata_file| metadata_file.metadata_file_path.clone())
.collect(),
);
for metadata_file in metadata_files.into_iter() {
self.metadata_files
.insert(metadata_file.metadata_file_path.clone(), metadata_file);
}
.map(move |metadata_file_path| (root_path, metadata_file_path))
})
.filter_map(|(root_path, metadata_file_path)| {
Metadata::try_from_file(&metadata_file_path)
.or_else(|| {
debug!(
discovered_from = %root_path.display(),
metadata_file_path = %metadata_file_path.display(),
"Skipping file since it doesn't contain valid metadata"
);
None
})
.map(|metadata| MetadataFile {
metadata_file_path,
corpus_file_path: root_path.to_path_buf(),
content: metadata,
})
.inspect(|metadata_file| {
debug!(
metadata_file_path = %metadata_file.relative_path().display(),
"Loaded metadata file"
}
};
Ok(self)
}
pub fn cases_iterator(
&self,
) -> impl Iterator<Item = (&'_ MetadataFile, CaseIdx, &'_ Case, Cow<'_, Mode>)> + '_ {
let mut iterator = Box::new(std::iter::empty())
as Box<dyn Iterator<Item = (&'_ MetadataFile, CaseIdx, &'_ Case, Cow<'_, Mode>)> + '_>;
for (test_specifier, metadata_file_paths) in self.test_specifiers.iter() {
for metadata_file_path in metadata_file_paths {
let metadata_file = self
.metadata_files
.get(metadata_file_path)
.expect("Must succeed");
match test_specifier {
ParsedTestSpecifier::FileOrDirectory { .. } => {
for (case_idx, case) in metadata_file.cases.iter().enumerate() {
let case_idx = CaseIdx::new(case_idx);
let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
let modes = match modes {
Some(modes) => EitherIter::A(
ParsedMode::many_to_modes(modes.iter())
.map(Cow::<'static, _>::Owned),
),
None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
};
iterator = Box::new(
iterator.chain(
modes
.into_iter()
.map(move |mode| (metadata_file, case_idx, case, mode)),
),
)
}
}
ParsedTestSpecifier::Case { case_idx, .. } => {
let Some(case) = metadata_file.cases.get(*case_idx) else {
warn!(
test_specifier = %test_specifier,
metadata_file_path = %metadata_file_path.display(),
case_idx = case_idx,
case_count = metadata_file.cases.len(),
"Specified case not found in metadata file"
);
continue;
};
let case_idx = CaseIdx::new(*case_idx);
let modes = case.modes.as_ref().or(metadata_file.modes.as_ref());
let modes = match modes {
Some(modes) => EitherIter::A(
ParsedMode::many_to_modes(modes.iter())
.map(Cow::<'static, Mode>::Owned),
),
None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)),
};
iterator = Box::new(
iterator.chain(
modes
.into_iter()
.map(move |mode| (metadata_file, case_idx, case, mode)),
),
)
})
})
.collect::<Vec<_>>();
tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
info!(
len = tests.len(),
corpus_name = self.name(),
"Found tests in Corpus"
);
tests
}
}
ParsedTestSpecifier::CaseWithMode { case_idx, mode, .. } => {
let Some(case) = metadata_file.cases.get(*case_idx) else {
warn!(
test_specifier = %test_specifier,
metadata_file_path = %metadata_file_path.display(),
case_idx = case_idx,
case_count = metadata_file.cases.len(),
"Specified case not found in metadata file"
);
continue;
};
let case_idx = CaseIdx::new(*case_idx);
pub fn name(&self) -> &str {
match self {
Corpus::SinglePath { name, .. } | Corpus::MultiplePaths { name, .. } => name.as_str(),
}
}
pub fn paths_iter(&self) -> impl Iterator<Item = &Path> {
match self {
Corpus::SinglePath { path, .. } => {
Box::new(std::iter::once(path.as_path())) as Box<dyn Iterator<Item = _>>
}
Corpus::MultiplePaths { paths, .. } => {
Box::new(paths.iter().map(|path| path.as_path())) as Box<dyn Iterator<Item = _>>
let mode = Cow::Borrowed(mode);
iterator = Box::new(iterator.chain(std::iter::once((
metadata_file,
case_idx,
case,
mode,
))))
}
}
}
}
iterator.unique_by(|item| (&item.0.metadata_file_path, item.1, item.3.clone()))
}
pub fn paths_iter_mut(&mut self) -> impl Iterator<Item = &mut PathBuf> {
match self {
Corpus::SinglePath { path, .. } => {
Box::new(std::iter::once(path)) as Box<dyn Iterator<Item = _>>
}
Corpus::MultiplePaths { paths, .. } => {
Box::new(paths.iter_mut()) as Box<dyn Iterator<Item = _>>
}
}
}
pub fn path_count(&self) -> usize {
match self {
Corpus::SinglePath { .. } => 1,
Corpus::MultiplePaths { paths, .. } => paths.len(),
}
pub fn metadata_file_count(&self) -> usize {
self.metadata_files.len()
}
}
fn enumerate_metadata_files(path: impl AsRef<Path>) -> Vec<MetadataFile> {
let root_path = path.as_ref();
let mut tests = if !root_path.is_dir() {
Box::new(std::iter::once(root_path.to_path_buf())) as Box<dyn Iterator<Item = _>>
} else {
Box::new(
FilesWithExtensionIterator::new(root_path)
.with_use_cached_fs(true)
.with_allowed_extension("sol")
.with_allowed_extension("json"),
)
}
.map(move |metadata_file_path| (root_path, metadata_file_path))
.filter_map(|(root_path, metadata_file_path)| {
Metadata::try_from_file(&metadata_file_path)
.or_else(|| {
debug!(
discovered_from = %root_path.display(),
metadata_file_path = %metadata_file_path.display(),
"Skipping file since it doesn't contain valid metadata"
);
None
})
.map(|metadata| MetadataFile {
metadata_file_path,
corpus_file_path: root_path.to_path_buf(),
content: metadata,
})
.inspect(|metadata_file| {
debug!(
metadata_file_path = %metadata_file.relative_path().display(),
"Loaded metadata file"
)
})
})
.collect::<Vec<_>>();
tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
tests
}
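Putting the new `Corpus` API together, the flow the handlers above now follow is: fold the parsed test specifiers into a corpus, then iterate the deduplicated `(metadata file, case index, case, mode)` tuples. A condensed sketch, assumed to run inside a fallible context:

```rust
// Condensed sketch of the new corpus flow used by the handlers above.
let corpus = test_specifiers
    .into_iter()
    .try_fold(Corpus::default(), Corpus::with_test_specifier)?;
info!(len = corpus.metadata_file_count(), "Discovered metadata files");
for (metadata_file, case_idx, _case, mode) in corpus.cases_iterator() {
    // Tuples are unique by (path, case index, mode) thanks to unique_by.
    debug!(
        path = %metadata_file.metadata_file_path.display(),
        %case_idx,
        %mode,
        "Discovered test"
    );
}
```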
-1
View File
@@ -3,6 +3,5 @@
pub mod case;
pub mod corpus;
pub mod metadata;
pub mod mode;
pub mod steps;
pub mod traits;
+2 -2
View File
@@ -16,11 +16,11 @@ use revive_dt_common::{
cached_fs::read_to_string,
iterators::FilesWithExtensionIterator,
macros::define_wrapper_type,
types::{Mode, VmIdentifier},
types::{Mode, ParsedMode, VmIdentifier},
};
use tracing::error;
use crate::{case::Case, mode::ParsedMode};
use crate::case::Case;
pub const METADATA_FILE_EXTENSION: &str = "json";
pub const SOLIDITY_CASE_FILE_EXTENSION: &str = "sol";
-257
View File
@@ -1,257 +0,0 @@
use anyhow::Context as _;
use regex::Regex;
use revive_dt_common::iterators::EitherIter;
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::fmt::Display;
use std::str::FromStr;
use std::sync::LazyLock;
/// This represents a mode that has been parsed from test metadata.
///
/// Mode strings can take the following form (in pseudo-regex):
///
/// ```text
/// [YEILV][+-]? (M[0123sz])? <semver>?
/// ```
///
/// We can parse valid mode strings into [`ParsedMode`] using [`ParsedMode::from_str`].
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)]
#[serde(try_from = "String", into = "String")]
pub struct ParsedMode {
pub pipeline: Option<ModePipeline>,
pub optimize_flag: Option<bool>,
pub optimize_setting: Option<ModeOptimizerSetting>,
pub version: Option<semver::VersionReq>,
}
impl FromStr for ParsedMode {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
static REGEX: LazyLock<Regex> = LazyLock::new(|| {
Regex::new(r"(?x)
^
(?:(?P<pipeline>[YEILV])(?P<optimize_flag>[+-])?)? # Pipeline to use eg Y, E+, E-
\s*
(?P<optimize_setting>M[a-zA-Z0-9])? # Optimize setting eg M0, Ms, Mz
\s*
(?P<version>[>=<]*\d+(?:\.\d+)*)? # Optional semver version eg >=0.8.0, 0.7, <0.8
$
").unwrap()
});
let Some(caps) = REGEX.captures(s) else {
anyhow::bail!("Cannot parse mode '{s}' from string");
};
let pipeline = match caps.name("pipeline") {
Some(m) => Some(
ModePipeline::from_str(m.as_str())
.context("Failed to parse mode pipeline from string")?,
),
None => None,
};
let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+");
let optimize_setting = match caps.name("optimize_setting") {
Some(m) => Some(
ModeOptimizerSetting::from_str(m.as_str())
.context("Failed to parse optimizer setting from string")?,
),
None => None,
};
let version = match caps.name("version") {
Some(m) => Some(
semver::VersionReq::parse(m.as_str())
.map_err(|e| {
anyhow::anyhow!(
"Cannot parse the version requirement '{}': {e}",
m.as_str()
)
})
.context("Failed to parse semver requirement from mode string")?,
),
None => None,
};
Ok(ParsedMode {
pipeline,
optimize_flag,
optimize_setting,
version,
})
}
}
impl Display for ParsedMode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut has_written = false;
if let Some(pipeline) = self.pipeline {
pipeline.fmt(f)?;
if let Some(optimize_flag) = self.optimize_flag {
f.write_str(if optimize_flag { "+" } else { "-" })?;
}
has_written = true;
}
if let Some(optimize_setting) = self.optimize_setting {
if has_written {
f.write_str(" ")?;
}
optimize_setting.fmt(f)?;
has_written = true;
}
if let Some(version) = &self.version {
if has_written {
f.write_str(" ")?;
}
version.fmt(f)?;
}
Ok(())
}
}
impl From<ParsedMode> for String {
fn from(parsed_mode: ParsedMode) -> Self {
parsed_mode.to_string()
}
}
impl TryFrom<String> for ParsedMode {
type Error = anyhow::Error;
fn try_from(value: String) -> Result<Self, Self::Error> {
ParsedMode::from_str(&value)
}
}
impl ParsedMode {
/// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try.
pub fn to_modes(&self) -> impl Iterator<Item = Mode> {
let pipeline_iter = self.pipeline.as_ref().map_or_else(
|| EitherIter::A(ModePipeline::test_cases()),
|p| EitherIter::B(std::iter::once(*p)),
);
let optimize_flag_setting = self.optimize_flag.map(|flag| {
if flag {
ModeOptimizerSetting::M3
} else {
ModeOptimizerSetting::M0
}
});
let optimize_flag_iter = match optimize_flag_setting {
Some(setting) => EitherIter::A(std::iter::once(setting)),
None => EitherIter::B(ModeOptimizerSetting::test_cases()),
};
let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else(
|| EitherIter::A(optimize_flag_iter),
|s| EitherIter::B(std::iter::once(*s)),
);
pipeline_iter.flat_map(move |pipeline| {
optimize_settings_iter
.clone()
.map(move |optimize_setting| Mode {
pipeline,
optimize_setting,
version: self.version.clone(),
})
})
}
/// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s.
/// This avoids any duplicate entries.
pub fn many_to_modes<'a>(
parsed: impl Iterator<Item = &'a ParsedMode>,
) -> impl Iterator<Item = Mode> {
let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect();
modes.into_iter()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parsed_mode_from_str() {
let strings = vec![
("Mz", "Mz"),
("Y", "Y"),
("Y+", "Y+"),
("Y-", "Y-"),
("E", "E"),
("E+", "E+"),
("E-", "E-"),
("Y M0", "Y M0"),
("Y M1", "Y M1"),
("Y M2", "Y M2"),
("Y M3", "Y M3"),
("Y Ms", "Y Ms"),
("Y Mz", "Y Mz"),
("E M0", "E M0"),
("E M1", "E M1"),
("E M2", "E M2"),
("E M3", "E M3"),
("E Ms", "E Ms"),
("E Mz", "E Mz"),
// When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning)
("Y 0.8.0", "Y ^0.8.0"),
("E+ 0.8.0", "E+ ^0.8.0"),
("Y M3 >=0.8.0", "Y M3 >=0.8.0"),
("E Mz <0.7.0", "E Mz <0.7.0"),
// We can parse +- _and_ M1/M2 but the latter takes priority.
("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"),
("E- M2 0.7.0", "E- M2 ^0.7.0"),
// We don't see this in the wild but it is parsed.
("<=0.8", "<=0.8"),
];
for (actual, expected) in strings {
let parsed = ParsedMode::from_str(actual)
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
assert_eq!(
expected,
parsed.to_string(),
"Mode string '{actual}' did not parse to '{expected}': got '{parsed}'"
);
}
}
#[test]
fn test_parsed_mode_to_test_modes() {
let strings = vec![
("Mz", vec!["Y Mz", "E Mz"]),
("Y", vec!["Y M0", "Y M3"]),
("E", vec!["E M0", "E M3"]),
("Y+", vec!["Y M3"]),
("Y-", vec!["Y M0"]),
("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
(
"<=0.8",
vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
),
];
for (actual, expected) in strings {
let parsed = ParsedMode::from_str(actual)
.unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'"));
let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect();
let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect();
assert_eq!(
expected_set, actual_set,
"Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'"
);
}
}
}
+17
View File
@@ -3,7 +3,9 @@
use std::pin::Pin;
use std::sync::Arc;
use alloy::network::Ethereum;
use alloy::primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256};
use alloy::providers::DynProvider;
use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
use anyhow::Result;
@@ -74,6 +76,9 @@ pub trait EthereumNode {
+ '_,
>,
>;
fn provider(&self)
-> Pin<Box<dyn Future<Output = anyhow::Result<DynProvider<Ethereum>>> + '_>>;
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
@@ -92,4 +97,16 @@ pub struct MinedBlockInformation {
/// The hashes of the transactions that were mined as part of the block.
pub transaction_hashes: Vec<TxHash>,
/// The ref time for substrate based chains.
pub ref_time: u128,
/// The max ref time for substrate based chains.
pub max_ref_time: u64,
/// The proof size for substrate based chains.
pub proof_size: u128,
/// The max proof size for substrate based chains.
pub max_proof_size: u64,
}
+1
View File
@@ -29,6 +29,7 @@ serde_yaml_ng = { workspace = true }
sp-core = { workspace = true }
sp-runtime = { workspace = true }
subxt = { workspace = true }
zombienet-sdk = { workspace = true }
[dev-dependencies]
+27 -12
View File
@@ -32,7 +32,7 @@ use alloy::{
},
};
use anyhow::Context as _;
use futures::{Stream, StreamExt};
use futures::{FutureExt, Stream, StreamExt};
use revive_common::EVMVersion;
use tokio::sync::OnceCell;
use tracing::{Instrument, error, instrument};
@@ -130,7 +130,7 @@ impl GethNode {
/// Create the node directory and call `geth init` to configure the genesis.
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
fn init(&mut self, genesis: Genesis) -> anyhow::Result<&mut Self> {
let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory);
@@ -139,16 +139,7 @@ impl GethNode {
create_dir_all(&self.logs_directory)
.context("Failed to create logs directory for geth node")?;
for signer_address in
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
{
// Note, the use of the entry API here means that we only modify the entries for any
// account that is not in the `alloc` field of the genesis state.
genesis
.alloc
.entry(signer_address)
.or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
}
let genesis = Self::node_genesis(genesis, self.wallet.as_ref());
let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE);
serde_json::to_writer(
File::create(&genesis_path).context("Failed to create geth genesis file")?,
@@ -265,6 +256,16 @@ impl GethNode {
.await
.cloned()
}
pub fn node_genesis(mut genesis: Genesis, wallet: &EthereumWallet) -> Genesis {
for signer_address in NetworkWallet::<Ethereum>::signer_addresses(&wallet) {
genesis
.alloc
.entry(signer_address)
.or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
}
genesis
}
}
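The `entry().or_insert()` call in `node_genesis` funds a signer only when the genesis `alloc` has no entry for it; balances already present in the genesis are left untouched. A standalone illustration of that semantics with a plain `HashMap`:

use std::collections::HashMap;

fn main() {
    let mut alloc: HashMap<&str, u64> = HashMap::from([("alice", 42)]);
    // `or_insert` only writes when the key is absent.
    alloc.entry("alice").or_insert(100);
    alloc.entry("bob").or_insert(100);
    assert_eq!(alloc["alice"], 42); // pre-existing balance is kept
    assert_eq!(alloc["bob"], 100); // missing account is funded with the default
}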
impl EthereumNode for GethNode {
@@ -535,6 +536,10 @@ impl EthereumNode for GethNode {
.as_hashes()
.expect("Must be hashes")
.to_vec(),
ref_time: 0,
max_ref_time: 0,
proof_size: 0,
max_proof_size: 0,
})
});
@@ -542,6 +547,16 @@ impl EthereumNode for GethNode {
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
})
}
fn provider(
&self,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
{
Box::pin(
self.provider()
.map(|provider| provider.map(|provider| provider.erased())),
)
}
}
pub struct GethNodeResolver {
@@ -43,7 +43,7 @@ use alloy::{
},
};
use anyhow::Context as _;
use futures::{Stream, StreamExt};
use futures::{FutureExt, Stream, StreamExt};
use revive_common::EVMVersion;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_with::serde_as;
@@ -222,6 +222,7 @@ impl LighthouseGethNode {
"--ws.port=8546".to_string(),
"--ws.api=eth,net,web3,txpool,engine".to_string(),
"--ws.origins=*".to_string(),
"--miner.gaslimit=30000000".to_string(),
],
consensus_layer_extra_parameters: vec![
"--disable-quic".to_string(),
@@ -247,6 +248,8 @@ impl LighthouseGethNode {
.collect::<BTreeMap<_, _>>();
serde_json::to_string(&map).unwrap()
},
gas_limit: 30_000_000,
genesis_gaslimit: 30_000_000,
},
wait_for_finalization: false,
port_publisher: Some(PortPublisherParameters {
@@ -538,6 +541,16 @@ impl LighthouseGethNode {
.await
})
}
pub fn node_genesis(mut genesis: Genesis, wallet: &EthereumWallet) -> Genesis {
for signer_address in NetworkWallet::<Ethereum>::signer_addresses(&wallet) {
genesis
.alloc
.entry(signer_address)
.or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
}
genesis
}
}
impl EthereumNode for LighthouseGethNode {
@@ -754,6 +767,10 @@ impl EthereumNode for LighthouseGethNode {
.as_hashes()
.expect("Must be hashes")
.to_vec(),
ref_time: 0,
max_ref_time: 0,
proof_size: 0,
max_proof_size: 0,
})
});
@@ -761,6 +778,16 @@ impl EthereumNode for LighthouseGethNode {
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
})
}
fn provider(
&self,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
{
Box::pin(
self.http_provider()
.map(|provider| provider.map(|provider| provider.erased())),
)
}
}
pub struct LighthouseGethNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereum>> {
@@ -1035,6 +1062,8 @@ struct NetworkParameters {
pub num_validator_keys_per_node: u64,
pub genesis_delay: u64,
pub genesis_gaslimit: u64,
pub gas_limit: u64,
pub prefunded_accounts: String,
}
@@ -1131,6 +1160,7 @@ mod tests {
}
#[tokio::test]
#[ignore = "Ignored since they take a long time to run"]
async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
// Arrange
let (context, node) = new_node();
+178 -593
View File
@@ -1,6 +1,6 @@
use std::{
fs::{create_dir_all, remove_dir_all},
path::PathBuf,
path::{Path, PathBuf},
pin::Pin,
process::{Command, Stdio},
sync::{
@@ -11,17 +11,10 @@ use std::{
};
use alloy::{
consensus::{BlockHeader, TxEnvelope},
eips::BlockNumberOrTag,
genesis::{Genesis, GenesisAccount},
network::{
Ethereum, EthereumWallet, Network, NetworkWallet, TransactionBuilder,
TransactionBuilderError, UnbuiltTransactionError,
},
primitives::{
Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, StorageKey,
TxHash, U256,
},
genesis::Genesis,
network::{Ethereum, EthereumWallet, NetworkWallet},
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
providers::{
Provider,
ext::DebugApi,
@@ -29,26 +22,25 @@ use alloy::{
},
rpc::types::{
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
eth::{Block, Header, Transaction},
trace::geth::{
DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame,
},
},
};
use anyhow::Context as _;
use futures::{Stream, StreamExt};
use futures::{FutureExt, Stream, StreamExt};
use revive_common::EVMVersion;
use revive_dt_common::fs::clear_directory;
use revive_dt_format::traits::ResolverApi;
use serde::{Deserialize, Serialize};
use serde_json::{Value as JsonValue, json};
use serde_json::json;
use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32;
use revive_dt_config::*;
use revive_dt_node_interaction::{EthereumNode, MinedBlockInformation};
use subxt::{OnlineClient, SubstrateConfig};
use tokio::sync::OnceCell;
use tracing::instrument;
use tracing::{instrument, trace};
use crate::{
Node,
@@ -79,7 +71,8 @@ pub struct SubstrateNode {
eth_proxy_process: Option<Process>,
wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<ReviveNetwork, Arc<EthereumWallet>>>,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
consensus: Option<String>,
}
impl SubstrateNode {
@@ -102,9 +95,11 @@ impl SubstrateNode {
pub fn new(
node_path: PathBuf,
export_chainspec_command: &str,
consensus: Option<String>,
context: impl AsRef<WorkingDirectoryConfiguration>
+ AsRef<EthRpcConfiguration>
+ AsRef<WalletConfiguration>,
existing_connection_strings: &[String],
) -> Self {
let working_directory_path =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context).as_path();
@@ -118,12 +113,17 @@ impl SubstrateNode {
let base_directory = substrate_directory.join(id.to_string());
let logs_directory = base_directory.join(Self::LOGS_DIRECTORY);
let rpc_url = existing_connection_strings
.get(id as usize)
.cloned()
.unwrap_or_default();
Self {
id,
node_binary: node_path,
eth_proxy_binary: eth_rpc_path.to_path_buf(),
export_chainspec_command: export_chainspec_command.to_string(),
rpc_url: String::new(),
rpc_url,
base_directory,
logs_directory,
substrate_process: None,
@@ -131,14 +131,21 @@ impl SubstrateNode {
wallet: wallet.clone(),
nonce_manager: Default::default(),
provider: Default::default(),
consensus,
}
}
fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> {
if !self.rpc_url.is_empty() {
return Ok(self);
}
trace!("Removing the various directories");
let _ = remove_dir_all(self.base_directory.as_path());
let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory);
trace!("Creating the various directories");
create_dir_all(&self.base_directory)
.context("Failed to create base directory for substrate node")?;
create_dir_all(&self.logs_directory)
@@ -146,66 +153,15 @@ impl SubstrateNode {
let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);
// Note: we do not pipe the logs of this process to a separate file since this is just a
// once-off export of the default chain spec and not part of the long-running node process.
let output = Command::new(&self.node_binary)
.arg(self.export_chainspec_command.as_str())
.arg("--chain")
.arg("dev")
.env_remove("RUST_LOG")
.output()
.context("Failed to export the chain-spec")?;
if !output.status.success() {
anyhow::bail!(
"Substrate-node export-chain-spec failed: {}",
String::from_utf8_lossy(&output.stderr)
);
}
let content = String::from_utf8(output.stdout)
.context("Failed to decode Substrate export-chain-spec output as UTF-8")?;
let mut chainspec_json: JsonValue =
serde_json::from_str(&content).context("Failed to parse Substrate chain spec JSON")?;
let existing_chainspec_balances =
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"]
.as_array()
.cloned()
.unwrap_or_default();
let mut merged_balances: Vec<(String, u128)> = existing_chainspec_balances
.into_iter()
.filter_map(|val| {
if let Some(arr) = val.as_array() {
if arr.len() == 2 {
let account = arr[0].as_str()?.to_string();
let balance = arr[1].as_f64()? as u128;
return Some((account, balance));
}
}
None
})
.collect();
let mut eth_balances = {
for signer_address in
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
{
// Note, the use of the entry API here means that we only modify the entries for any
// account that is not in the `alloc` field of the genesis state.
genesis
.alloc
.entry(signer_address)
.or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
}
self.extract_balance_from_genesis_file(&genesis)
.context("Failed to extract balances from EVM genesis JSON")?
};
merged_balances.append(&mut eth_balances);
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] =
json!(merged_balances);
trace!("Creating the node genesis");
let chainspec_json = Self::node_genesis(
&self.node_binary,
&self.export_chainspec_command,
&self.wallet,
)
.context("Failed to prepare the chainspec command")?;
trace!("Writing the node genesis");
serde_json::to_writer_pretty(
std::fs::File::create(&template_chainspec_path)
.context("Failed to create substrate template chainspec file")?,
@@ -216,6 +172,10 @@ impl SubstrateNode {
}
fn spawn_process(&mut self) -> anyhow::Result<()> {
if !self.rpc_url.is_empty() {
return Ok(());
}
let substrate_rpc_port = Self::BASE_SUBSTRATE_RPC_PORT + self.id as u16;
let proxy_rpc_port = Self::BASE_PROXY_RPC_PORT + self.id as u16;
@@ -223,12 +183,13 @@ impl SubstrateNode {
self.rpc_url = format!("http://127.0.0.1:{proxy_rpc_port}");
trace!("Spawning the substrate process");
let substrate_process = Process::new(
"node",
self.logs_directory.as_path(),
self.node_binary.as_path(),
|command, stdout_file, stderr_file| {
command
let cmd = command
.arg("--dev")
.arg("--chain")
.arg(chainspec_path)
@@ -245,12 +206,19 @@ impl SubstrateNode {
.arg("all")
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.arg("--pool-limit")
.arg(u32::MAX.to_string())
.arg("--pool-kbytes")
.arg(u32::MAX.to_string())
.env("RUST_LOG", Self::SUBSTRATE_LOG_ENV)
.stdout(stdout_file)
.stderr(stderr_file);
if let Some(consensus) = self.consensus.as_ref() {
cmd.arg("--consensus").arg(consensus.clone());
}
},
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration: Duration::from_secs(30),
max_wait_duration: Duration::from_secs(90),
check_function: Box::new(|_, stderr_line| match stderr_line {
Some(line) => Ok(line.contains(Self::SUBSTRATE_READY_MARKER)),
None => Ok(false),
@@ -267,6 +235,7 @@ impl SubstrateNode {
}
}
trace!("Spawning eth-rpc process");
let eth_proxy_process = Process::new(
"proxy",
self.logs_directory.as_path(),
@@ -305,21 +274,6 @@ impl SubstrateNode {
Ok(())
}
fn extract_balance_from_genesis_file(
&self,
genesis: &Genesis,
) -> anyhow::Result<Vec<(String, u128)>> {
genesis
.alloc
.iter()
.try_fold(Vec::new(), |mut vec, (address, acc)| {
let substrate_address = Self::eth_to_substrate_address(address);
let balance = acc.balance.try_into()?;
vec.push((substrate_address, balance));
Ok(vec)
})
}
fn eth_to_substrate_address(address: &Address) -> String {
let eth_bytes = address.0.0;
@@ -342,12 +296,10 @@ impl SubstrateNode {
Ok(String::from_utf8_lossy(&output).trim().to_string())
}
async fn provider(
&self,
) -> anyhow::Result<ConcreteProvider<ReviveNetwork, Arc<EthereumWallet>>> {
async fn provider(&self) -> anyhow::Result<ConcreteProvider<Ethereum, Arc<EthereumWallet>>> {
self.provider
.get_or_try_init(|| async move {
construct_concurrency_limited_provider::<ReviveNetwork, _>(
construct_concurrency_limited_provider::<Ethereum, _>(
self.rpc_url.as_str(),
FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000),
ChainIdFiller::new(Some(CHAIN_ID)),
@@ -360,6 +312,49 @@ impl SubstrateNode {
.await
.cloned()
}
pub fn node_genesis(
node_path: &Path,
export_chainspec_command: &str,
wallet: &EthereumWallet,
) -> anyhow::Result<serde_json::Value> {
trace!("Exporting the chainspec");
let output = Command::new(node_path)
.arg(export_chainspec_command)
.arg("--chain")
.arg("dev")
.env_remove("RUST_LOG")
.output()
.context("Failed to export the chain-spec")?;
trace!("Waiting for chainspec export");
if !output.status.success() {
anyhow::bail!(
"Substrate-node export-chain-spec failed: {}",
String::from_utf8_lossy(&output.stderr)
);
}
trace!("Obtained chainspec");
let content = String::from_utf8(output.stdout)
.context("Failed to decode Substrate export-chain-spec output as UTF-8")?;
let mut chainspec_json = serde_json::from_str::<serde_json::Value>(&content)
.context("Failed to parse Substrate chain spec JSON")?;
let existing_chainspec_balances =
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"]
.as_array_mut()
.expect("Can't fail");
trace!("Adding addresses to chainspec");
for address in NetworkWallet::<Ethereum>::signer_addresses(wallet) {
let substrate_address = Self::eth_to_substrate_address(&address);
let balance = INITIAL_BALANCE;
existing_chainspec_balances.push(json!((substrate_address, balance)));
}
Ok(chainspec_json)
}
}
impl EthereumNode for SubstrateNode {
@@ -508,44 +503,97 @@ impl EthereumNode for SubstrateNode {
+ '_,
>,
> {
Box::pin(async move {
let provider = self
.provider()
.await
.context("Failed to create the provider for block subscription")?;
let mut block_subscription = provider
.watch_full_blocks()
.await
.context("Failed to create the blocks stream")?;
block_subscription.set_channel_size(0xFFFF);
block_subscription.set_poll_interval(Duration::from_secs(1));
let block_stream = block_subscription.into_stream();
#[subxt::subxt(runtime_metadata_path = "../../assets/revive_metadata.scale")]
pub mod revive {}
let mined_block_information_stream = block_stream.filter_map(|block| async {
let block = block.ok()?;
Some(MinedBlockInformation {
block_number: block.number(),
block_timestamp: block.header.timestamp,
mined_gas: block.header.gas_used as _,
block_gas_limit: block.header.gas_limit,
transaction_hashes: block
.transactions
.into_hashes()
.as_hashes()
.expect("Must be hashes")
.to_vec(),
})
Box::pin(async move {
let substrate_rpc_port = Self::BASE_SUBSTRATE_RPC_PORT + self.id as u16;
let substrate_rpc_url = format!("ws://127.0.0.1:{substrate_rpc_port}");
let api = OnlineClient::<SubstrateConfig>::from_url(substrate_rpc_url)
.await
.context("Failed to create subxt rpc client")?;
let provider = self.provider().await.context("Failed to create provider")?;
let block_stream = api
.blocks()
.subscribe_all()
.await
.context("Failed to subscribe to blocks")?;
let mined_block_information_stream = block_stream.filter_map(move |block| {
let api = api.clone();
let provider = provider.clone();
async move {
let substrate_block = block.ok()?;
let revive_block = provider
.get_block_by_number(
BlockNumberOrTag::Number(substrate_block.number() as _),
)
.await
.expect("TODO: Remove")
.expect("TODO: Remove");
let used = api
.storage()
.at(substrate_block.reference())
.fetch_or_default(&revive::storage().system().block_weight())
.await
.expect("TODO: Remove");
let block_ref_time = (used.normal.ref_time as u128)
+ (used.operational.ref_time as u128)
+ (used.mandatory.ref_time as u128);
let block_proof_size = (used.normal.proof_size as u128)
+ (used.operational.proof_size as u128)
+ (used.mandatory.proof_size as u128);
let limits = api
.constants()
.at(&revive::constants().system().block_weights())
.expect("TODO: Remove");
let max_ref_time = limits.max_block.ref_time;
let max_proof_size = limits.max_block.proof_size;
Some(MinedBlockInformation {
block_number: substrate_block.number() as _,
block_timestamp: revive_block.header.timestamp,
mined_gas: revive_block.header.gas_used as _,
block_gas_limit: revive_block.header.gas_limit as _,
transaction_hashes: revive_block
.transactions
.into_hashes()
.as_hashes()
.expect("Must be hashes")
.to_vec(),
ref_time: block_ref_time,
max_ref_time,
proof_size: block_proof_size,
max_proof_size,
})
}
});
Ok(Box::pin(mined_block_information_stream)
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
})
}
fn provider(
&self,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
{
Box::pin(
self.provider()
.map(|provider| provider.map(|provider| provider.erased())),
)
}
}
pub struct SubstrateNodeResolver {
id: u32,
provider: ConcreteProvider<ReviveNetwork, Arc<EthereumWallet>>,
provider: ConcreteProvider<Ethereum, Arc<EthereumWallet>>,
}
impl ResolverApi for SubstrateNodeResolver {
@@ -709,430 +757,6 @@ impl Drop for SubstrateNode {
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ReviveNetwork;
impl Network for ReviveNetwork {
type TxType = <Ethereum as Network>::TxType;
type TxEnvelope = <Ethereum as Network>::TxEnvelope;
type UnsignedTx = <Ethereum as Network>::UnsignedTx;
type ReceiptEnvelope = <Ethereum as Network>::ReceiptEnvelope;
type Header = ReviveHeader;
type TransactionRequest = <Ethereum as Network>::TransactionRequest;
type TransactionResponse = <Ethereum as Network>::TransactionResponse;
type ReceiptResponse = <Ethereum as Network>::ReceiptResponse;
type HeaderResponse = Header<ReviveHeader>;
type BlockResponse = Block<Transaction<TxEnvelope>, Header<ReviveHeader>>;
}
impl TransactionBuilder<ReviveNetwork> for <Ethereum as Network>::TransactionRequest {
fn chain_id(&self) -> Option<alloy::primitives::ChainId> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::chain_id(self)
}
fn set_chain_id(&mut self, chain_id: alloy::primitives::ChainId) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_chain_id(
self, chain_id,
)
}
fn nonce(&self) -> Option<u64> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::nonce(self)
}
fn set_nonce(&mut self, nonce: u64) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_nonce(
self, nonce,
)
}
fn take_nonce(&mut self) -> Option<u64> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::take_nonce(
self,
)
}
fn input(&self) -> Option<&alloy::primitives::Bytes> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::input(self)
}
fn set_input<T: Into<alloy::primitives::Bytes>>(&mut self, input: T) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_input(
self, input,
)
}
fn from(&self) -> Option<Address> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::from(self)
}
fn set_from(&mut self, from: Address) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_from(
self, from,
)
}
fn kind(&self) -> Option<alloy::primitives::TxKind> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::kind(self)
}
fn clear_kind(&mut self) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::clear_kind(
self,
)
}
fn set_kind(&mut self, kind: alloy::primitives::TxKind) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_kind(
self, kind,
)
}
fn value(&self) -> Option<alloy::primitives::U256> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::value(self)
}
fn set_value(&mut self, value: alloy::primitives::U256) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_value(
self, value,
)
}
fn gas_price(&self) -> Option<u128> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::gas_price(self)
}
fn set_gas_price(&mut self, gas_price: u128) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_gas_price(
self, gas_price,
)
}
fn max_fee_per_gas(&self) -> Option<u128> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::max_fee_per_gas(
self,
)
}
fn set_max_fee_per_gas(&mut self, max_fee_per_gas: u128) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_max_fee_per_gas(
self, max_fee_per_gas
)
}
fn max_priority_fee_per_gas(&self) -> Option<u128> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::max_priority_fee_per_gas(
self,
)
}
fn set_max_priority_fee_per_gas(&mut self, max_priority_fee_per_gas: u128) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_max_priority_fee_per_gas(
self, max_priority_fee_per_gas
)
}
fn gas_limit(&self) -> Option<u64> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::gas_limit(self)
}
fn set_gas_limit(&mut self, gas_limit: u64) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_gas_limit(
self, gas_limit,
)
}
fn access_list(&self) -> Option<&alloy::rpc::types::AccessList> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::access_list(
self,
)
}
fn set_access_list(&mut self, access_list: alloy::rpc::types::AccessList) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::set_access_list(
self,
access_list,
)
}
fn complete_type(
&self,
ty: <ReviveNetwork as Network>::TxType,
) -> Result<(), Vec<&'static str>> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::complete_type(
self, ty,
)
}
fn can_submit(&self) -> bool {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::can_submit(
self,
)
}
fn can_build(&self) -> bool {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::can_build(self)
}
fn output_tx_type(&self) -> <ReviveNetwork as Network>::TxType {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::output_tx_type(
self,
)
}
fn output_tx_type_checked(&self) -> Option<<ReviveNetwork as Network>::TxType> {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::output_tx_type_checked(
self,
)
}
fn prep_for_submission(&mut self) {
<<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::prep_for_submission(
self,
)
}
fn build_unsigned(
self,
) -> alloy::network::BuildResult<<ReviveNetwork as Network>::UnsignedTx, ReviveNetwork> {
let result = <<Ethereum as Network>::TransactionRequest as TransactionBuilder<Ethereum>>::build_unsigned(
self,
);
match result {
Ok(unsigned_tx) => Ok(unsigned_tx),
Err(UnbuiltTransactionError { request, error }) => {
Err(UnbuiltTransactionError::<ReviveNetwork> {
request,
error: match error {
TransactionBuilderError::InvalidTransactionRequest(tx_type, items) => {
TransactionBuilderError::InvalidTransactionRequest(tx_type, items)
}
TransactionBuilderError::UnsupportedSignatureType => {
TransactionBuilderError::UnsupportedSignatureType
}
TransactionBuilderError::Signer(error) => {
TransactionBuilderError::Signer(error)
}
TransactionBuilderError::Custom(error) => {
TransactionBuilderError::Custom(error)
}
},
})
}
}
}
async fn build<W: alloy::network::NetworkWallet<ReviveNetwork>>(
self,
wallet: &W,
) -> Result<<ReviveNetwork as Network>::TxEnvelope, TransactionBuilderError<ReviveNetwork>>
{
Ok(wallet.sign_request(self).await?)
}
}
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ReviveHeader {
/// The Keccak 256-bit hash of the parent
/// block's header, in its entirety; formally Hp.
pub parent_hash: B256,
/// The Keccak 256-bit hash of the ommers list portion of this block; formally Ho.
#[serde(rename = "sha3Uncles", alias = "ommersHash")]
pub ommers_hash: B256,
/// The 160-bit address to which all fees collected from the successful mining of this block
/// shall be transferred; formally Hc.
#[serde(rename = "miner", alias = "beneficiary")]
pub beneficiary: Address,
/// The Keccak 256-bit hash of the root node of the state trie, after all transactions are
/// executed and finalisations applied; formally Hr.
pub state_root: B256,
/// The Keccak 256-bit hash of the root node of the trie structure populated with each
/// transaction in the transactions list portion of the block; formally Ht.
pub transactions_root: B256,
/// The Keccak 256-bit hash of the root node of the trie structure populated with the receipts
/// of each transaction in the transactions list portion of the block; formally He.
pub receipts_root: B256,
/// The Bloom filter composed from indexable information (logger address and log topics)
/// contained in each log entry from the receipt of each transaction in the transactions list;
/// formally Hb.
pub logs_bloom: Bloom,
/// A scalar value corresponding to the difficulty level of this block. This can be calculated
/// from the previous block's difficulty level and the timestamp; formally Hd.
pub difficulty: U256,
/// A scalar value equal to the number of ancestor blocks. The genesis block has a number of
/// zero; formally Hi.
#[serde(with = "alloy::serde::quantity")]
pub number: BlockNumber,
/// A scalar value equal to the current limit of gas expenditure per block; formally Hl.
// This is the main difference over the Ethereum network implementation. We use u128 here and
// not u64.
#[serde(with = "alloy::serde::quantity")]
pub gas_limit: u128,
/// A scalar value equal to the total gas used in transactions in this block; formally Hg.
#[serde(with = "alloy::serde::quantity")]
pub gas_used: u64,
/// A scalar value equal to the reasonable output of Unix's time() at this block's inception;
/// formally Hs.
#[serde(with = "alloy::serde::quantity")]
pub timestamp: u64,
/// An arbitrary byte array containing data relevant to this block. This must be 32 bytes or
/// fewer; formally Hx.
pub extra_data: Bytes,
/// A 256-bit hash which, combined with the
/// nonce, proves that a sufficient amount of computation has been carried out on this block;
/// formally Hm.
pub mix_hash: B256,
/// A 64-bit value which, combined with the mixhash, proves that a sufficient amount of
/// computation has been carried out on this block; formally Hn.
pub nonce: B64,
/// A scalar representing EIP1559 base fee which can move up or down each block according
/// to a formula which is a function of gas used in parent block and gas target
/// (block gas limit divided by elasticity multiplier) of parent block.
/// The algorithm results in the base fee per gas increasing when blocks are
/// above the gas target, and decreasing when blocks are below the gas target. The base fee per
/// gas is burned.
#[serde(
default,
with = "alloy::serde::quantity::opt",
skip_serializing_if = "Option::is_none"
)]
pub base_fee_per_gas: Option<u64>,
/// The Keccak 256-bit hash of the withdrawals list portion of this block.
/// <https://eips.ethereum.org/EIPS/eip-4895>
#[serde(default, skip_serializing_if = "Option::is_none")]
pub withdrawals_root: Option<B256>,
/// The total amount of blob gas consumed by the transactions within the block, added in
/// EIP-4844.
#[serde(
default,
with = "alloy::serde::quantity::opt",
skip_serializing_if = "Option::is_none"
)]
pub blob_gas_used: Option<u64>,
/// A running total of blob gas consumed in excess of the target, prior to the block. Blocks
/// with above-target blob gas consumption increase this value, blocks with below-target blob
/// gas consumption decrease it (bounded at 0). This was added in EIP-4844.
#[serde(
default,
with = "alloy::serde::quantity::opt",
skip_serializing_if = "Option::is_none"
)]
pub excess_blob_gas: Option<u64>,
/// The hash of the parent beacon block's root is included in execution blocks, as proposed by
/// EIP-4788.
///
/// This enables trust-minimized access to consensus state, supporting staking pools, bridges,
/// and more.
///
/// The beacon roots contract handles root storage, enhancing Ethereum's functionalities.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub parent_beacon_block_root: Option<B256>,
/// The Keccak 256-bit hash of an RLP encoded list with each
/// [EIP-7685] request in the block body.
///
/// [EIP-7685]: https://eips.ethereum.org/EIPS/eip-7685
#[serde(default, skip_serializing_if = "Option::is_none")]
pub requests_hash: Option<B256>,
}
impl BlockHeader for ReviveHeader {
fn parent_hash(&self) -> B256 {
self.parent_hash
}
fn ommers_hash(&self) -> B256 {
self.ommers_hash
}
fn beneficiary(&self) -> Address {
self.beneficiary
}
fn state_root(&self) -> B256 {
self.state_root
}
fn transactions_root(&self) -> B256 {
self.transactions_root
}
fn receipts_root(&self) -> B256 {
self.receipts_root
}
fn withdrawals_root(&self) -> Option<B256> {
self.withdrawals_root
}
fn logs_bloom(&self) -> Bloom {
self.logs_bloom
}
fn difficulty(&self) -> U256 {
self.difficulty
}
fn number(&self) -> BlockNumber {
self.number
}
// There's sadly nothing that we can do about this. We're required to implement this trait on
// any type that represents a header, and the gas limit type used here is a u64.
fn gas_limit(&self) -> u64 {
self.gas_limit.try_into().unwrap_or(u64::MAX)
}
fn gas_used(&self) -> u64 {
self.gas_used
}
fn timestamp(&self) -> u64 {
self.timestamp
}
fn mix_hash(&self) -> Option<B256> {
Some(self.mix_hash)
}
fn nonce(&self) -> Option<B64> {
Some(self.nonce)
}
fn base_fee_per_gas(&self) -> Option<u64> {
self.base_fee_per_gas
}
fn blob_gas_used(&self) -> Option<u64> {
self.blob_gas_used
}
fn excess_blob_gas(&self) -> Option<u64> {
self.excess_blob_gas
}
fn parent_beacon_block_root(&self) -> Option<B256> {
self.parent_beacon_block_root
}
fn requests_hash(&self) -> Option<B256> {
self.requests_hash
}
fn extra_data(&self) -> &Bytes {
&self.extra_data
}
}
#[cfg(test)]
mod tests {
use alloy::rpc::types::TransactionRequest;
@@ -1170,7 +794,9 @@ mod tests {
let mut node = SubstrateNode::new(
context.kitchensink_configuration.path.clone(),
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
None,
&context,
&[],
);
node.init(context.genesis_configuration.genesis().unwrap().clone())
.expect("Failed to initialize the node")
@@ -1235,7 +861,9 @@ mod tests {
let mut dummy_node = SubstrateNode::new(
context.kitchensink_configuration.path.clone(),
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
None,
&context,
&[],
);
// Call `init()`
@@ -1269,49 +897,6 @@ mod tests {
);
}
#[test]
#[ignore = "Ignored since they take a long time to run"]
fn test_parse_genesis_alloc() {
// Create test genesis file
let genesis_json = r#"
{
"alloc": {
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1": { "balance": "1000000000000000000" },
"0x0000000000000000000000000000000000000000": { "balance": "0xDE0B6B3A7640000" },
"0xffffffffffffffffffffffffffffffffffffffff": { "balance": "123456789" }
}
}
"#;
let context = test_config();
let node = SubstrateNode::new(
context.kitchensink_configuration.path.clone(),
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND,
&context,
);
let result = node
.extract_balance_from_genesis_file(&serde_json::from_str(genesis_json).unwrap())
.unwrap();
let result_map: std::collections::HashMap<_, _> = result.into_iter().collect();
assert_eq!(
result_map.get("5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV"),
Some(&1_000_000_000_000_000_000u128)
);
assert_eq!(
result_map.get("5C4hrfjw9DjXZTzV3MwzrrAr9P1MLDHajjSidz9bR544LEq1"),
Some(&1_000_000_000_000_000_000u128)
);
assert_eq!(
result_map.get("5HrN7fHLXWcFiXPwwtq2EkSGns9eMmoUQnbVKweNz3VVr6N4"),
Some(&123_456_789u128)
);
}
#[test]
#[ignore = "Ignored since they take a long time to run"]
fn print_eth_to_substrate_mappings() {
+184 -250
View File
@@ -28,7 +28,7 @@
use std::{
fs::{create_dir_all, remove_dir_all},
path::PathBuf,
path::{Path, PathBuf},
pin::Pin,
process::{Command, Stdio},
sync::{
@@ -40,7 +40,7 @@ use std::{
use alloy::{
eips::BlockNumberOrTag,
genesis::{Genesis, GenesisAccount},
genesis::Genesis,
network::{Ethereum, EthereumWallet, NetworkWallet},
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
providers::{
@@ -55,15 +55,16 @@ use alloy::{
};
use anyhow::Context as _;
use futures::{Stream, StreamExt};
use futures::{FutureExt, Stream, StreamExt};
use revive_common::EVMVersion;
use revive_dt_common::fs::clear_directory;
use revive_dt_config::*;
use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::{EthereumNode, MinedBlockInformation};
use serde_json::{Value as JsonValue, json};
use serde_json::json;
use sp_core::crypto::Ss58Codec;
use sp_runtime::AccountId32;
use subxt::{OnlineClient, SubstrateConfig};
use tokio::sync::OnceCell;
use tracing::instrument;
use zombienet_sdk::{LocalFileSystem, NetworkConfigBuilder, NetworkConfigExt};
@@ -72,17 +73,19 @@ use crate::{
Node,
constants::INITIAL_BALANCE,
helpers::{Process, ProcessReadinessWaitBehavior},
node_implementations::substrate::ReviveNetwork,
provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider},
provider_utils::{
ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider,
execute_transaction,
},
};
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
/// A Zombienet network where collator is `polkadot-parachain` node with `eth-rpc`
/// [`ZombieNode`] abstracts away the details of managing the zombienet network and provides
/// an interface to interact with the parachain's Ethereum RPC.
/// A Zombienet network whose collator is a `polkadot-parachain` node with `eth-rpc`.
/// [`ZombienetNode`] abstracts away the details of managing the zombienet network and provides an
/// interface to interact with the parachain's Ethereum RPC.
#[derive(Debug, Default)]
pub struct ZombieNode {
pub struct ZombienetNode {
/* Node Identifier */
id: u32,
connection_string: String,
@@ -107,10 +110,10 @@ pub struct ZombieNode {
wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<ReviveNetwork, Arc<EthereumWallet>>>,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
}
impl ZombieNode {
impl ZombienetNode {
const BASE_DIRECTORY: &str = "zombienet";
const DATA_DIRECTORY: &str = "data";
const LOGS_DIRECTORY: &str = "logs";
@@ -119,6 +122,8 @@ impl ZombieNode {
const PARACHAIN_ID: u32 = 100;
const ETH_RPC_BASE_PORT: u16 = 8545;
const PROXY_LOG_ENV: &str = "info,eth-rpc=debug";
const ETH_RPC_READY_MARKER: &str = "Running JSON-RPC server";
const EXPORT_CHAINSPEC_COMMAND: &str = "build-spec";
@@ -159,7 +164,7 @@ impl ZombieNode {
}
}
fn init(&mut self, genesis: Genesis) -> anyhow::Result<&mut Self> {
fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> {
let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory);
@@ -169,7 +174,7 @@ impl ZombieNode {
.context("Failed to create logs directory for zombie node")?;
let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);
self.prepare_chainspec(template_chainspec_path.clone(), genesis)?;
self.prepare_chainspec(template_chainspec_path.clone())?;
let polkadot_parachain_path = self
.polkadot_parachain_path
.to_str()
@@ -178,25 +183,35 @@ impl ZombieNode {
let node_rpc_port = Self::NODE_BASE_RPC_PORT + self.id as u16;
let network_config = NetworkConfigBuilder::new()
.with_relaychain(|r| {
r.with_chain("westend-local")
.with_relaychain(|relay_chain| {
relay_chain
.with_chain("westend-local")
.with_default_command("polkadot")
.with_node(|node| node.with_name("alice"))
.with_node(|node| node.with_name("bob"))
})
.with_global_settings(|g| g.with_base_dir(&self.base_directory))
.with_parachain(|p| {
p.with_id(Self::PARACHAIN_ID)
.with_chain_spec_path(template_chainspec_path.to_str().unwrap())
.with_global_settings(|global_settings| {
// global_settings.with_base_dir(&self.base_directory)
global_settings
})
.with_parachain(|parachain| {
parachain
.with_id(Self::PARACHAIN_ID)
.with_chain_spec_path(template_chainspec_path.to_path_buf())
.with_chain("asset-hub-westend-local")
.with_collator(|n| {
n.with_name("Collator")
.with_collator(|node_config| {
node_config
.with_name("Collator")
.with_command(polkadot_parachain_path)
.with_rpc_port(node_rpc_port)
.with_args(vec![
("--pool-limit", u32::MAX.to_string().as_str()).into(),
("--pool-kbytes", u32::MAX.to_string().as_str()).into(),
])
})
})
.build()
.map_err(|e| anyhow::anyhow!("Failed to build zombienet network config: {e:?}"))?;
.map_err(|err| anyhow::anyhow!("Failed to build zombienet network config: {err:?}"))?;
self.node_rpc_port = Some(node_rpc_port);
self.network_config = Some(network_config);
@@ -210,6 +225,9 @@ impl ZombieNode {
.clone()
.context("Node not initialized, call init() first")?;
// TODO: Look into the possibility of removing this in the future, perhaps by reintroducing
// the blocking runtime abstraction and making it available to the entire program so that we
// don't need to be spawning multiple different runtimes.
let rt = tokio::runtime::Runtime::new().unwrap();
let network = rt.block_on(async {
network_config
@@ -237,6 +255,7 @@ impl ZombieNode {
.arg(u32::MAX.to_string())
.arg("--rpc-port")
.arg(eth_rpc_port.to_string())
.env("RUST_LOG", Self::PROXY_LOG_ENV)
.stdout(stdout_file)
.stderr(stderr_file);
},
@@ -267,70 +286,9 @@ impl ZombieNode {
Ok(())
}
fn prepare_chainspec(
&mut self,
template_chainspec_path: PathBuf,
mut genesis: Genesis,
) -> anyhow::Result<()> {
let mut cmd: Command = std::process::Command::new(&self.polkadot_parachain_path);
cmd.arg(Self::EXPORT_CHAINSPEC_COMMAND)
.arg("--chain")
.arg("asset-hub-westend-local");
let output = cmd.output().context("Failed to export the chain-spec")?;
if !output.status.success() {
anyhow::bail!(
"Build chain-spec failed: {}",
String::from_utf8_lossy(&output.stderr)
);
}
let content = String::from_utf8(output.stdout)
.context("Failed to decode collators chain-spec output as UTF-8")?;
let mut chainspec_json: JsonValue =
serde_json::from_str(&content).context("Failed to parse collators chain spec JSON")?;
let existing_chainspec_balances =
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"]
.as_array()
.cloned()
.unwrap_or_default();
let mut merged_balances: Vec<(String, u128)> = existing_chainspec_balances
.into_iter()
.filter_map(|val| {
if let Some(arr) = val.as_array() {
if arr.len() == 2 {
let account = arr[0].as_str()?.to_string();
let balance = arr[1].as_f64()? as u128;
return Some((account, balance));
}
}
None
})
.collect();
let mut eth_balances = {
for signer_address in
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet)
{
// Note, the use of the entry API here means that we only modify the entries for any
// account that is not in the `alloc` field of the genesis state.
genesis
.alloc
.entry(signer_address)
.or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE)));
}
self.extract_balance_from_genesis_file(&genesis)
.context("Failed to extract balances from EVM genesis JSON")?
};
merged_balances.append(&mut eth_balances);
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] =
json!(merged_balances);
fn prepare_chainspec(&mut self, template_chainspec_path: PathBuf) -> anyhow::Result<()> {
let chainspec_json = Self::node_genesis(&self.polkadot_parachain_path, &self.wallet)
.context("Failed to prepare the zombienet chainspec file")?;
let writer = std::fs::File::create(&template_chainspec_path)
.context("Failed to create template chainspec file")?;
@@ -340,21 +298,6 @@ impl ZombieNode {
Ok(())
}
fn extract_balance_from_genesis_file(
&self,
genesis: &Genesis,
) -> anyhow::Result<Vec<(String, u128)>> {
genesis
.alloc
.iter()
.try_fold(Vec::new(), |mut vec, (address, acc)| {
let polkadot_address = Self::eth_to_polkadot_address(address);
let balance = acc.balance.try_into()?;
vec.push((polkadot_address, balance));
Ok(vec)
})
}
fn eth_to_polkadot_address(address: &Address) -> String {
let eth_bytes = address.0.0;
@@ -378,14 +321,12 @@ impl ZombieNode {
Ok(String::from_utf8_lossy(&output).trim().to_string())
}
async fn provider(
&self,
) -> anyhow::Result<ConcreteProvider<ReviveNetwork, Arc<EthereumWallet>>> {
async fn provider(&self) -> anyhow::Result<ConcreteProvider<Ethereum, Arc<EthereumWallet>>> {
self.provider
.get_or_try_init(|| async move {
construct_concurrency_limited_provider::<ReviveNetwork, _>(
construct_concurrency_limited_provider::<Ethereum, _>(
self.connection_string.as_str(),
FallbackGasFiller::new(250_000_000, 5_000_000_000, 1_000_000_000),
FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000),
ChainIdFiller::default(), // TODO: use CHAIN_ID constant
NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(),
@@ -396,9 +337,47 @@ impl ZombieNode {
.await
.cloned()
}
pub fn node_genesis(
node_path: &Path,
wallet: &EthereumWallet,
) -> anyhow::Result<serde_json::Value> {
let output = Command::new(node_path)
.arg(Self::EXPORT_CHAINSPEC_COMMAND)
.arg("--chain")
.arg("asset-hub-westend-local")
.env_remove("RUST_LOG")
.output()
.context("Failed to export the chainspec of the chain")?;
if !output.status.success() {
anyhow::bail!(
"Substrate-node export-chain-spec failed: {}",
String::from_utf8_lossy(&output.stderr)
);
}
let content = String::from_utf8(output.stdout)
.context("Failed to decode Substrate export-chain-spec output as UTF-8")?;
let mut chainspec_json = serde_json::from_str::<serde_json::Value>(&content)
.context("Failed to parse Substrate chain spec JSON")?;
let existing_chainspec_balances =
chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"]
.as_array_mut()
.expect("Can't fail");
for address in NetworkWallet::<Ethereum>::signer_addresses(wallet) {
let substrate_address = Self::eth_to_polkadot_address(&address);
let balance = INITIAL_BALANCE;
existing_chainspec_balances.push(json!((substrate_address, balance)));
}
Ok(chainspec_json)
}
}
impl EthereumNode for ZombieNode {
impl EthereumNode for ZombienetNode {
fn pre_transactions(&mut self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + '_>> {
Box::pin(async move { Ok(()) })
}
@@ -448,17 +427,11 @@ impl EthereumNode for ZombieNode {
transaction: alloy::rpc::types::TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move {
let receipt = self
let provider = self
.provider()
.await
.context("Failed to create provider for transaction submission")?
.send_transaction(transaction)
.await
.context("Failed to submit transaction to proxy")?
.get_receipt()
.await
.context("Failed to fetch transaction receipt from proxy")?;
Ok(receipt)
.context("Failed to create the provider")?;
execute_transaction(provider, transaction).await
})
}
@@ -552,49 +525,99 @@ impl EthereumNode for ZombieNode {
+ '_,
>,
> {
Box::pin(async move {
let provider = self
.provider()
.await
.context("Failed to create the provider for block subscription")?;
let mut block_subscription = provider
.watch_full_blocks()
.await
.context("Failed to create the blocks stream")?;
block_subscription.set_channel_size(0xFFFF);
block_subscription.set_poll_interval(Duration::from_secs(1));
let block_stream = block_subscription.into_stream();
#[subxt::subxt(runtime_metadata_path = "../../assets/revive_metadata.scale")]
pub mod revive {}
let mined_block_information_stream = block_stream.filter_map(|block| async {
let block = block.ok()?;
Some(MinedBlockInformation {
block_number: block.number(),
block_timestamp: block.header.timestamp,
mined_gas: block.header.gas_used as _,
block_gas_limit: block.header.gas_limit,
transaction_hashes: block
.transactions
.into_hashes()
.as_hashes()
.expect("Must be hashes")
.to_vec(),
})
Box::pin(async move {
let substrate_rpc_url = format!("ws://127.0.0.1:{}", self.node_rpc_port.unwrap());
let api = OnlineClient::<SubstrateConfig>::from_url(substrate_rpc_url)
.await
.context("Failed to create subxt rpc client")?;
let provider = self.provider().await.context("Failed to create provider")?;
let block_stream = api
.blocks()
.subscribe_all()
.await
.context("Failed to subscribe to blocks")?;
let mined_block_information_stream = block_stream.filter_map(move |block| {
let api = api.clone();
let provider = provider.clone();
async move {
let substrate_block = block.ok()?;
let revive_block = provider
.get_block_by_number(
BlockNumberOrTag::Number(substrate_block.number() as _),
)
.await
.expect("TODO: Remove")
.expect("TODO: Remove");
let used = api
.storage()
.at(substrate_block.reference())
.fetch_or_default(&revive::storage().system().block_weight())
.await
.expect("TODO: Remove");
let block_ref_time = (used.normal.ref_time as u128)
+ (used.operational.ref_time as u128)
+ (used.mandatory.ref_time as u128);
let block_proof_size = (used.normal.proof_size as u128)
+ (used.operational.proof_size as u128)
+ (used.mandatory.proof_size as u128);
let limits = api
.constants()
.at(&revive::constants().system().block_weights())
.expect("TODO: Remove");
let max_ref_time = limits.max_block.ref_time;
let max_proof_size = limits.max_block.proof_size;
Some(MinedBlockInformation {
block_number: substrate_block.number() as _,
block_timestamp: revive_block.header.timestamp,
mined_gas: revive_block.header.gas_used as _,
block_gas_limit: revive_block.header.gas_limit as _,
transaction_hashes: revive_block
.transactions
.into_hashes()
.as_hashes()
.expect("Must be hashes")
.to_vec(),
ref_time: block_ref_time,
max_ref_time,
proof_size: block_proof_size,
max_proof_size,
})
}
});
Ok(Box::pin(mined_block_information_stream)
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
})
}
fn provider(
&self,
) -> Pin<Box<dyn Future<Output = anyhow::Result<alloy::providers::DynProvider<Ethereum>>> + '_>>
{
Box::pin(
self.provider()
.map(|provider| provider.map(|provider| provider.erased())),
)
}
}
pub struct ZombieNodeResolver<F: TxFiller<ReviveNetwork>, P: Provider<ReviveNetwork>> {
pub struct ZombieNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereum>> {
id: u32,
provider: FillProvider<F, P, ReviveNetwork>,
provider: FillProvider<F, P, Ethereum>,
}
impl<F: TxFiller<ReviveNetwork>, P: Provider<ReviveNetwork>> ResolverApi
for ZombieNodeResolver<F, P>
{
impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi for ZombieNodeResolver<F, P> {
#[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))]
fn chain_id(
&self,
@@ -717,7 +740,7 @@ impl<F: TxFiller<ReviveNetwork>, P: Provider<ReviveNetwork>> ResolverApi
}
}
impl Node for ZombieNode {
impl Node for ZombienetNode {
fn shutdown(&mut self) -> anyhow::Result<()> {
// Kill the eth_rpc process
drop(self.eth_rpc_process.take());
@@ -762,7 +785,7 @@ impl Node for ZombieNode {
}
}
impl Drop for ZombieNode {
impl Drop for ZombienetNode {
fn drop(&mut self) {
let _ = self.shutdown();
}
@@ -786,9 +809,9 @@ mod tests {
TestExecutionContext::default()
}
pub async fn new_node() -> (TestExecutionContext, ZombieNode) {
pub async fn new_node() -> (TestExecutionContext, ZombienetNode) {
let context = test_config();
let mut node = ZombieNode::new(
let mut node = ZombienetNode::new(
context.polkadot_parachain_configuration.path.clone(),
&context,
);
@@ -806,8 +829,9 @@ mod tests {
(context, node)
}
pub async fn shared_state() -> &'static (TestExecutionContext, Arc<ZombieNode>) {
static NODE: OnceCell<(TestExecutionContext, Arc<ZombieNode>)> = OnceCell::const_new();
pub async fn shared_state() -> &'static (TestExecutionContext, Arc<ZombienetNode>) {
static NODE: OnceCell<(TestExecutionContext, Arc<ZombienetNode>)> =
OnceCell::const_new();
NODE.get_or_init(|| async {
let (context, node) = new_node().await;
@@ -816,13 +840,14 @@ mod tests {
.await
}
pub async fn shared_node() -> &'static Arc<ZombieNode> {
pub async fn shared_node() -> &'static Arc<ZombienetNode> {
&shared_state().await.1
}
}
use utils::{new_node, test_config};
#[tokio::test]
#[ignore = "Ignored for the time being"]
async fn test_transfer_transaction_should_return_receipt() {
let (ctx, node) = new_node().await;
@@ -840,97 +865,6 @@ mod tests {
.expect("Failed to get the receipt for the transfer");
}
#[tokio::test]
async fn test_init_generates_chainspec_with_balances() {
let genesis_content = r#"
{
"alloc": {
"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1": {
"balance": "1000000000000000000"
},
"Ab8483F64d9C6d1EcF9b849Ae677dD3315835cb2": {
"balance": "2000000000000000000"
}
}
}
"#;
let context = test_config();
let mut node = ZombieNode::new(
context.polkadot_parachain_configuration.path.clone(),
&context,
);
// Call `init()`
node.init(serde_json::from_str(genesis_content).unwrap())
.expect("init failed");
// Check that the patched chainspec file was generated
let final_chainspec_path = node.base_directory.join(ZombieNode::CHAIN_SPEC_JSON_FILE);
assert!(final_chainspec_path.exists(), "Chainspec file should exist");
let contents =
std::fs::read_to_string(&final_chainspec_path).expect("Failed to read chainspec");
// Validate that the Polkadot addresses derived from the Ethereum addresses are in the file
let first_eth_addr = ZombieNode::eth_to_polkadot_address(
&"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1".parse().unwrap(),
);
let second_eth_addr = ZombieNode::eth_to_polkadot_address(
&"Ab8483F64d9C6d1EcF9b849Ae677dD3315835cb2".parse().unwrap(),
);
assert!(
contents.contains(&first_eth_addr),
"Chainspec should contain Polkadot address for first Ethereum account"
);
assert!(
contents.contains(&second_eth_addr),
"Chainspec should contain Polkadot address for second Ethereum account"
);
}
#[tokio::test]
async fn test_parse_genesis_alloc() {
// Create test genesis file
let genesis_json = r#"
{
"alloc": {
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1": { "balance": "1000000000000000000" },
"0x0000000000000000000000000000000000000000": { "balance": "0xDE0B6B3A7640000" },
"0xffffffffffffffffffffffffffffffffffffffff": { "balance": "123456789" }
}
}
"#;
let context = test_config();
let node = ZombieNode::new(
context.polkadot_parachain_configuration.path.clone(),
&context,
);
let result = node
.extract_balance_from_genesis_file(&serde_json::from_str(genesis_json).unwrap())
.unwrap();
let result_map: std::collections::HashMap<_, _> = result.into_iter().collect();
assert_eq!(
result_map.get("5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV"),
Some(&1_000_000_000_000_000_000u128)
);
assert_eq!(
result_map.get("5C4hrfjw9DjXZTzV3MwzrrAr9P1MLDHajjSidz9bR544LEq1"),
Some(&1_000_000_000_000_000_000u128)
);
assert_eq!(
result_map.get("5HrN7fHLXWcFiXPwwtq2EkSGns9eMmoUQnbVKweNz3VVr6N4"),
Some(&123_456_789u128)
);
}
#[test]
fn print_eth_to_polkadot_mappings() {
let eth_addresses = vec![
@@ -940,7 +874,7 @@ mod tests {
];
for eth_addr in eth_addresses {
let ss58 = ZombieNode::eth_to_polkadot_address(&eth_addr.parse().unwrap());
let ss58 = ZombienetNode::eth_to_polkadot_address(&eth_addr.parse().unwrap());
println!("Ethereum: {eth_addr} -> Polkadot SS58: {ss58}");
}
@@ -968,7 +902,7 @@ mod tests {
];
for (eth_addr, expected_ss58) in cases {
let result = ZombieNode::eth_to_polkadot_address(&eth_addr.parse().unwrap());
let result = ZombienetNode::eth_to_polkadot_address(&eth_addr.parse().unwrap());
assert_eq!(
result, expected_ss58,
"Mismatch for Ethereum address {eth_addr}"
@@ -980,7 +914,7 @@ mod tests {
fn eth_rpc_version_works() {
// Arrange
let context = test_config();
let node = ZombieNode::new(
let node = ZombienetNode::new(
context.polkadot_parachain_configuration.path.clone(),
&context,
);
@@ -999,7 +933,7 @@ mod tests {
fn version_works() {
// Arrange
let context = test_config();
let node = ZombieNode::new(
let node = ZombienetNode::new(
context.polkadot_parachain_configuration.path.clone(),
&context,
);
@@ -7,6 +7,10 @@ use alloy::{
transports::TransportResult,
};
// Percentage padding applied to estimated gas (e.g. 120 = 20% padding)
const GAS_ESTIMATE_PADDING_NUMERATOR: u64 = 120;
const GAS_ESTIMATE_PADDING_DENOMINATOR: u64 = 100;
#[derive(Clone, Debug)]
pub struct FallbackGasFiller {
inner: GasFiller,
@@ -56,8 +60,6 @@ where
provider: &P,
tx: &<N as Network>::TransactionRequest,
) -> TransportResult<Self::Fillable> {
// Try to fetch GasFiller's "fillable" (gas_price, base_fee, estimate_gas, ...)
// If it errors (i.e. tx would revert under eth_estimateGas), swallow it.
match self.inner.prepare(provider, tx).await {
Ok(fill) => Ok(Some(fill)),
Err(_) => Ok(None),
@@ -70,8 +72,17 @@ where
mut tx: alloy::providers::SendableTx<N>,
) -> TransportResult<SendableTx<N>> {
if let Some(fill) = fillable {
// our inner GasFiller succeeded — use it
self.inner.fill(fill, tx).await
let mut tx = self.inner.fill(fill, tx).await?;
if let Some(builder) = tx.as_mut_builder() {
if let Some(estimated) = builder.gas_limit() {
let padded = estimated
.checked_mul(GAS_ESTIMATE_PADDING_NUMERATOR)
.and_then(|v| v.checked_div(GAS_ESTIMATE_PADDING_DENOMINATOR))
.unwrap_or(u64::MAX);
builder.set_gas_limit(padded);
}
}
Ok(tx)
} else {
if let Some(builder) = tx.as_mut_builder() {
builder.set_gas_limit(self.default_gas_limit);
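Worked through, the 120/100 ratio adds 20% headroom on top of the node's estimate and saturates at `u64::MAX` instead of overflowing. A minimal reproduction of the arithmetic used in `fill` above, extracted for illustration:

fn pad_gas_estimate(estimated: u64) -> u64 {
    estimated
        .checked_mul(120)
        .and_then(|v| v.checked_div(100))
        .unwrap_or(u64::MAX)
}

#[test]
fn padding_adds_twenty_percent_and_saturates() {
    assert_eq!(pad_gas_estimate(1_000_000), 1_200_000); // +20%
    assert_eq!(pad_gas_estimate(u64::MAX / 2), u64::MAX); // overflow saturates
}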
+2 -2
View File
@@ -1,7 +1,7 @@
mod concurrency_limiter;
mod fallback_gas_provider;
mod fallback_gas_filler;
mod provider;
pub use concurrency_limiter::*;
pub use fallback_gas_provider::*;
pub use fallback_gas_filler::*;
pub use provider::*;
+7 -3
View File
@@ -10,7 +10,7 @@ use alloy::{
};
use anyhow::{Context, Result};
use revive_dt_common::futures::{PollingWaitBehavior, poll};
use tracing::debug;
use tracing::{Instrument, debug, info, info_span};
use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller};
@@ -44,7 +44,7 @@ where
// requests at any point in time and no more than that. This is done in an effort to stabilize
// the framework against some of the intermittent issues that we've been seeing related to RPC calls.
static GLOBAL_CONCURRENCY_LIMITER_LAYER: LazyLock<ConcurrencyLimiterLayer> =
LazyLock::new(|| ConcurrencyLimiterLayer::new(10));
LazyLock::new(|| ConcurrencyLimiterLayer::new(500));
let client = ClientBuilder::default()
.layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone())
@@ -117,12 +117,16 @@ where
async move {
match provider.get_transaction_receipt(tx_hash).await {
Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
Ok(Some(receipt)) => {
info!("Found the transaction receipt");
Ok(ControlFlow::Break(receipt))
}
_ => Ok(ControlFlow::Continue(())),
}
}
},
)
.instrument(info_span!("Polling for receipt", %tx_hash))
.await
.context(format!("Polling for receipt failed for {tx_hash}"))
}
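For intuition, a limiter layer of this kind typically gates every RPC request behind a semaphore permit so that at most N requests are in flight process-wide. A minimal sketch of the idea, assuming tokio — this is not the crate's actual `ConcurrencyLimiterLayer` implementation:

use std::sync::Arc;
use tokio::sync::Semaphore;

// Illustrative only: run futures with a global cap on concurrency.
#[derive(Clone)]
struct ConcurrencyLimiter {
    permits: Arc<Semaphore>,
}

impl ConcurrencyLimiter {
    fn new(limit: usize) -> Self {
        Self { permits: Arc::new(Semaphore::new(limit)) }
    }

    async fn run<F: std::future::Future>(&self, fut: F) -> F::Output {
        // The permit is held for the duration of the call, so at most
        // `limit` wrapped futures make progress concurrently.
        let _permit = self.permits.acquire().await.expect("semaphore never closed");
        fut.await
    }
}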
+14 -13
View File
@@ -11,12 +11,12 @@ use std::{
use alloy::primitives::Address;
use anyhow::{Context as _, Result};
use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_common::types::{ParsedTestSpecifier, PlatformIdentifier};
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
use revive_dt_config::Context;
use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance};
use revive_dt_format::{case::CaseIdx, metadata::ContractInstance};
use semver::Version;
use serde::Serialize;
use serde::{Deserialize, Serialize};
use serde_with::{DisplayFromStr, serde_as};
use tokio::sync::{
broadcast::{Sender, channel},
@@ -67,7 +67,7 @@ impl ReportAggregator {
RunnerEvent::SubscribeToEvents(event) => {
self.handle_subscribe_to_events_event(*event);
}
RunnerEvent::CorpusFileDiscovery(event) => {
RunnerEvent::CorpusDiscovery(event) => {
self.handle_corpus_file_discovered_event(*event)
}
RunnerEvent::MetadataFileDiscovery(event) => {
@@ -152,8 +152,8 @@ impl ReportAggregator {
let _ = event.tx.send(self.listener_tx.subscribe());
}
fn handle_corpus_file_discovered_event(&mut self, event: CorpusFileDiscoveryEvent) {
self.report.corpora.push(event.corpus);
fn handle_corpus_file_discovered_event(&mut self, event: CorpusDiscoveryEvent) {
self.report.corpora.extend(event.test_specifiers);
}
fn handle_metadata_file_discovery_event(&mut self, event: MetadataFileDiscoveryEvent) {
@@ -415,12 +415,13 @@ impl ReportAggregator {
}
#[serde_as]
#[derive(Clone, Debug, Serialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Report {
/// The context that the tool was started up with.
pub context: Context,
/// The list of corpus files that the tool found.
pub corpora: Vec<Corpus>,
#[serde_as(as = "Vec<DisplayFromStr>")]
pub corpora: Vec<ParsedTestSpecifier>,
/// The list of metadata files that were found by the tool.
pub metadata_files: BTreeSet<MetadataFilePath>,
/// Information relating to each test case.
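The `serde_as(as = "Vec<DisplayFromStr>")` annotation makes `corpora` round-trip through each specifier's `Display`/`FromStr` implementations rather than a derived structure, which is what lets the report be both serialized and deserialized. A minimal sketch of that behavior with a stand-in element type:

use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DisplayFromStr};

#[serde_as]
#[derive(Serialize, Deserialize)]
struct Example {
    // `u32` stands in for `ParsedTestSpecifier`: each element is written as
    // its string form, e.g. {"values": ["1", "2"]}.
    #[serde_as(as = "Vec<DisplayFromStr>")]
    values: Vec<u32>,
}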
@@ -440,7 +441,7 @@ impl Report {
}
}
#[derive(Clone, Debug, Serialize, Default)]
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
pub struct TestCaseReport {
/// Information on the status of the test case and whether it succeeded, failed, or was ignored.
#[serde(skip_serializing_if = "Option::is_none")]
@@ -451,7 +452,7 @@ pub struct TestCaseReport {
/// Information related to the status of the test. Could be that the test succeeded, failed, or that
/// it was ignored.
#[derive(Clone, Debug, Serialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "status")]
pub enum TestCaseStatus {
/// The test case succeeded.
@@ -475,7 +476,7 @@ pub enum TestCaseStatus {
}
/// Information related to the platform node that's being used to execute the step.
#[derive(Clone, Debug, Serialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TestCaseNodeInformation {
/// The ID of the node that this case is being executed on.
pub id: usize,
@@ -486,7 +487,7 @@ pub struct TestCaseNodeInformation {
}
/// Execution information tied to the platform.
#[derive(Clone, Debug, Default, Serialize)]
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct ExecutionInformation {
/// Information related to the node assigned to this test case.
#[serde(skip_serializing_if = "Option::is_none")]
@@ -506,7 +507,7 @@ pub struct ExecutionInformation {
}
/// Information related to compilation
#[derive(Clone, Debug, Serialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "status")]
pub enum CompilationStatus {
/// The compilation was successful.
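Deriving Deserialize on Report requires it on every nested type, which is what the remaining hunks in this file do. The interesting part is corpora: serde_with's DisplayFromStr adapter writes each ParsedTestSpecifier as its Display string and parses it back through FromStr, so the field keeps a stable, human-readable form in the report output. A self-contained sketch of that pattern, with a hypothetical Spec type standing in for ParsedTestSpecifier:

use std::fmt;
use std::str::FromStr;

use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DisplayFromStr};

#[derive(Clone, Debug)]
struct Spec(String);

impl fmt::Display for Spec {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl FromStr for Spec {
    type Err = std::convert::Infallible;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(Spec(s.to_owned()))
    }
}

#[serde_as]
#[derive(Serialize, Deserialize)]
struct Report {
    // Serialized as plain strings via Display, parsed back via FromStr.
    #[serde_as(as = "Vec<DisplayFromStr>")]
    corpora: Vec<Spec>,
}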
+4 -3
View File
@@ -6,10 +6,11 @@ use std::{collections::BTreeMap, path::PathBuf, sync::Arc};
use alloy::primitives::Address;
use anyhow::Context as _;
use indexmap::IndexMap;
use revive_dt_common::types::ParsedTestSpecifier;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_compiler::{CompilerInput, CompilerOutput};
use revive_dt_format::metadata::ContractInstance;
use revive_dt_format::metadata::Metadata;
use revive_dt_format::{corpus::Corpus, metadata::ContractInstance};
use semver::Version;
use tokio::sync::{broadcast, oneshot};
@@ -481,9 +482,9 @@ define_event! {
tx: oneshot::Sender<broadcast::Receiver<ReporterEvent>>
},
/// An event emitted by runners when they've discovered tests in a corpus.
CorpusFileDiscovery {
CorpusDiscovery {
/// The test specifiers parsed from the corpus.
corpus: Corpus
test_specifiers: Vec<ParsedTestSpecifier>
},
/// An event emitted by runners when they've discovered a metadata file.
MetadataFileDiscovery {
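CorpusDiscovery now carries the parsed test specifiers rather than a whole corpus file, matching the aggregator change above. The SubscribeToEvents event at the top of this hunk is a oneshot-for-broadcast handshake: the requester mails in a oneshot::Sender and receives its own broadcast::Receiver back. A minimal sketch of that wiring, with ReporterEvent stubbed out:

use tokio::sync::{broadcast, mpsc, oneshot};

#[derive(Clone, Debug)]
struct ReporterEvent;

struct Aggregator {
    // Every subscriber gets a receiver cloned off this sender.
    listener_tx: broadcast::Sender<ReporterEvent>,
}

impl Aggregator {
    fn handle_subscribe(&self, reply: oneshot::Sender<broadcast::Receiver<ReporterEvent>>) {
        // A send failure only means the requester gave up waiting; ignore it.
        let _ = reply.send(self.listener_tx.subscribe());
    }
}

// Runner side: ask the aggregator for a fresh event stream.
async fn subscribe(
    to_aggregator: &mpsc::Sender<oneshot::Sender<broadcast::Receiver<ReporterEvent>>>,
) -> anyhow::Result<broadcast::Receiver<ReporterEvent>> {
    let (reply_tx, reply_rx) = oneshot::channel();
    to_aggregator
        .send(reply_tx)
        .await
        .map_err(|_| anyhow::anyhow!("the aggregator has shut down"))?;
    Ok(reply_rx.await?)
}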
+5 -21
View File
@@ -1,7 +1,7 @@
#!/bin/bash
# Revive Differential Tests - Quick Start Script
# This script clones the test repository, sets up the corpus file, and runs the tool
# This script clones the test repository and runs the tool
set -e # Exit on any error
@@ -14,7 +14,6 @@ NC='\033[0m' # No Color
# Configuration
TEST_REPO_URL="https://github.com/paritytech/resolc-compiler-tests"
TEST_REPO_DIR="resolc-compiler-tests"
CORPUS_FILE="./corpus.json"
WORKDIR="workdir"
# Optional positional argument: path to polkadot-sdk directory
@@ -68,21 +67,6 @@ else
echo -e "${YELLOW}No polkadot-sdk path provided. Using binaries from $PATH.${NC}"
fi
# Create corpus file with absolute path resolved at runtime
echo -e "${GREEN}Creating corpus file...${NC}"
ABSOLUTE_PATH=$(realpath "$TEST_REPO_DIR/fixtures/solidity/")
cat > "$CORPUS_FILE" << EOF
{
"name": "MatterLabs Solidity Simple, Complex, and Semantic Tests",
"paths": [
"$(realpath "$TEST_REPO_DIR/fixtures/solidity/simple")"
]
}
EOF
echo -e "${GREEN}Corpus file created: $CORPUS_FILE${NC}"
# Create workdir if it doesn't exist
mkdir -p "$WORKDIR"
@@ -93,17 +77,17 @@ echo ""
# Run the tool
cargo build --release;
RUST_LOG="info,alloy_pubsub::service=error" ./target/release/retester test \
--platform revive-dev-node-revm-solc \
--corpus "$CORPUS_FILE" \
--platform revive-dev-node-polkavm-resolc \
--test "$(realpath "$TEST_REPO_DIR/fixtures/solidity")" \
--working-directory "$WORKDIR" \
--concurrency.number-of-nodes 10 \
--concurrency.number-of-threads 5 \
--concurrency.number-of-concurrent-tasks 1000 \
--concurrency.number-of-concurrent-tasks 500 \
--wallet.additional-keys 100000 \
--kitchensink.path "$SUBSTRATE_NODE_BIN" \
--revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
--eth-rpc.path "$ETH_RPC_BIN" \
> logs.log \
2> output.log
2> output.log
echo -e "${GREEN}=== Test run completed! ===${NC}"