Compare commits

...

16 Commits

Author SHA1 Message Date
Omar Abdulla 93f6e5efdf Increase the wait duration of kurtosis 2025-09-28 15:33:20 +03:00
Omar Abdulla ecd6252666 Skip lighthouse tests in MacOS in CI 2025-09-28 15:19:52 +03:00
Omar Abdulla 1d1642887b fix ci 2025-09-28 15:07:04 +03:00
Omar Abdulla db0522aa71 Install kurtosis in cli 2025-09-28 14:59:16 +03:00
Omar Abdulla ca5cad8e43 Add the cargo clippy component 2025-09-28 14:49:22 +03:00
Omar Abdulla 33911b5ce3 Fix CI 2025-09-28 14:25:47 +03:00
Omar Abdulla 5b730d914e Add formatting component for macos 2025-09-28 14:18:16 +03:00
Omar Abdulla 9f7a314b20 Add rustfmt to ci 2025-09-28 14:13:30 +03:00
Omar Abdulla 5cd3dd8c83 Update the ci to include cargo fmt 2025-09-28 14:03:46 +03:00
Omar Abdulla 8d15f87ff0 Connect the lighthouse node with the platforms 2025-09-26 18:34:41 +03:00
Omar Abdulla 566dd06d9a Implement production geth using kurtosis 2025-09-26 18:15:11 +03:00
Omar Abdulla 5c30e8a5bf Add a lighthouse node implementation 2025-09-24 17:13:52 +03:00
Omar c2ba2cfed6 Cleanup Processes (#171)
* Clean up the process flow for nodes

* Cleanup some of the node tests to use shared nodes

* Commit dev-genesis
2025-09-24 02:47:36 +00:00
Omar 3dda739cef Add step paths (#163)
* Support repetitions in the tool

* Add support for account allocations

* Update the JSON schema

* Support virtual repeats

* Add a step path

* Update the schema
2025-09-22 03:53:16 +00:00
Omar 97e3f8bbff Support virtual repeats for benchmarks (#162)
* Support repetitions in the tool

* Add support for account allocations

* Update the JSON schema

* Support virtual repeats
2025-09-22 03:35:31 +00:00
Omar 7189361a58 Allow account allocations (#161)
* Support repetitions in the tool

* Add support for account allocations

* Update the JSON schema
2025-09-22 03:19:55 +00:00
25 changed files with 2073 additions and 439 deletions
+32 -4
View File
@@ -91,10 +91,10 @@ jobs:
with: with:
rustflags: "" rustflags: ""
- name: Add wasm32 target - name: Add wasm32 target and formatting
run: | run: |
rustup target add wasm32-unknown-unknown rustup target add wasm32-unknown-unknown
rustup component add rust-src rustup component add rust-src rustfmt clippy
- name: Install Geth on Ubuntu - name: Install Geth on Ubuntu
if: matrix.os == 'ubuntu-24.04' if: matrix.os == 'ubuntu-24.04'
@@ -141,6 +141,17 @@ jobs:
chmod +x resolc chmod +x resolc
sudo mv resolc /usr/local/bin sudo mv resolc /usr/local/bin
- name: Install Kurtosis on macOS
if: matrix.os == 'macos-14'
run: brew install kurtosis-tech/tap/kurtosis-cli
- name: Install Kurtosis on Ubuntu
if: matrix.os == 'ubuntu-24.04'
run: |
echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
sudo apt update
sudo apt install kurtosis-cli
- name: Machete - name: Machete
uses: bnjbvr/cargo-machete@v0.7.1 uses: bnjbvr/cargo-machete@v0.7.1
@@ -159,5 +170,22 @@ jobs:
- name: Check resolc version - name: Check resolc version
run: resolc --version run: resolc --version
- name: Test cargo workspace - name: Test Formatting
run: make test run: make format
- name: Test Clippy
run: make clippy
- name: Test Machete
run: make machete
- name: Unit Tests
if: matrix.os == 'ubuntu-24.04'
run: cargo test --workspace -- --nocapture
# We can't install docker in the MacOS image used in CI and therefore we need to skip the
# Kurtosis and lighthouse related tests when running the CI on MacOS.
- name: Unit Tests
if: matrix.os == 'macos-14'
run: |
cargo test --workspace -- --nocapture --skip lighthouse_geth::tests::
+2 -1
View File
@@ -12,4 +12,5 @@ profile.json.gz
resolc-compiler-tests resolc-compiler-tests
workdir workdir
!/schema.json !/schema.json
!/dev-genesis.json
Generated
+156
View File
@@ -87,6 +87,7 @@ dependencies = [
"alloy-transport", "alloy-transport",
"alloy-transport-http", "alloy-transport-http",
"alloy-transport-ipc", "alloy-transport-ipc",
"alloy-transport-ws",
] ]
[[package]] [[package]]
@@ -378,6 +379,7 @@ dependencies = [
"alloy-transport", "alloy-transport",
"alloy-transport-http", "alloy-transport-http",
"alloy-transport-ipc", "alloy-transport-ipc",
"alloy-transport-ws",
"async-stream", "async-stream",
"async-trait", "async-trait",
"auto_impl", "auto_impl",
@@ -454,6 +456,7 @@ dependencies = [
"alloy-transport", "alloy-transport",
"alloy-transport-http", "alloy-transport-http",
"alloy-transport-ipc", "alloy-transport-ipc",
"alloy-transport-ws",
"futures", "futures",
"pin-project", "pin-project",
"reqwest", "reqwest",
@@ -709,6 +712,24 @@ dependencies = [
"tracing", "tracing",
] ]
[[package]]
name = "alloy-transport-ws"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e915e1250dc129ad48d264573ccd08e4716fdda564a772fd217875b8459aff9"
dependencies = [
"alloy-pubsub",
"alloy-transport",
"futures",
"http",
"rustls",
"serde_json",
"tokio",
"tokio-tungstenite",
"tracing",
"ws_stream_wasm",
]
[[package]] [[package]]
name = "alloy-trie" name = "alloy-trie"
version = "0.9.0" version = "0.9.0"
@@ -1373,6 +1394,17 @@ dependencies = [
"syn 2.0.101", "syn 2.0.101",
] ]
[[package]]
name = "async_io_stream"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c"
dependencies = [
"futures",
"pharos",
"rustc_version 0.4.1",
]
[[package]] [[package]]
name = "atomic-waker" name = "atomic-waker"
version = "1.1.2" version = "1.1.2"
@@ -2021,6 +2053,12 @@ dependencies = [
"parking_lot_core", "parking_lot_core",
] ]
[[package]]
name = "data-encoding"
version = "2.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476"
[[package]] [[package]]
name = "der" name = "der"
version = "0.7.10" version = "0.7.10"
@@ -3956,6 +3994,16 @@ dependencies = [
"ucd-trie", "ucd-trie",
] ]
[[package]]
name = "pharos"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414"
dependencies = [
"futures",
"rustc_version 0.4.1",
]
[[package]] [[package]]
name = "pin-project" name = "pin-project"
version = "1.1.10" version = "1.1.10"
@@ -4467,6 +4515,8 @@ dependencies = [
name = "revive-dt-common" name = "revive-dt-common"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"alloy",
"alloy-primitives",
"anyhow", "anyhow",
"clap", "clap",
"moka", "moka",
@@ -4575,6 +4625,8 @@ dependencies = [
"revive-dt-node-interaction", "revive-dt-node-interaction",
"serde", "serde",
"serde_json", "serde_json",
"serde_with",
"serde_yaml_ng",
"sp-core", "sp-core",
"sp-runtime", "sp-runtime",
"temp-dir", "temp-dir",
@@ -4766,6 +4818,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321"
dependencies = [ dependencies = [
"once_cell", "once_cell",
"ring",
"rustls-pki-types", "rustls-pki-types",
"rustls-webpki", "rustls-webpki",
"subtle", "subtle",
@@ -5053,6 +5106,12 @@ dependencies = [
"pest", "pest",
] ]
[[package]]
name = "send_wrapper"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73"
[[package]] [[package]]
name = "serde" name = "serde"
version = "1.0.219" version = "1.0.219"
@@ -5169,6 +5228,19 @@ dependencies = [
"syn 2.0.101", "syn 2.0.101",
] ]
[[package]]
name = "serde_yaml_ng"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b4db627b98b36d4203a7b458cf3573730f2bb591b28871d916dfa9efabfd41f"
dependencies = [
"indexmap 2.10.0",
"itoa",
"ryu",
"serde",
"unsafe-libyaml",
]
[[package]] [[package]]
name = "serdect" name = "serdect"
version = "0.2.0" version = "0.2.0"
@@ -6065,6 +6137,22 @@ dependencies = [
"tokio-util", "tokio-util",
] ]
[[package]]
name = "tokio-tungstenite"
version = "0.26.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084"
dependencies = [
"futures-util",
"log",
"rustls",
"rustls-pki-types",
"tokio",
"tokio-rustls",
"tungstenite",
"webpki-roots 0.26.11",
]
[[package]] [[package]]
name = "tokio-util" name = "tokio-util"
version = "0.7.15" version = "0.7.15"
@@ -6279,6 +6367,25 @@ version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
[[package]]
name = "tungstenite"
version = "0.26.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4793cb5e56680ecbb1d843515b23b6de9a75eb04b66643e256a396d43be33c13"
dependencies = [
"bytes",
"data-encoding",
"http",
"httparse",
"log",
"rand 0.9.2",
"rustls",
"rustls-pki-types",
"sha1",
"thiserror 2.0.12",
"utf-8",
]
[[package]] [[package]]
name = "tuplex" name = "tuplex"
version = "0.1.2" version = "0.1.2"
@@ -6366,6 +6473,12 @@ version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853"
[[package]]
name = "unsafe-libyaml"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861"
[[package]] [[package]]
name = "untrusted" name = "untrusted"
version = "0.9.0" version = "0.9.0"
@@ -6383,6 +6496,12 @@ dependencies = [
"percent-encoding", "percent-encoding",
] ]
[[package]]
name = "utf-8"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9"
[[package]] [[package]]
name = "utf8_iter" name = "utf8_iter"
version = "1.0.4" version = "1.0.4"
@@ -6637,6 +6756,24 @@ dependencies = [
"wasm-bindgen", "wasm-bindgen",
] ]
[[package]]
name = "webpki-roots"
version = "0.26.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9"
dependencies = [
"webpki-roots 1.0.2",
]
[[package]]
name = "webpki-roots"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2"
dependencies = [
"rustls-pki-types",
]
[[package]] [[package]]
name = "widestring" name = "widestring"
version = "1.2.0" version = "1.2.0"
@@ -6975,6 +7112,25 @@ version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb"
[[package]]
name = "ws_stream_wasm"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c173014acad22e83f16403ee360115b38846fe754e735c5d9d3803fe70c6abc"
dependencies = [
"async_io_stream",
"futures",
"js-sys",
"log",
"pharos",
"rustc_version 0.4.1",
"send_wrapper",
"thiserror 2.0.12",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
]
[[package]] [[package]]
name = "wyz" name = "wyz"
version = "0.5.1" version = "0.5.1"
+3 -1
View File
@@ -45,7 +45,8 @@ serde_json = { version = "1.0", default-features = false, features = [
"std", "std",
"unbounded_depth", "unbounded_depth",
] } ] }
serde_with = { version = "3.14.0" } serde_with = { version = "3.14.0", features = ["hex"] }
serde_yaml_ng = { version = "0.10.0" }
sha2 = { version = "0.10.9" } sha2 = { version = "0.10.9" }
sp-core = "36.1.0" sp-core = "36.1.0"
sp-runtime = "41.1.0" sp-runtime = "41.1.0"
@@ -80,6 +81,7 @@ features = [
"json-abi", "json-abi",
"providers", "providers",
"provider-ipc", "provider-ipc",
"provider-ws",
"provider-debug-api", "provider-debug-api",
"reqwest", "reqwest",
"rpc-types", "rpc-types",
+1
View File
@@ -44,6 +44,7 @@ This section describes the required dependencies that this framework requires to
- ETH-RPC - All communication with Kitchensink is done through the ETH RPC. - ETH-RPC - All communication with Kitchensink is done through the ETH RPC.
- Solc - This is actually a transitive dependency, while this tool doesn't require solc as it downloads the versions that it requires, resolc requires that Solc is installed and available in the path. - Solc - This is actually a transitive dependency, while this tool doesn't require solc as it downloads the versions that it requires, resolc requires that Solc is installed and available in the path.
- Resolc - This is required to compile the contracts to PolkaVM bytecode. - Resolc - This is required to compile the contracts to PolkaVM bytecode.
- Kurtosis - The Kurtosis CLI tool is required for the production Ethereum mainnet-like node configuration with Geth as the execution layer and lighthouse as the consensus layer. Kurtosis also requires docker to be installed since it runs everything inside of docker containers.
All of the above need to be installed and available in the path in order for the tool to work. All of the above need to be installed and available in the path in order for the tool to work.
+2
View File
@@ -9,6 +9,8 @@ repository.workspace = true
rust-version.workspace = true rust-version.workspace = true
[dependencies] [dependencies]
alloy = { workspace = true }
alloy-primitives = { workspace = true }
anyhow = { workspace = true } anyhow = { workspace = true }
clap = { workspace = true } clap = { workspace = true }
moka = { workspace = true, features = ["sync"] } moka = { workspace = true, features = ["sync"] }
+4
View File
@@ -29,6 +29,8 @@ use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
pub enum PlatformIdentifier { pub enum PlatformIdentifier {
/// The Go-ethereum reference full node EVM implementation with the solc compiler. /// The Go-ethereum reference full node EVM implementation with the solc compiler.
GethEvmSolc, GethEvmSolc,
/// The Lighthouse Go-ethereum reference full node EVM implementation with the solc compiler.
LighthouseGethEvmSolc,
/// The kitchensink node with the PolkaVM backend with the resolc compiler. /// The kitchensink node with the PolkaVM backend with the resolc compiler.
KitchensinkPolkavmResolc, KitchensinkPolkavmResolc,
/// The kitchensink node with the REVM backend with the solc compiler. /// The kitchensink node with the REVM backend with the solc compiler.
@@ -87,6 +89,8 @@ pub enum CompilerIdentifier {
pub enum NodeIdentifier { pub enum NodeIdentifier {
/// The go-ethereum node implementation. /// The go-ethereum node implementation.
Geth, Geth,
/// The go-ethereum node implementation run with a Lighthouse consensus client.
LighthouseGeth,
/// The Kitchensink node implementation. /// The Kitchensink node implementation.
Kitchensink, Kitchensink,
/// The revive dev node implementation. /// The revive dev node implementation.
+2
View File
@@ -1,7 +1,9 @@
mod identifiers; mod identifiers;
mod mode; mod mode;
mod private_key_allocator;
mod version_or_requirement; mod version_or_requirement;
pub use identifiers::*; pub use identifiers::*;
pub use mode::*; pub use mode::*;
pub use private_key_allocator::*;
pub use version_or_requirement::*; pub use version_or_requirement::*;
@@ -0,0 +1,35 @@
use alloy::signers::local::PrivateKeySigner;
use alloy_primitives::U256;
use anyhow::{Result, bail};
/// This is a sequential private key allocator. When instantiated, it allocates private keys
/// sequentially and in order until the maximum private key specified is reached.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PrivateKeyAllocator {
    /// The next private key to be returned by the allocator when requested.
    next_private_key: U256,
    /// The highest private key (exclusive) that can be returned by this allocator.
    highest_private_key_exclusive: U256,
}
impl PrivateKeyAllocator {
    /// Creates a new instance of the private key allocator.
    ///
    /// Allocation starts at one rather than zero: zero is not a valid secp256k1
    /// scalar, so `PrivateKeySigner::from_slice` rejects an all-zero key and the
    /// first allocation would otherwise always fail. This also matches the wallet
    /// configuration elsewhere in this codebase, which funds private keys in the
    /// range (0, N].
    pub fn new(highest_private_key_exclusive: U256) -> Self {
        Self {
            next_private_key: U256::ONE,
            highest_private_key_exclusive,
        }
    }

    /// Allocates a new private key, erroring out once the maximum (exclusive)
    /// private key has been reached.
    pub fn allocate(&mut self) -> Result<PrivateKeySigner> {
        if self.next_private_key >= self.highest_private_key_exclusive {
            bail!("Attempted to allocate a private key but failed since all have been allocated");
        };
        // The key is serialized as a 32-byte big-endian scalar, the form that
        // `PrivateKeySigner::from_slice` expects.
        let private_key =
            PrivateKeySigner::from_slice(self.next_private_key.to_be_bytes::<32>().as_slice())?;
        self.next_private_key += U256::ONE;
        Ok(private_key)
    }
}
+44 -6
View File
@@ -79,6 +79,15 @@ impl AsRef<GethConfiguration> for Context {
} }
} }
// Exposes the Kurtosis configuration of the CLI context, mirroring the other
// `AsRef<...Configuration>` implementations on `Context`.
impl AsRef<KurtosisConfiguration> for Context {
    fn as_ref(&self) -> &KurtosisConfiguration {
        match self {
            Self::ExecuteTests(context) => context.as_ref().as_ref(),
            // Schema export never starts any nodes, so there is no Kurtosis
            // configuration to borrow in this variant.
            Self::ExportJsonSchema => unreachable!(),
        }
    }
}
impl AsRef<KitchensinkConfiguration> for Context { impl AsRef<KitchensinkConfiguration> for Context {
fn as_ref(&self) -> &KitchensinkConfiguration { fn as_ref(&self) -> &KitchensinkConfiguration {
match self { match self {
@@ -190,6 +199,10 @@ pub struct TestExecutionContext {
#[clap(flatten, next_help_heading = "Geth Configuration")] #[clap(flatten, next_help_heading = "Geth Configuration")]
pub geth_configuration: GethConfiguration, pub geth_configuration: GethConfiguration,
/// Configuration parameters for the lighthouse node.
#[clap(flatten, next_help_heading = "Lighthouse Configuration")]
pub lighthouse_configuration: KurtosisConfiguration,
/// Configuration parameters for the Kitchensink. /// Configuration parameters for the Kitchensink.
#[clap(flatten, next_help_heading = "Kitchensink Configuration")] #[clap(flatten, next_help_heading = "Kitchensink Configuration")]
pub kitchensink_configuration: KitchensinkConfiguration, pub kitchensink_configuration: KitchensinkConfiguration,
@@ -253,6 +266,12 @@ impl AsRef<GethConfiguration> for TestExecutionContext {
} }
} }
// Allows the test execution context to be borrowed as its Kurtosis
// configuration. Note: the Kurtosis settings live in the field named
// `lighthouse_configuration` (the lighthouse node is the one driven through
// Kurtosis).
impl AsRef<KurtosisConfiguration> for TestExecutionContext {
    fn as_ref(&self) -> &KurtosisConfiguration {
        &self.lighthouse_configuration
    }
}
impl AsRef<KitchensinkConfiguration> for TestExecutionContext { impl AsRef<KitchensinkConfiguration> for TestExecutionContext {
fn as_ref(&self) -> &KitchensinkConfiguration { fn as_ref(&self) -> &KitchensinkConfiguration {
&self.kitchensink_configuration &self.kitchensink_configuration
@@ -335,12 +354,27 @@ pub struct GethConfiguration {
#[clap( #[clap(
id = "geth.start-timeout-ms", id = "geth.start-timeout-ms",
long = "geth.start-timeout-ms", long = "geth.start-timeout-ms",
default_value = "5000", default_value = "30000",
value_parser = parse_duration value_parser = parse_duration
)] )]
pub start_timeout_ms: Duration, pub start_timeout_ms: Duration,
} }
/// A set of configuration parameters for kurtosis.
#[derive(Clone, Debug, Parser, Serialize)]
pub struct KurtosisConfiguration {
    /// Specifies the path of the kurtosis node to be used by the tool.
    ///
    /// If this is not specified, then the tool assumes that it should use the kurtosis binary that's
    /// provided in the user's $PATH.
    // The default is the bare binary name, which the process spawner resolves
    // against $PATH at runtime.
    #[clap(
        id = "kurtosis.path",
        long = "kurtosis.path",
        default_value = "kurtosis"
    )]
    pub path: PathBuf,
}
/// A set of configuration parameters for Kitchensink. /// A set of configuration parameters for Kitchensink.
#[derive(Clone, Debug, Parser, Serialize)] #[derive(Clone, Debug, Parser, Serialize)]
pub struct KitchensinkConfiguration { pub struct KitchensinkConfiguration {
@@ -359,7 +393,7 @@ pub struct KitchensinkConfiguration {
#[clap( #[clap(
id = "kitchensink.start-timeout-ms", id = "kitchensink.start-timeout-ms",
long = "kitchensink.start-timeout-ms", long = "kitchensink.start-timeout-ms",
default_value = "5000", default_value = "30000",
value_parser = parse_duration value_parser = parse_duration
)] )]
pub start_timeout_ms: Duration, pub start_timeout_ms: Duration,
@@ -387,7 +421,7 @@ pub struct ReviveDevNodeConfiguration {
#[clap( #[clap(
id = "revive-dev-node.start-timeout-ms", id = "revive-dev-node.start-timeout-ms",
long = "revive-dev-node.start-timeout-ms", long = "revive-dev-node.start-timeout-ms",
default_value = "5000", default_value = "30000",
value_parser = parse_duration value_parser = parse_duration
)] )]
pub start_timeout_ms: Duration, pub start_timeout_ms: Duration,
@@ -407,7 +441,7 @@ pub struct EthRpcConfiguration {
#[clap( #[clap(
id = "eth-rpc.start-timeout-ms", id = "eth-rpc.start-timeout-ms",
long = "eth-rpc.start-timeout-ms", long = "eth-rpc.start-timeout-ms",
default_value = "5000", default_value = "30000",
value_parser = parse_duration value_parser = parse_duration
)] )]
pub start_timeout_ms: Duration, pub start_timeout_ms: Duration,
@@ -431,7 +465,7 @@ pub struct GenesisConfiguration {
impl GenesisConfiguration { impl GenesisConfiguration {
pub fn genesis(&self) -> anyhow::Result<&Genesis> { pub fn genesis(&self) -> anyhow::Result<&Genesis> {
static DEFAULT_GENESIS: LazyLock<Genesis> = LazyLock::new(|| { static DEFAULT_GENESIS: LazyLock<Genesis> = LazyLock::new(|| {
let genesis = include_str!("../../../genesis.json"); let genesis = include_str!("../../../assets/dev-genesis.json");
serde_json::from_str(genesis).unwrap() serde_json::from_str(genesis).unwrap()
}); });
@@ -465,7 +499,7 @@ pub struct WalletConfiguration {
/// This argument controls which private keys the nodes should have access to and be added to /// This argument controls which private keys the nodes should have access to and be added to
/// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set /// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set
/// of the node. /// of the node.
#[clap(long = "wallet.additional-keys", default_value_t = 100_000)] #[clap(long = "wallet.additional-keys", default_value_t = 200)]
additional_keys: usize, additional_keys: usize,
/// The wallet object that will be used. /// The wallet object that will be used.
@@ -490,6 +524,10 @@ impl WalletConfiguration {
}) })
.clone() .clone()
} }
pub fn highest_private_key_exclusive(&self) -> U256 {
U256::try_from(self.additional_keys).unwrap()
}
} }
fn serialize_private_key<S>(value: &PrivateKeySigner, serializer: S) -> Result<S::Ok, S::Error> fn serialize_private_key<S>(value: &PrivateKeySigner, serializer: S) -> Result<S::Ok, S::Error>
+82 -60
View File
@@ -2,6 +2,7 @@
use std::collections::HashMap; use std::collections::HashMap;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc;
use alloy::consensus::EMPTY_ROOT_HASH; use alloy::consensus::EMPTY_ROOT_HASH;
use alloy::hex; use alloy::hex;
@@ -17,22 +18,23 @@ use alloy::{
primitives::Address, primitives::Address,
rpc::types::{TransactionRequest, trace::geth::DiffMode}, rpc::types::{TransactionRequest, trace::geth::DiffMode},
}; };
use anyhow::Context as _; use anyhow::{Context as _, bail};
use futures::{TryStreamExt, future::try_join_all}; use futures::{TryStreamExt, future::try_join_all};
use indexmap::IndexMap; use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier; use revive_dt_common::types::{PlatformIdentifier, PrivateKeyAllocator};
use revive_dt_format::traits::{ResolutionContext, ResolverApi}; use revive_dt_format::traits::{ResolutionContext, ResolverApi};
use revive_dt_report::ExecutionSpecificReporter; use revive_dt_report::ExecutionSpecificReporter;
use semver::Version; use semver::Version;
use revive_dt_format::case::Case; use revive_dt_format::case::Case;
use revive_dt_format::input::{
BalanceAssertionStep, Calldata, EtherValue, Expected, ExpectedOutput, FunctionCallStep, Method,
StepIdx, StorageEmptyAssertionStep,
};
use revive_dt_format::metadata::{ContractIdent, ContractInstance, ContractPathAndIdent}; use revive_dt_format::metadata::{ContractIdent, ContractInstance, ContractPathAndIdent};
use revive_dt_format::{input::Step, metadata::Metadata}; use revive_dt_format::steps::{
BalanceAssertionStep, Calldata, EtherValue, Expected, ExpectedOutput, FunctionCallStep, Method,
StepIdx, StepPath, StorageEmptyAssertionStep,
};
use revive_dt_format::{metadata::Metadata, steps::Step};
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
use tokio::sync::Mutex;
use tokio::try_join; use tokio::try_join;
use tracing::{Instrument, info, info_span, instrument}; use tracing::{Instrument, info, info_span, instrument};
@@ -53,6 +55,10 @@ pub struct CaseState {
/// The execution reporter. /// The execution reporter.
execution_reporter: ExecutionSpecificReporter, execution_reporter: ExecutionSpecificReporter,
/// The private key allocator used for this case state. This is an Arc Mutex to allow for the
/// state to be cloned and for all of the clones to refer to the same allocator.
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
} }
impl CaseState { impl CaseState {
@@ -61,6 +67,7 @@ impl CaseState {
compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>, compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>, deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
execution_reporter: ExecutionSpecificReporter, execution_reporter: ExecutionSpecificReporter,
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
) -> Self { ) -> Self {
Self { Self {
compiled_contracts, compiled_contracts,
@@ -68,6 +75,7 @@ impl CaseState {
variables: Default::default(), variables: Default::default(),
compiler_version, compiler_version,
execution_reporter, execution_reporter,
private_key_allocator,
} }
} }
@@ -75,6 +83,7 @@ impl CaseState {
&mut self, &mut self,
metadata: &Metadata, metadata: &Metadata,
step: &Step, step: &Step,
step_path: &StepPath,
node: &dyn EthereumNode, node: &dyn EthereumNode,
) -> anyhow::Result<StepOutput> { ) -> anyhow::Result<StepOutput> {
match step { match step {
@@ -102,12 +111,19 @@ impl CaseState {
metadata, metadata,
repetition_step.repeat, repetition_step.repeat,
&repetition_step.steps, &repetition_step.steps,
step_path,
node, node,
) )
.await .await
.context("Failed to handle the repetition step")?; .context("Failed to handle the repetition step")?;
Ok(StepOutput::Repetition) Ok(StepOutput::Repetition)
} }
Step::AllocateAccount(account_allocation) => {
self.handle_account_allocation(account_allocation.variable_name.as_str())
.await
.context("Failed to allocate account")?;
Ok(StepOutput::AccountAllocation)
}
} }
.inspect(|_| info!("Step Succeeded")) .inspect(|_| info!("Step Succeeded"))
} }
@@ -186,13 +202,15 @@ impl CaseState {
metadata: &Metadata, metadata: &Metadata,
repetitions: usize, repetitions: usize,
steps: &[Step], steps: &[Step],
step_path: &StepPath,
node: &dyn EthereumNode, node: &dyn EthereumNode,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
let tasks = (0..repetitions).map(|_| { let tasks = (0..repetitions).map(|_| {
let mut state = self.clone(); let mut state = self.clone();
async move { async move {
for step in steps { for (step_idx, step) in steps.iter().enumerate() {
state.handle_step(metadata, step, node).await?; let step_path = step_path.append(step_idx);
state.handle_step(metadata, step, &step_path, node).await?;
} }
Ok::<(), anyhow::Error>(()) Ok::<(), anyhow::Error>(())
} }
@@ -201,6 +219,21 @@ impl CaseState {
Ok(()) Ok(())
} }
/// Allocates a fresh account for this case and binds its address to a variable.
///
/// `variable_name` must be of the form `$VARIABLE:<name>`; the allocated
/// account's address is stored under `<name>` in this state's variable map so
/// subsequent steps can reference it. Errors if the prefix is missing or if the
/// allocator has been exhausted.
#[instrument(level = "info", name = "Handling Account Allocation", skip_all)]
pub async fn handle_account_allocation(&mut self, variable_name: &str) -> anyhow::Result<()> {
    let Some(variable_name) = variable_name.strip_prefix("$VARIABLE:") else {
        bail!("Account allocation must start with $VARIABLE:");
    };
    // The allocator is shared (Arc<Mutex<...>>) across cloned case states;
    // hold the lock only for the duration of a single allocation.
    let private_key = self.private_key_allocator.lock().await.allocate()?;
    let account = private_key.address();
    // The 20-byte address is widened into a U256 (left-padded) so it can flow
    // through the same variable map as numeric variables.
    let variable = U256::from_be_slice(account.0.as_slice());
    self.variables.insert(variable_name.to_string(), variable);
    Ok(())
}
/// Handles the contract deployment for a given input performing it if it needs to be performed. /// Handles the contract deployment for a given input performing it if it needs to be performed.
#[instrument(level = "info", skip_all)] #[instrument(level = "info", skip_all)]
async fn handle_input_contract_deployment( async fn handle_input_contract_deployment(
@@ -227,15 +260,16 @@ impl CaseState {
.then_some(input.value) .then_some(input.value)
.flatten(); .flatten();
let caller = {
let context = self.default_resolution_context();
let resolver = node.resolver().await?;
input
.caller
.resolve_address(resolver.as_ref(), context)
.await?
};
if let (_, _, Some(receipt)) = self if let (_, _, Some(receipt)) = self
.get_or_deploy_contract_instance( .get_or_deploy_contract_instance(&instance, metadata, caller, calldata, value, node)
&instance,
metadata,
input.caller,
calldata,
value,
node,
)
.await .await
.context("Failed to get or deploy contract instance during input execution")? .context("Failed to get or deploy contract instance during input execution")?
{ {
@@ -465,13 +499,9 @@ impl CaseState {
{ {
// Handling the emitter assertion. // Handling the emitter assertion.
if let Some(ref expected_address) = expected_event.address { if let Some(ref expected_address) = expected_event.address {
let expected = Address::from_slice( let expected = expected_address
Calldata::new_compound([expected_address]) .resolve_address(resolver, resolution_context)
.calldata(resolver, resolution_context) .await?;
.await?
.get(12..32)
.expect("Can't fail"),
);
let actual = actual_event.address(); let actual = actual_event.address();
if actual != expected { if actual != expected {
tracing::error!( tracing::error!(
@@ -568,17 +598,17 @@ impl CaseState {
balance_assertion: &BalanceAssertionStep, balance_assertion: &BalanceAssertionStep,
node: &dyn EthereumNode, node: &dyn EthereumNode,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
let Some(instance) = balance_assertion let Some(address) = balance_assertion.address.as_resolvable_address() else {
.address
.strip_suffix(".address")
.map(ContractInstance::new)
else {
return Ok(()); return Ok(());
}; };
let Some(instance) = address.strip_suffix(".address").map(ContractInstance::new) else {
return Ok(());
};
self.get_or_deploy_contract_instance( self.get_or_deploy_contract_instance(
&instance, &instance,
metadata, metadata,
FunctionCallStep::default_caller(), FunctionCallStep::default_caller_address(),
None, None,
None, None,
node, node,
@@ -591,20 +621,16 @@ impl CaseState {
pub async fn handle_balance_assertion_execution( pub async fn handle_balance_assertion_execution(
&mut self, &mut self,
BalanceAssertionStep { BalanceAssertionStep {
address: address_string, address,
expected_balance: amount, expected_balance: amount,
.. ..
}: &BalanceAssertionStep, }: &BalanceAssertionStep,
node: &dyn EthereumNode, node: &dyn EthereumNode,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
let resolver = node.resolver().await?; let resolver = node.resolver().await?;
let address = Address::from_slice( let address = address
Calldata::new_compound([address_string]) .resolve_address(resolver.as_ref(), self.default_resolution_context())
.calldata(resolver.as_ref(), self.default_resolution_context()) .await?;
.await?
.get(12..32)
.expect("Can't fail"),
);
let balance = node.balance_of(address).await?; let balance = node.balance_of(address).await?;
@@ -616,7 +642,7 @@ impl CaseState {
"Balance assertion failed - Expected {} but got {} for {} resolved to {}", "Balance assertion failed - Expected {} but got {} for {} resolved to {}",
expected, expected,
actual, actual,
address_string, address,
address, address,
) )
} }
@@ -631,17 +657,17 @@ impl CaseState {
storage_empty_assertion: &StorageEmptyAssertionStep, storage_empty_assertion: &StorageEmptyAssertionStep,
node: &dyn EthereumNode, node: &dyn EthereumNode,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
let Some(instance) = storage_empty_assertion let Some(address) = storage_empty_assertion.address.as_resolvable_address() else {
.address
.strip_suffix(".address")
.map(ContractInstance::new)
else {
return Ok(()); return Ok(());
}; };
let Some(instance) = address.strip_suffix(".address").map(ContractInstance::new) else {
return Ok(());
};
self.get_or_deploy_contract_instance( self.get_or_deploy_contract_instance(
&instance, &instance,
metadata, metadata,
FunctionCallStep::default_caller(), FunctionCallStep::default_caller_address(),
None, None,
None, None,
node, node,
@@ -654,20 +680,16 @@ impl CaseState {
pub async fn handle_storage_empty_assertion_execution( pub async fn handle_storage_empty_assertion_execution(
&mut self, &mut self,
StorageEmptyAssertionStep { StorageEmptyAssertionStep {
address: address_string, address,
is_storage_empty, is_storage_empty,
.. ..
}: &StorageEmptyAssertionStep, }: &StorageEmptyAssertionStep,
node: &dyn EthereumNode, node: &dyn EthereumNode,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
let resolver = node.resolver().await?; let resolver = node.resolver().await?;
let address = Address::from_slice( let address = address
Calldata::new_compound([address_string]) .resolve_address(resolver.as_ref(), self.default_resolution_context())
.calldata(resolver.as_ref(), self.default_resolution_context()) .await?;
.await?
.get(12..32)
.expect("Can't fail"),
);
let storage = node.latest_state_proof(address, Default::default()).await?; let storage = node.latest_state_proof(address, Default::default()).await?;
let is_empty = storage.storage_hash == EMPTY_ROOT_HASH; let is_empty = storage.storage_hash == EMPTY_ROOT_HASH;
@@ -681,7 +703,7 @@ impl CaseState {
"Storage Empty Assertion failed - Expected {} but got {} for {} resolved to {}", "Storage Empty Assertion failed - Expected {} but got {} for {} resolved to {}",
expected, expected,
actual, actual,
address_string, address,
address, address,
) )
}; };
@@ -824,32 +846,31 @@ impl<'a> CaseDriver<'a> {
.enumerate() .enumerate()
.map(|(idx, v)| (StepIdx::new(idx), v)) .map(|(idx, v)| (StepIdx::new(idx), v))
{ {
// Run this step concurrently across all platforms; short-circuit on first failure
let metadata = self.metadata; let metadata = self.metadata;
let step_futs = let step_futures =
self.platform_state self.platform_state
.iter_mut() .iter_mut()
.map(|(node, platform_id, case_state)| { .map(|(node, platform_id, case_state)| {
let platform_id = *platform_id; let platform_id = *platform_id;
let node_ref = *node; let node_ref = *node;
let step_clone = step.clone(); let step = step.clone();
let span = info_span!( let span = info_span!(
"Handling Step", "Handling Step",
%step_idx, %step_idx,
platform = %platform_id, platform = %platform_id,
); );
async move { async move {
let step_path = StepPath::from_iterator([step_idx]);
case_state case_state
.handle_step(metadata, &step_clone, node_ref) .handle_step(metadata, &step, &step_path, node_ref)
.await .await
.map_err(|e| (platform_id, e)) .map_err(|e| (platform_id, e))
} }
.instrument(span) .instrument(span)
}); });
match try_join_all(step_futs).await { match try_join_all(step_futures).await {
Ok(_outputs) => { Ok(_outputs) => {
// All platforms succeeded for this step
steps_executed += 1; steps_executed += 1;
} }
Err((platform_id, error)) => { Err((platform_id, error)) => {
@@ -875,4 +896,5 @@ pub enum StepOutput {
BalanceAssertion, BalanceAssertion,
StorageEmptyAssertion, StorageEmptyAssertion,
Repetition, Repetition,
AccountAllocation,
} }
+54 -1
View File
@@ -13,7 +13,9 @@ use anyhow::Context as _;
use revive_dt_common::types::*; use revive_dt_common::types::*;
use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc}; use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::*; use revive_dt_config::*;
use revive_dt_node::{Node, geth::GethNode, substrate::SubstrateNode}; use revive_dt_node::{
Node, geth::GethNode, lighthouse_geth::LighthouseGethNode, substrate::SubstrateNode,
};
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
use tracing::info; use tracing::info;
@@ -104,6 +106,51 @@ impl Platform for GethEvmSolcPlatform {
} }
} }
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct LighthouseGethEvmSolcPlatform;
impl Platform for LighthouseGethEvmSolcPlatform {
fn platform_identifier(&self) -> PlatformIdentifier {
PlatformIdentifier::LighthouseGethEvmSolc
}
fn node_identifier(&self) -> NodeIdentifier {
NodeIdentifier::LighthouseGeth
}
fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::Evm
}
fn compiler_identifier(&self) -> CompilerIdentifier {
CompilerIdentifier::Solc
}
fn new_node(
&self,
context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || {
let node = LighthouseGethNode::new(context);
let node = spawn_node::<LighthouseGethNode>(node, genesis)?;
Ok(Box::new(node) as Box<_>)
}))
}
fn new_compiler(
&self,
context: Context,
version: Option<VersionOrRequirement>,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn SolidityCompiler>>>>> {
Box::pin(async move {
let compiler = Solc::new(context, version).await;
compiler.map(|compiler| Box::new(compiler) as Box<dyn SolidityCompiler>)
})
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct KitchensinkPolkavmResolcPlatform; pub struct KitchensinkPolkavmResolcPlatform;
@@ -316,6 +363,9 @@ impl From<PlatformIdentifier> for Box<dyn Platform> {
fn from(value: PlatformIdentifier) -> Self { fn from(value: PlatformIdentifier) -> Self {
match value { match value {
PlatformIdentifier::GethEvmSolc => Box::new(GethEvmSolcPlatform) as Box<_>, PlatformIdentifier::GethEvmSolc => Box::new(GethEvmSolcPlatform) as Box<_>,
PlatformIdentifier::LighthouseGethEvmSolc => {
Box::new(LighthouseGethEvmSolcPlatform) as Box<_>
}
PlatformIdentifier::KitchensinkPolkavmResolc => { PlatformIdentifier::KitchensinkPolkavmResolc => {
Box::new(KitchensinkPolkavmResolcPlatform) as Box<_> Box::new(KitchensinkPolkavmResolcPlatform) as Box<_>
} }
@@ -336,6 +386,9 @@ impl From<PlatformIdentifier> for &dyn Platform {
fn from(value: PlatformIdentifier) -> Self { fn from(value: PlatformIdentifier) -> Self {
match value { match value {
PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform as &dyn Platform, PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform as &dyn Platform,
PlatformIdentifier::LighthouseGethEvmSolc => {
&LighthouseGethEvmSolcPlatform as &dyn Platform
}
PlatformIdentifier::KitchensinkPolkavmResolc => { PlatformIdentifier::KitchensinkPolkavmResolc => {
&KitchensinkPolkavmResolcPlatform as &dyn Platform &KitchensinkPolkavmResolcPlatform as &dyn Platform
} }
+18 -5
View File
@@ -26,10 +26,14 @@ use revive_dt_report::{
}; };
use schemars::schema_for; use schemars::schema_for;
use serde_json::{Value, json}; use serde_json::{Value, json};
use tokio::sync::Mutex;
use tracing::{debug, error, info, info_span, instrument}; use tracing::{debug, error, info, info_span, instrument};
use tracing_subscriber::{EnvFilter, FmtSubscriber}; use tracing_subscriber::{EnvFilter, FmtSubscriber};
use revive_dt_common::{iterators::EitherIter, types::Mode}; use revive_dt_common::{
iterators::EitherIter,
types::{Mode, PrivateKeyAllocator},
};
use revive_dt_compiler::SolidityCompiler; use revive_dt_compiler::SolidityCompiler;
use revive_dt_config::{Context, *}; use revive_dt_config::{Context, *};
use revive_dt_core::{ use revive_dt_core::{
@@ -39,9 +43,9 @@ use revive_dt_core::{
use revive_dt_format::{ use revive_dt_format::{
case::{Case, CaseIdx}, case::{Case, CaseIdx},
corpus::Corpus, corpus::Corpus,
input::{FunctionCallStep, Step},
metadata::{ContractPathAndIdent, Metadata, MetadataFile}, metadata::{ContractPathAndIdent, Metadata, MetadataFile},
mode::ParsedMode, mode::ParsedMode,
steps::{FunctionCallStep, Step},
}; };
use crate::cached_compiler::CachedCompiler; use crate::cached_compiler::CachedCompiler;
@@ -326,8 +330,13 @@ async fn start_driver_task<'a>(
.expect("Can't fail"); .expect("Can't fail");
} }
let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
context.wallet_configuration.highest_private_key_exclusive(),
)));
let reporter = test.reporter.clone(); let reporter = test.reporter.clone();
let result = handle_case_driver(&test, cached_compiler).await; let result =
handle_case_driver(&test, cached_compiler, private_key_allocator).await;
match result { match result {
Ok(steps_executed) => reporter Ok(steps_executed) => reporter
@@ -438,6 +447,7 @@ async fn start_cli_reporting_task(reporter: Reporter) {
async fn handle_case_driver<'a>( async fn handle_case_driver<'a>(
test: &Test<'a>, test: &Test<'a>,
cached_compiler: Arc<CachedCompiler<'a>>, cached_compiler: Arc<CachedCompiler<'a>>,
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
) -> anyhow::Result<usize> { ) -> anyhow::Result<usize> {
let platform_state = stream::iter(test.platforms.iter()) let platform_state = stream::iter(test.platforms.iter())
// Compiling the pre-link contracts. // Compiling the pre-link contracts.
@@ -511,13 +521,14 @@ async fn handle_case_driver<'a>(
.steps .steps
.iter() .iter()
.filter_map(|step| match step { .filter_map(|step| match step {
Step::FunctionCall(input) => Some(input.caller), Step::FunctionCall(input) => input.caller.as_address().copied(),
Step::BalanceAssertion(..) => None, Step::BalanceAssertion(..) => None,
Step::StorageEmptyAssertion(..) => None, Step::StorageEmptyAssertion(..) => None,
Step::Repeat(..) => None, Step::Repeat(..) => None,
Step::AllocateAccount(..) => None,
}) })
.next() .next()
.unwrap_or(FunctionCallStep::default_caller()); .unwrap_or(FunctionCallStep::default_caller_address());
let tx = TransactionBuilder::<Ethereum>::with_deploy_code( let tx = TransactionBuilder::<Ethereum>::with_deploy_code(
TransactionRequest::default().from(deployer_address), TransactionRequest::default().from(deployer_address),
code, code,
@@ -564,6 +575,7 @@ async fn handle_case_driver<'a>(
.filter_map( .filter_map(
|(test, platform, node, compiler, reporter, _, deployed_libraries)| { |(test, platform, node, compiler, reporter, _, deployed_libraries)| {
let cached_compiler = cached_compiler.clone(); let cached_compiler = cached_compiler.clone();
let private_key_allocator = private_key_allocator.clone();
async move { async move {
let compiler_output = cached_compiler let compiler_output = cached_compiler
@@ -591,6 +603,7 @@ async fn handle_case_driver<'a>(
compiler_output.contracts, compiler_output.contracts,
deployed_libraries.unwrap_or_default(), deployed_libraries.unwrap_or_default(),
reporter.clone(), reporter.clone(),
private_key_allocator,
); );
Some((*node, platform.platform_identifier(), case_state)) Some((*node, platform.platform_identifier(), case_state))
+19 -2
View File
@@ -4,8 +4,8 @@ use serde::{Deserialize, Serialize};
use revive_dt_common::{macros::define_wrapper_type, types::Mode}; use revive_dt_common::{macros::define_wrapper_type, types::Mode};
use crate::{ use crate::{
input::{Expected, Step},
mode::ParsedMode, mode::ParsedMode,
steps::{Expected, RepeatStep, Step},
}; };
#[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)] #[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)]
@@ -55,7 +55,6 @@ pub struct Case {
} }
impl Case { impl Case {
#[allow(irrefutable_let_patterns)]
pub fn steps_iterator(&self) -> impl Iterator<Item = Step> { pub fn steps_iterator(&self) -> impl Iterator<Item = Step> {
let steps_len = self.steps.len(); let steps_len = self.steps.len();
self.steps self.steps
@@ -84,6 +83,24 @@ impl Case {
}) })
} }
pub fn steps_iterator_for_benchmarks(
&self,
default_repeat_count: usize,
) -> Box<dyn Iterator<Item = Step> + '_> {
let contains_repeat = self
.steps_iterator()
.any(|step| matches!(&step, Step::Repeat(..)));
if contains_repeat {
Box::new(self.steps_iterator()) as Box<_>
} else {
Box::new(std::iter::once(Step::Repeat(Box::new(RepeatStep {
comment: None,
repeat: default_repeat_count,
steps: self.steps_iterator().collect(),
})))) as Box<_>
}
}
pub fn solc_modes(&self) -> Vec<Mode> { pub fn solc_modes(&self) -> Vec<Mode> {
match &self.modes { match &self.modes {
Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(), Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
+1 -1
View File
@@ -2,7 +2,7 @@
pub mod case; pub mod case;
pub mod corpus; pub mod corpus;
pub mod input;
pub mod metadata; pub mod metadata;
pub mod mode; pub mod mode;
pub mod steps;
pub mod traits; pub mod traits;
@@ -1,4 +1,4 @@
use std::collections::HashMap; use std::{collections::HashMap, fmt::Display, str::FromStr};
use alloy::{ use alloy::{
eips::BlockNumberOrTag, eips::BlockNumberOrTag,
@@ -29,19 +29,88 @@ use crate::{metadata::ContractInstance, traits::ResolutionContext};
pub enum Step { pub enum Step {
/// A function call or an invocation to some function on some smart contract. /// A function call or an invocation to some function on some smart contract.
FunctionCall(Box<FunctionCallStep>), FunctionCall(Box<FunctionCallStep>),
/// A step for performing a balance assertion on some account or contract. /// A step for performing a balance assertion on some account or contract.
BalanceAssertion(Box<BalanceAssertionStep>), BalanceAssertion(Box<BalanceAssertionStep>),
/// A step for asserting that the storage of some contract or account is empty. /// A step for asserting that the storage of some contract or account is empty.
StorageEmptyAssertion(Box<StorageEmptyAssertionStep>), StorageEmptyAssertion(Box<StorageEmptyAssertionStep>),
/// A special step for repeating a bunch of steps a certain number of times. /// A special step for repeating a bunch of steps a certain number of times.
Repeat(Box<RepeatStep>), Repeat(Box<RepeatStep>),
/// A step type that allows for a new account address to be allocated and to later on be used
/// as the caller in another step.
AllocateAccount(Box<AllocateAccountStep>),
} }
define_wrapper_type!( define_wrapper_type!(
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StepIdx(usize) impl Display; pub struct StepIdx(usize) impl Display, FromStr;
); );
define_wrapper_type!(
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(try_from = "String", into = "String")]
pub struct StepPath(Vec<StepIdx>);
);
impl StepPath {
pub fn from_iterator(path: impl IntoIterator<Item = impl Into<StepIdx>>) -> Self {
Self(path.into_iter().map(|value| value.into()).collect())
}
pub fn increment(&self) -> Self {
let mut this = self.clone();
if let Some(last) = this.last_mut() {
last.0 += 1
}
this
}
pub fn append(&self, step_idx: impl Into<StepIdx>) -> Self {
let mut this = self.clone();
this.0.push(step_idx.into());
this
}
}
impl Display for StepPath {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0
.iter()
.map(|idx| idx.to_string())
.collect::<Vec<_>>()
.join(".")
.fmt(f)
}
}
impl FromStr for StepPath {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
s.split(".")
.map(StepIdx::from_str)
.collect::<anyhow::Result<Vec<_>>>()
.map(Self)
}
}
impl From<StepPath> for String {
fn from(value: StepPath) -> Self {
value.to_string()
}
}
impl TryFrom<String> for StepPath {
type Error = anyhow::Error;
fn try_from(value: String) -> Result<Self, Self::Error> {
value.parse()
}
}
/// This is an input step which is a transaction description that the framework translates into a /// This is an input step which is a transaction description that the framework translates into a
/// transaction and executes on the nodes. /// transaction and executes on the nodes.
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)] #[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
@@ -49,7 +118,7 @@ pub struct FunctionCallStep {
/// The address of the account performing the call and paying the fees for it. /// The address of the account performing the call and paying the fees for it.
#[serde(default = "FunctionCallStep::default_caller")] #[serde(default = "FunctionCallStep::default_caller")]
#[schemars(with = "String")] #[schemars(with = "String")]
pub caller: Address, pub caller: StepAddress,
/// An optional comment on the step which has no impact on the execution in any way. /// An optional comment on the step which has no impact on the execution in any way.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
@@ -86,7 +155,7 @@ pub struct FunctionCallStep {
/// This represents a balance assertion step where the framework needs to query the balance of some /// This represents a balance assertion step where the framework needs to query the balance of some
/// account or contract and assert that it's some amount. /// account or contract and assert that it's some amount.
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)] #[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct BalanceAssertionStep { pub struct BalanceAssertionStep {
/// An optional comment on the balance assertion. /// An optional comment on the balance assertion.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
@@ -98,7 +167,7 @@ pub struct BalanceAssertionStep {
/// this could be a normal hex address, a variable such as `Test.address`, or perhaps even a /// this could be a normal hex address, a variable such as `Test.address`, or perhaps even a
/// full on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are /// full on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are
/// followed in the calldata. /// followed in the calldata.
pub address: String, pub address: StepAddress,
/// The amount of balance to assert that the account or contract has. This is a 256 bit string /// The amount of balance to assert that the account or contract has. This is a 256 bit string
/// that's serialized and deserialized into a decimal string. /// that's serialized and deserialized into a decimal string.
@@ -108,7 +177,7 @@ pub struct BalanceAssertionStep {
/// This represents an assertion for the storage of some contract or account and whether it's empty /// This represents an assertion for the storage of some contract or account and whether it's empty
/// or not. /// or not.
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)] #[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct StorageEmptyAssertionStep { pub struct StorageEmptyAssertionStep {
/// An optional comment on the storage empty assertion. /// An optional comment on the storage empty assertion.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
@@ -120,7 +189,7 @@ pub struct StorageEmptyAssertionStep {
/// this could be a normal hex address, a variable such as `Test.address`, or perhaps even a /// this could be a normal hex address, a variable such as `Test.address`, or perhaps even a
/// full on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are /// full on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are
/// followed in the calldata. /// followed in the calldata.
pub address: String, pub address: StepAddress,
/// A boolean of whether the storage of the address is empty or not. /// A boolean of whether the storage of the address is empty or not.
pub is_storage_empty: bool, pub is_storage_empty: bool,
@@ -130,6 +199,10 @@ pub struct StorageEmptyAssertionStep {
/// steps to be repeated (on different drivers) a certain number of times. /// steps to be repeated (on different drivers) a certain number of times.
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)] #[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct RepeatStep { pub struct RepeatStep {
/// An optional comment on the repetition step.
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
/// The number of repetitions that the steps should be repeated for. /// The number of repetitions that the steps should be repeated for.
pub repeat: usize, pub repeat: usize,
@@ -137,6 +210,19 @@ pub struct RepeatStep {
pub steps: Vec<Step>, pub steps: Vec<Step>,
} }
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct AllocateAccountStep {
/// An optional comment on the account allocation step.
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
/// An instruction to allocate a new account with the value being the variable name of that
/// account. This must start with `$VARIABLE:` and then be followed by the variable name of the
/// account.
#[serde(rename = "allocate_account")]
pub variable_name: String,
}
/// A set of expectations and assertions to make about the transaction after it ran. /// A set of expectations and assertions to make about the transaction after it ran.
/// ///
/// If this is not specified then the only assertion that will be ran is that the transaction /// If this is not specified then the only assertion that will be ran is that the transaction
@@ -177,7 +263,7 @@ pub struct ExpectedOutput {
pub struct Event { pub struct Event {
/// An optional field of the address of the emitter of the event. /// An optional field of the address of the emitter of the event.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub address: Option<String>, pub address: Option<StepAddress>,
/// The set of topics to expect the event to have. /// The set of topics to expect the event to have.
pub topics: Vec<String>, pub topics: Vec<String>,
@@ -310,13 +396,74 @@ pub struct VariableAssignments {
pub return_data: Vec<String>, pub return_data: Vec<String>,
} }
/// An address type that might either be an address literal or a resolvable address.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
#[schemars(with = "String")]
#[serde(untagged)]
pub enum StepAddress {
Address(Address),
ResolvableAddress(String),
}
impl Default for StepAddress {
fn default() -> Self {
Self::Address(Default::default())
}
}
impl Display for StepAddress {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
StepAddress::Address(address) => Display::fmt(address, f),
StepAddress::ResolvableAddress(address) => Display::fmt(address, f),
}
}
}
impl StepAddress {
pub fn as_address(&self) -> Option<&Address> {
match self {
StepAddress::Address(address) => Some(address),
StepAddress::ResolvableAddress(_) => None,
}
}
pub fn as_resolvable_address(&self) -> Option<&str> {
match self {
StepAddress::ResolvableAddress(address) => Some(address),
StepAddress::Address(..) => None,
}
}
pub async fn resolve_address(
&self,
resolver: &(impl ResolverApi + ?Sized),
context: ResolutionContext<'_>,
) -> anyhow::Result<Address> {
match self {
StepAddress::Address(address) => Ok(*address),
StepAddress::ResolvableAddress(address) => Ok(Address::from_slice(
Calldata::new_compound([address])
.calldata(resolver, context)
.await?
.get(12..32)
.expect("Can't fail"),
)),
}
}
}
impl FunctionCallStep { impl FunctionCallStep {
pub const fn default_caller() -> Address { pub const fn default_caller_address() -> Address {
Address(FixedBytes(alloy::hex!( Address(FixedBytes(alloy::hex!(
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1" "0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1"
))) )))
} }
pub const fn default_caller() -> StepAddress {
StepAddress::Address(Self::default_caller_address())
}
fn default_instance() -> ContractInstance { fn default_instance() -> ContractInstance {
ContractInstance::new("Test") ContractInstance::new("Test")
} }
@@ -399,7 +546,8 @@ impl FunctionCallStep {
.encoded_input(resolver, context) .encoded_input(resolver, context)
.await .await
.context("Failed to encode input bytes for transaction request")?; .context("Failed to encode input bytes for transaction request")?;
let transaction_request = TransactionRequest::default().from(self.caller).value( let caller = self.caller.resolve_address(resolver, context).await?;
let transaction_request = TransactionRequest::default().from(caller).value(
self.value self.value
.map(|value| value.into_inner()) .map(|value| value.into_inner())
.unwrap_or_default(), .unwrap_or_default(),
+2
View File
@@ -22,6 +22,8 @@ revive-dt-node-interaction = { workspace = true }
serde = { workspace = true } serde = { workspace = true }
serde_json = { workspace = true } serde_json = { workspace = true }
serde_with = { workspace = true }
serde_yaml_ng = { workspace = true }
sp-core = { workspace = true } sp-core = { workspace = true }
sp-runtime = { workspace = true } sp-runtime = { workspace = true }
+89 -150
View File
@@ -1,17 +1,17 @@
//! The go-ethereum node implementation. //! The go-ethereum node implementation.
use std::{ use std::{
fs::{File, OpenOptions, create_dir_all, remove_dir_all}, fs::{File, create_dir_all, remove_dir_all},
io::{BufRead, BufReader, Read, Write}, io::Read,
ops::ControlFlow, ops::ControlFlow,
path::PathBuf, path::PathBuf,
pin::Pin, pin::Pin,
process::{Child, Command, Stdio}, process::{Command, Stdio},
sync::{ sync::{
Arc, Arc,
atomic::{AtomicU32, Ordering}, atomic::{AtomicU32, Ordering},
}, },
time::{Duration, Instant}, time::Duration,
}; };
use alloy::{ use alloy::{
@@ -41,7 +41,12 @@ use revive_dt_config::*;
use revive_dt_format::traits::ResolverApi; use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE}; use crate::{
Node,
common::FallbackGasFiller,
constants::INITIAL_BALANCE,
process::{Process, ProcessReadinessWaitBehavior},
};
static NODE_COUNT: AtomicU32 = AtomicU32::new(0); static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
@@ -61,16 +66,11 @@ pub struct GethNode {
logs_directory: PathBuf, logs_directory: PathBuf,
geth: PathBuf, geth: PathBuf,
id: u32, id: u32,
handle: Option<Child>, handle: Option<Process>,
start_timeout: Duration, start_timeout: Duration,
wallet: Arc<EthereumWallet>, wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager, nonce_manager: CachedNonceManager,
chain_id_filler: ChainIdFiller, chain_id_filler: ChainIdFiller,
/// This vector stores [`File`] objects that we use for logging which we want to flush when the
/// node object is dropped. We do not store them in a structured fashion at the moment (in
/// separate fields) as the logic that we need to apply to them is all the same regardless of
/// what it belongs to, we just want to flush them on [`Drop`] of the node.
logs_file_to_flush: Vec<File>,
} }
impl GethNode { impl GethNode {
@@ -84,9 +84,6 @@ impl GethNode {
const READY_MARKER: &str = "IPC endpoint opened"; const READY_MARKER: &str = "IPC endpoint opened";
const ERROR_MARKER: &str = "Fatal:"; const ERROR_MARKER: &str = "Fatal:";
const GETH_STDOUT_LOG_FILE_NAME: &str = "node_stdout.log";
const GETH_STDERR_LOG_FILE_NAME: &str = "node_stderr.log";
const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress"; const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet"; const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";
@@ -124,9 +121,6 @@ impl GethNode {
wallet: wallet.clone(), wallet: wallet.clone(),
chain_id_filler: Default::default(), chain_id_filler: Default::default(),
nonce_manager: Default::default(), nonce_manager: Default::default(),
// We know that we only need to be storing 2 files so we can specify that when creating
// the vector. It's the stdout and stderr of the geth node.
logs_file_to_flush: Vec::with_capacity(2),
} }
} }
@@ -194,118 +188,63 @@ impl GethNode {
/// [Instance::init] must be called prior. /// [Instance::init] must be called prior.
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn spawn_process(&mut self) -> anyhow::Result<&mut Self> { fn spawn_process(&mut self) -> anyhow::Result<&mut Self> {
// This is the `OpenOptions` that we wish to use for all of the log files that we will be let process = Process::new(
// opening in this method. We need to construct it in this way to: None,
// 1. Be consistent self.logs_directory.as_path(),
// 2. Less verbose and more dry self.geth.as_path(),
// 3. Because the builder pattern uses mutable references so we need to get around that. |command, stdout_file, stderr_file| {
let open_options = { command
let mut options = OpenOptions::new(); .arg("--dev")
options.create(true).truncate(true).write(true); .arg("--datadir")
options .arg(&self.data_directory)
}; .arg("--ipcpath")
.arg(&self.connection_string)
.arg("--nodiscover")
.arg("--maxpeers")
.arg("0")
.arg("--txlookuplimit")
.arg("0")
.arg("--cache.blocklogs")
.arg("512")
.arg("--state.scheme")
.arg("hash")
.arg("--syncmode")
.arg("full")
.arg("--gcmode")
.arg("archive")
.stderr(stderr_file)
.stdout(stdout_file);
},
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration: self.start_timeout,
check_function: Box::new(|_, stderr_line| match stderr_line {
Some(line) => {
if line.contains(Self::ERROR_MARKER) {
anyhow::bail!("Failed to start geth {line}");
} else if line.contains(Self::READY_MARKER) {
Ok(true)
} else {
Ok(false)
}
}
None => Ok(false),
}),
},
);
let stdout_logs_file = open_options match process {
.clone() Ok(process) => self.handle = Some(process),
.open(self.geth_stdout_log_file_path()) Err(err) => {
.context("Failed to open geth stdout logs file")?; tracing::error!(?err, "Failed to start geth, shutting down gracefully");
let stderr_logs_file = open_options self.shutdown()
.open(self.geth_stderr_log_file_path()) .context("Failed to gracefully shutdown after geth start error")?;
.context("Failed to open geth stderr logs file")?; return Err(err);
self.handle = Command::new(&self.geth) }
.arg("--dev")
.arg("--datadir")
.arg(&self.data_directory)
.arg("--ipcpath")
.arg(&self.connection_string)
.arg("--nodiscover")
.arg("--maxpeers")
.arg("0")
.arg("--txlookuplimit")
.arg("0")
.arg("--cache.blocklogs")
.arg("512")
.arg("--state.scheme")
.arg("hash")
.arg("--syncmode")
.arg("full")
.arg("--gcmode")
.arg("archive")
.stderr(
stderr_logs_file
.try_clone()
.context("Failed to clone geth stderr log file handle")?,
)
.stdout(
stdout_logs_file
.try_clone()
.context("Failed to clone geth stdout log file handle")?,
)
.spawn()
.context("Failed to spawn geth node process")?
.into();
if let Err(error) = self.wait_ready() {
tracing::error!(?error, "Failed to start geth, shutting down gracefully");
self.shutdown()
.context("Failed to gracefully shutdown after geth start error")?;
return Err(error);
} }
self.logs_file_to_flush
.extend([stderr_logs_file, stdout_logs_file]);
Ok(self) Ok(self)
} }
/// Wait for the g-ethereum node child process getting ready.
///
/// [Instance::spawn_process] must be called priorly.
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn wait_ready(&mut self) -> anyhow::Result<&mut Self> {
let start_time = Instant::now();
let logs_file = OpenOptions::new()
.read(true)
.write(false)
.append(false)
.truncate(false)
.open(self.geth_stderr_log_file_path())
.context("Failed to open geth stderr logs file for readiness check")?;
let maximum_wait_time = self.start_timeout;
let mut stderr = BufReader::new(logs_file).lines();
let mut lines = vec![];
loop {
if let Some(Ok(line)) = stderr.next() {
if line.contains(Self::ERROR_MARKER) {
anyhow::bail!("Failed to start geth {line}");
}
if line.contains(Self::READY_MARKER) {
return Ok(self);
}
lines.push(line);
}
if Instant::now().duration_since(start_time) > maximum_wait_time {
anyhow::bail!(
"Timeout in starting geth: took longer than {}ms. stdout:\n\n{}\n",
self.start_timeout.as_millis(),
lines.join("\n")
);
}
}
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn geth_stdout_log_file_path(&self) -> PathBuf {
self.logs_directory.join(Self::GETH_STDOUT_LOG_FILE_NAME)
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn geth_stderr_log_file_path(&self) -> PathBuf {
self.logs_directory.join(Self::GETH_STDERR_LOG_FILE_NAME)
}
async fn provider( async fn provider(
&self, &self,
) -> anyhow::Result<FillProvider<impl TxFiller<Ethereum>, impl Provider<Ethereum>, Ethereum>> ) -> anyhow::Result<FillProvider<impl TxFiller<Ethereum>, impl Provider<Ethereum>, Ethereum>>
@@ -650,17 +589,7 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi for GethNodeResol
impl Node for GethNode { impl Node for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn shutdown(&mut self) -> anyhow::Result<()> { fn shutdown(&mut self) -> anyhow::Result<()> {
// Terminate the processes in a graceful manner to allow for the output to be flushed. drop(self.handle.take());
if let Some(mut child) = self.handle.take() {
child
.kill()
.map_err(|error| anyhow::anyhow!("Failed to kill the geth process: {error:?}"))?;
}
// Flushing the files that we're using for keeping the logs before shutdown.
for file in self.logs_file_to_flush.iter_mut() {
file.flush()?
}
// Remove the node's database so that subsequent runs do not run on the same database. We // Remove the node's database so that subsequent runs do not run on the same database. We
// ignore the error just in case the directory didn't exist in the first place and therefore // ignore the error just in case the directory didn't exist in the first place and therefore
@@ -701,6 +630,8 @@ impl Drop for GethNode {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::sync::LazyLock;
use super::*; use super::*;
fn test_config() -> TestExecutionContext { fn test_config() -> TestExecutionContext {
@@ -717,9 +648,21 @@ mod tests {
(context, node) (context, node)
} }
fn shared_node() -> &'static GethNode {
static NODE: LazyLock<(TestExecutionContext, GethNode)> = LazyLock::new(new_node);
&NODE.1
}
#[test] #[test]
fn version_works() { fn version_works() {
let version = GethNode::new(&test_config()).version().unwrap(); // Arrange
let node = shared_node();
// Act
let version = node.version();
// Assert
let version = version.expect("Failed to get the version");
assert!( assert!(
version.starts_with("geth version"), version.starts_with("geth version"),
"expected version string, got: '{version}'" "expected version string, got: '{version}'"
@@ -729,7 +672,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_chain_id_from_node() { async fn can_get_chain_id_from_node() {
// Arrange // Arrange
let (_context, node) = new_node(); let node = shared_node();
// Act // Act
let chain_id = node.resolver().await.unwrap().chain_id().await; let chain_id = node.resolver().await.unwrap().chain_id().await;
@@ -742,7 +685,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_gas_limit_from_node() { async fn can_get_gas_limit_from_node() {
// Arrange // Arrange
let (_context, node) = new_node(); let node = shared_node();
// Act // Act
let gas_limit = node let gas_limit = node
@@ -753,14 +696,13 @@ mod tests {
.await; .await;
// Assert // Assert
let gas_limit = gas_limit.expect("Failed to get the gas limit"); let _ = gas_limit.expect("Failed to get the gas limit");
assert_eq!(gas_limit, u32::MAX as u128)
} }
#[tokio::test] #[tokio::test]
async fn can_get_coinbase_from_node() { async fn can_get_coinbase_from_node() {
// Arrange // Arrange
let (_context, node) = new_node(); let node = shared_node();
// Act // Act
let coinbase = node let coinbase = node
@@ -771,14 +713,13 @@ mod tests {
.await; .await;
// Assert // Assert
let coinbase = coinbase.expect("Failed to get the coinbase"); let _ = coinbase.expect("Failed to get the coinbase");
assert_eq!(coinbase, Address::new([0xFF; 20]))
} }
#[tokio::test] #[tokio::test]
async fn can_get_block_difficulty_from_node() { async fn can_get_block_difficulty_from_node() {
// Arrange // Arrange
let (_context, node) = new_node(); let node = shared_node();
// Act // Act
let block_difficulty = node let block_difficulty = node
@@ -789,14 +730,13 @@ mod tests {
.await; .await;
// Assert // Assert
let block_difficulty = block_difficulty.expect("Failed to get the block difficulty"); let _ = block_difficulty.expect("Failed to get the block difficulty");
assert_eq!(block_difficulty, U256::ZERO)
} }
#[tokio::test] #[tokio::test]
async fn can_get_block_hash_from_node() { async fn can_get_block_hash_from_node() {
// Arrange // Arrange
let (_context, node) = new_node(); let node = shared_node();
// Act // Act
let block_hash = node let block_hash = node
@@ -813,7 +753,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_block_timestamp_from_node() { async fn can_get_block_timestamp_from_node() {
// Arrange // Arrange
let (_context, node) = new_node(); let node = shared_node();
// Act // Act
let block_timestamp = node let block_timestamp = node
@@ -830,13 +770,12 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn can_get_block_number_from_node() { async fn can_get_block_number_from_node() {
// Arrange // Arrange
let (_context, node) = new_node(); let node = shared_node();
// Act // Act
let block_number = node.resolver().await.unwrap().last_block_number().await; let block_number = node.resolver().await.unwrap().last_block_number().await;
// Assert // Assert
let block_number = block_number.expect("Failed to get the block number"); let _ = block_number.expect("Failed to get the block number");
assert_eq!(block_number, 0)
} }
} }
+2
View File
@@ -6,6 +6,8 @@ use revive_dt_node_interaction::EthereumNode;
pub mod common; pub mod common;
pub mod constants; pub mod constants;
pub mod geth; pub mod geth;
pub mod lighthouse_geth;
pub mod process;
pub mod substrate; pub mod substrate;
/// An abstract interface for testing nodes. /// An abstract interface for testing nodes.
File diff suppressed because it is too large Load Diff
+188
View File
@@ -0,0 +1,188 @@
use std::{
fs::{File, OpenOptions},
io::{BufRead, BufReader, Write},
path::Path,
process::{Child, Command},
time::{Duration, Instant},
};
use anyhow::{Context, Result, bail};
/// A wrapper around processes which allows for their stdout and stderr to be logged and flushed
/// when the process is dropped.
#[derive(Debug)]
pub struct Process {
/// The handle of the child process.
child: Child,
/// The file that stdout is being logged to.
stdout_logs_file: File,
/// The file that stderr is being logged to.
stderr_logs_file: File,
}
impl Process {
pub fn new(
log_file_prefix: impl Into<Option<&'static str>>,
logs_directory: impl AsRef<Path>,
binary_path: impl AsRef<Path>,
command_building_callback: impl FnOnce(&mut Command, File, File),
process_readiness_wait_behavior: ProcessReadinessWaitBehavior,
) -> Result<Self> {
let log_file_prefix = log_file_prefix.into();
let (stdout_file_name, stderr_file_name) = match log_file_prefix {
Some(prefix) => (
format!("{prefix}_stdout.log"),
format!("{prefix}_stderr.log"),
),
None => ("stdout.log".to_string(), "stderr.log".to_string()),
};
let stdout_logs_file_path = logs_directory.as_ref().join(stdout_file_name);
let stderr_logs_file_path = logs_directory.as_ref().join(stderr_file_name);
let stdout_logs_file = OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(stdout_logs_file_path.as_path())
.context("Failed to open the stdout logs file")?;
let stderr_logs_file = OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(stderr_logs_file_path.as_path())
.context("Failed to open the stderr logs file")?;
let mut command = {
let stdout_logs_file = stdout_logs_file
.try_clone()
.context("Failed to clone the stdout logs file")?;
let stderr_logs_file = stderr_logs_file
.try_clone()
.context("Failed to clone the stderr logs file")?;
let mut command = Command::new(binary_path.as_ref());
command_building_callback(&mut command, stdout_logs_file, stderr_logs_file);
command
};
let mut child = command
.spawn()
.context("Failed to spawn the built command")?;
match process_readiness_wait_behavior {
ProcessReadinessWaitBehavior::NoStartupWait => {}
ProcessReadinessWaitBehavior::WaitDuration(duration) => std::thread::sleep(duration),
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration,
mut check_function,
} => {
let spawn_time = Instant::now();
let stdout_logs_file = OpenOptions::new()
.read(true)
.open(stdout_logs_file_path)
.context("Failed to open the stdout logs file")?;
let stderr_logs_file = OpenOptions::new()
.read(true)
.open(stderr_logs_file_path)
.context("Failed to open the stderr logs file")?;
let mut stdout_lines = BufReader::new(stdout_logs_file).lines();
let mut stderr_lines = BufReader::new(stderr_logs_file).lines();
let mut stdout = String::new();
let mut stderr = String::new();
loop {
let stdout_line = stdout_lines.next().and_then(Result::ok);
let stderr_line = stderr_lines.next().and_then(Result::ok);
if let Some(stdout_line) = stdout_line.as_ref() {
stdout.push_str(stdout_line);
stdout.push('\n');
}
if let Some(stderr_line) = stderr_line.as_ref() {
stderr.push_str(stderr_line);
stderr.push('\n');
}
let check_result =
check_function(stdout_line.as_deref(), stderr_line.as_deref())
.context("Failed to wait for the process to be ready")?;
if check_result {
break;
}
if Instant::now().duration_since(spawn_time) > max_wait_duration {
bail!(
"Waited for the process to start but it failed to start in time. stderr {stderr} - stdout {stdout}"
)
}
}
}
ProcessReadinessWaitBehavior::WaitForCommandToExit => {
if !child
.wait()
.context("Failed waiting for kurtosis run process to finish")?
.success()
{
anyhow::bail!("Failed to initialize kurtosis network",);
}
}
}
Ok(Self {
child,
stdout_logs_file,
stderr_logs_file,
})
}
}
impl Drop for Process {
fn drop(&mut self) {
self.child.kill().expect("Failed to kill the process");
self.stdout_logs_file
.flush()
.expect("Failed to flush the stdout logs file");
self.stderr_logs_file
.flush()
.expect("Failed to flush the stderr logs file");
}
}
pub enum ProcessReadinessWaitBehavior {
/// The process does not require any kind of wait after it's been spawned and can be used
/// straight away.
NoStartupWait,
/// Waits for the command to exit.
WaitForCommandToExit,
/// The process does require some amount of wait duration after it's been started.
WaitDuration(Duration),
/// The process requires a time bounded wait function which is a function of the lines that
/// appear in the log files.
TimeBoundedWaitFunction {
/// The maximum amount of time to wait for the check function to return true.
max_wait_duration: Duration,
/// The function to use to check if the process spawned is ready to use or not. This
/// function should return the following in the following cases:
///
/// - `Ok(true)`: Returned when the condition the process is waiting for has been fulfilled
/// and the wait is completed.
/// - `Ok(false)`: The process is not ready yet but it might be ready in the future.
/// - `Err`: The process is not ready yet and will not be ready in the future as it appears
/// that it has encountered an error when it was being spawned.
///
/// The first argument is a line from stdout and the second argument is a line from stderr.
#[allow(clippy::type_complexity)]
check_function: Box<dyn FnMut(Option<&str>, Option<&str>) -> anyhow::Result<bool>>,
},
}
+101 -191
View File
@@ -1,9 +1,8 @@
use std::{ use std::{
fs::{File, OpenOptions, create_dir_all, remove_dir_all}, fs::{create_dir_all, remove_dir_all},
io::{BufRead, Write}, path::PathBuf,
path::{Path, PathBuf},
pin::Pin, pin::Pin,
process::{Child, Command, Stdio}, process::{Command, Stdio},
sync::{ sync::{
Arc, Arc,
atomic::{AtomicU32, Ordering}, atomic::{AtomicU32, Ordering},
@@ -47,7 +46,12 @@ use revive_dt_config::*;
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
use tracing::instrument; use tracing::instrument;
use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE}; use crate::{
Node,
common::FallbackGasFiller,
constants::INITIAL_BALANCE,
process::{Process, ProcessReadinessWaitBehavior},
};
static NODE_COUNT: AtomicU32 = AtomicU32::new(0); static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
@@ -63,12 +67,11 @@ pub struct SubstrateNode {
rpc_url: String, rpc_url: String,
base_directory: PathBuf, base_directory: PathBuf,
logs_directory: PathBuf, logs_directory: PathBuf,
process_substrate: Option<Child>, substrate_process: Option<Process>,
process_proxy: Option<Child>, eth_proxy_process: Option<Process>,
wallet: Arc<EthereumWallet>, wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager, nonce_manager: CachedNonceManager,
chain_id_filler: ChainIdFiller, chain_id_filler: ChainIdFiller,
logs_file_to_flush: Vec<File>,
} }
impl SubstrateNode { impl SubstrateNode {
@@ -85,12 +88,6 @@ impl SubstrateNode {
const SUBSTRATE_LOG_ENV: &str = "error,evm=debug,sc_rpc_server=info,runtime::revive=debug"; const SUBSTRATE_LOG_ENV: &str = "error,evm=debug,sc_rpc_server=info,runtime::revive=debug";
const PROXY_LOG_ENV: &str = "info,eth-rpc=debug"; const PROXY_LOG_ENV: &str = "info,eth-rpc=debug";
const SUBSTRATE_STDOUT_LOG_FILE_NAME: &str = "node_stdout.log";
const SUBSTRATE_STDERR_LOG_FILE_NAME: &str = "node_stderr.log";
const PROXY_STDOUT_LOG_FILE_NAME: &str = "proxy_stdout.log";
const PROXY_STDERR_LOG_FILE_NAME: &str = "proxy_stderr.log";
pub const KITCHENSINK_EXPORT_CHAINSPEC_COMMAND: &str = "export-chain-spec"; pub const KITCHENSINK_EXPORT_CHAINSPEC_COMMAND: &str = "export-chain-spec";
pub const REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND: &str = "build-spec"; pub const REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND: &str = "build-spec";
@@ -121,16 +118,16 @@ impl SubstrateNode {
rpc_url: String::new(), rpc_url: String::new(),
base_directory, base_directory,
logs_directory, logs_directory,
process_substrate: None, substrate_process: None,
process_proxy: None, eth_proxy_process: None,
wallet: wallet.clone(), wallet: wallet.clone(),
chain_id_filler: Default::default(), chain_id_filler: Default::default(),
nonce_manager: Default::default(), nonce_manager: Default::default(),
logs_file_to_flush: Vec::with_capacity(4),
} }
} }
fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> { fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
let _ = remove_dir_all(self.base_directory.as_path());
let _ = clear_directory(&self.base_directory); let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory); let _ = clear_directory(&self.logs_directory);
@@ -213,120 +210,88 @@ impl SubstrateNode {
let substrate_rpc_port = Self::BASE_SUBSTRATE_RPC_PORT + self.id as u16; let substrate_rpc_port = Self::BASE_SUBSTRATE_RPC_PORT + self.id as u16;
let proxy_rpc_port = Self::BASE_PROXY_RPC_PORT + self.id as u16; let proxy_rpc_port = Self::BASE_PROXY_RPC_PORT + self.id as u16;
self.rpc_url = format!("http://127.0.0.1:{proxy_rpc_port}");
let chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE); let chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);
// This is the `OpenOptions` that we wish to use for all of the log files that we will be self.rpc_url = format!("http://127.0.0.1:{proxy_rpc_port}");
// opening in this method. We need to construct it in this way to:
// 1. Be consistent
// 2. Less verbose and more dry
// 3. Because the builder pattern uses mutable references so we need to get around that.
let open_options = {
let mut options = OpenOptions::new();
options.create(true).truncate(true).write(true);
options
};
// Start Substrate node let substrate_process = Process::new(
let substrate_stdout_logs_file = open_options "node",
.clone() self.logs_directory.as_path(),
.open(self.substrate_stdout_log_file_path()) self.node_binary.as_path(),
.context("Failed to open substrate stdout logs file")?; |command, stdout_file, stderr_file| {
let substrate_stderr_logs_file = open_options command
.clone() .arg("--dev")
.open(self.substrate_stderr_log_file_path()) .arg("--chain")
.context("Failed to open substrate stderr logs file")?; .arg(chainspec_path)
let node_binary_path = self.node_binary.as_path(); .arg("--base-path")
self.process_substrate = Command::new(node_binary_path) .arg(&self.base_directory)
.arg("--dev") .arg("--rpc-port")
.arg("--chain") .arg(substrate_rpc_port.to_string())
.arg(chainspec_path) .arg("--name")
.arg("--base-path") .arg(format!("revive-substrate-{}", self.id))
.arg(&self.base_directory) .arg("--force-authoring")
.arg("--rpc-port") .arg("--rpc-methods")
.arg(substrate_rpc_port.to_string()) .arg("Unsafe")
.arg("--name") .arg("--rpc-cors")
.arg(format!("revive-substrate-{}", self.id)) .arg("all")
.arg("--force-authoring") .arg("--rpc-max-connections")
.arg("--rpc-methods") .arg(u32::MAX.to_string())
.arg("Unsafe") .env("RUST_LOG", Self::SUBSTRATE_LOG_ENV)
.arg("--rpc-cors") .stdout(stdout_file)
.arg("all") .stderr(stderr_file);
.arg("--rpc-max-connections") },
.arg(u32::MAX.to_string()) ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
.env("RUST_LOG", Self::SUBSTRATE_LOG_ENV) max_wait_duration: Duration::from_secs(30),
.stdout( check_function: Box::new(|_, stderr_line| match stderr_line {
substrate_stdout_logs_file Some(line) => Ok(line.contains(Self::SUBSTRATE_READY_MARKER)),
.try_clone() None => Ok(false),
.context("Failed to clone substrate stdout log file handle")?, }),
) },
.stderr( );
substrate_stderr_logs_file match substrate_process {
.try_clone() Ok(process) => self.substrate_process = Some(process),
.context("Failed to clone substrate stderr log file handle")?, Err(err) => {
) tracing::error!(?err, "Failed to start substrate, shutting down gracefully");
.spawn() self.shutdown()
.context("Failed to spawn Substrate node process")? .context("Failed to gracefully shutdown after substrate start error")?;
.into(); return Err(err);
}
}
// Give the node a moment to boot let eth_proxy_process = Process::new(
if let Err(error) = Self::wait_ready( "proxy",
self.substrate_stderr_log_file_path().as_path(), self.logs_directory.as_path(),
Self::SUBSTRATE_READY_MARKER, self.eth_proxy_binary.as_path(),
Duration::from_secs(60), |command, stdout_file, stderr_file| {
) { command
self.shutdown() .arg("--dev")
.context("Failed to gracefully shutdown after Substrate start error")?; .arg("--rpc-port")
return Err(error); .arg(proxy_rpc_port.to_string())
}; .arg("--node-rpc-url")
.arg(format!("ws://127.0.0.1:{substrate_rpc_port}"))
let eth_proxy_stdout_logs_file = open_options .arg("--rpc-max-connections")
.clone() .arg(u32::MAX.to_string())
.open(self.proxy_stdout_log_file_path()) .env("RUST_LOG", Self::PROXY_LOG_ENV)
.context("Failed to open eth-proxy stdout logs file")?; .stdout(stdout_file)
let eth_proxy_stderr_logs_file = open_options .stderr(stderr_file);
.open(self.proxy_stderr_log_file_path()) },
.context("Failed to open eth-proxy stderr logs file")?; ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
self.process_proxy = Command::new(&self.eth_proxy_binary) max_wait_duration: Duration::from_secs(30),
.arg("--dev") check_function: Box::new(|_, stderr_line| match stderr_line {
.arg("--rpc-port") Some(line) => Ok(line.contains(Self::ETH_PROXY_READY_MARKER)),
.arg(proxy_rpc_port.to_string()) None => Ok(false),
.arg("--node-rpc-url") }),
.arg(format!("ws://127.0.0.1:{substrate_rpc_port}")) },
.arg("--rpc-max-connections") );
.arg(u32::MAX.to_string()) match eth_proxy_process {
.env("RUST_LOG", Self::PROXY_LOG_ENV) Ok(process) => self.eth_proxy_process = Some(process),
.stdout( Err(err) => {
eth_proxy_stdout_logs_file tracing::error!(?err, "Failed to start eth proxy, shutting down gracefully");
.try_clone() self.shutdown()
.context("Failed to clone eth-proxy stdout log file handle")?, .context("Failed to gracefully shutdown after eth proxy start error")?;
) return Err(err);
.stderr( }
eth_proxy_stderr_logs_file }
.try_clone()
.context("Failed to clone eth-proxy stderr log file handle")?,
)
.spawn()
.context("Failed to spawn eth-proxy process")?
.into();
if let Err(error) = Self::wait_ready(
self.proxy_stderr_log_file_path().as_path(),
Self::ETH_PROXY_READY_MARKER,
Duration::from_secs(60),
) {
self.shutdown()
.context("Failed to gracefully shutdown after eth-proxy start error")?;
return Err(error);
};
self.logs_file_to_flush.extend([
substrate_stdout_logs_file,
substrate_stderr_logs_file,
eth_proxy_stdout_logs_file,
eth_proxy_stderr_logs_file,
]);
Ok(()) Ok(())
} }
@@ -356,29 +321,6 @@ impl SubstrateNode {
account_id.to_ss58check() account_id.to_ss58check()
} }
fn wait_ready(logs_file_path: &Path, marker: &str, timeout: Duration) -> anyhow::Result<()> {
let start_time = std::time::Instant::now();
let logs_file = OpenOptions::new()
.read(true)
.write(false)
.append(false)
.truncate(false)
.open(logs_file_path)?;
let mut lines = std::io::BufReader::new(logs_file).lines();
loop {
if let Some(Ok(line)) = lines.next() {
if line.contains(marker) {
return Ok(());
}
}
if start_time.elapsed() > timeout {
anyhow::bail!("Timeout waiting for process readiness: {marker}");
}
}
}
pub fn eth_rpc_version(&self) -> anyhow::Result<String> { pub fn eth_rpc_version(&self) -> anyhow::Result<String> {
let output = Command::new(&self.eth_proxy_binary) let output = Command::new(&self.eth_proxy_binary)
.arg("--version") .arg("--version")
@@ -391,24 +333,6 @@ impl SubstrateNode {
Ok(String::from_utf8_lossy(&output).trim().to_string()) Ok(String::from_utf8_lossy(&output).trim().to_string())
} }
fn substrate_stdout_log_file_path(&self) -> PathBuf {
self.logs_directory
.join(Self::SUBSTRATE_STDOUT_LOG_FILE_NAME)
}
fn substrate_stderr_log_file_path(&self) -> PathBuf {
self.logs_directory
.join(Self::SUBSTRATE_STDERR_LOG_FILE_NAME)
}
fn proxy_stdout_log_file_path(&self) -> PathBuf {
self.logs_directory.join(Self::PROXY_STDOUT_LOG_FILE_NAME)
}
fn proxy_stderr_log_file_path(&self) -> PathBuf {
self.logs_directory.join(Self::PROXY_STDERR_LOG_FILE_NAME)
}
async fn provider( async fn provider(
&self, &self,
) -> anyhow::Result< ) -> anyhow::Result<
@@ -673,22 +597,8 @@ impl<F: TxFiller<ReviveNetwork>, P: Provider<ReviveNetwork>> ResolverApi
impl Node for SubstrateNode { impl Node for SubstrateNode {
fn shutdown(&mut self) -> anyhow::Result<()> { fn shutdown(&mut self) -> anyhow::Result<()> {
// Terminate the processes in a graceful manner to allow for the output to be flushed. drop(self.substrate_process.take());
if let Some(mut child) = self.process_proxy.take() { drop(self.eth_proxy_process.take());
child
.kill()
.map_err(|error| anyhow::anyhow!("Failed to kill the proxy process: {error:?}"))?;
}
if let Some(mut child) = self.process_substrate.take() {
child.kill().map_err(|error| {
anyhow::anyhow!("Failed to kill the Substrate process: {error:?}")
})?;
}
// Flushing the files that we're using for keeping the logs before shutdown.
for file in self.logs_file_to_flush.iter_mut() {
file.flush()?
}
// Remove the node's database so that subsequent runs do not run on the same database. We // Remove the node's database so that subsequent runs do not run on the same database. We
// ignore the error just in case the directory didn't exist in the first place and therefore // ignore the error just in case the directory didn't exist in the first place and therefore
@@ -1195,19 +1105,19 @@ mod tests {
(context, node) (context, node)
} }
/// A shared node that multiple tests can use. It starts up once. fn shared_state() -> &'static (TestExecutionContext, SubstrateNode) {
static STATE: LazyLock<(TestExecutionContext, SubstrateNode)> = LazyLock::new(new_node);
&STATE
}
fn shared_node() -> &'static SubstrateNode { fn shared_node() -> &'static SubstrateNode {
static NODE: LazyLock<(TestExecutionContext, SubstrateNode)> = LazyLock::new(|| { &shared_state().1
let (context, node) = new_node();
(context, node)
});
&NODE.1
} }
#[tokio::test] #[tokio::test]
async fn node_mines_simple_transfer_transaction_and_returns_receipt() { async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
// Arrange // Arrange
let (context, node) = new_node(); let (context, node) = shared_state();
let provider = node.provider().await.expect("Failed to create provider"); let provider = node.provider().await.expect("Failed to create provider");
+2 -2
View File
@@ -4,7 +4,7 @@ use std::{path::PathBuf, sync::Arc};
use revive_dt_common::{define_wrapper_type, types::PlatformIdentifier}; use revive_dt_common::{define_wrapper_type, types::PlatformIdentifier};
use revive_dt_compiler::Mode; use revive_dt_compiler::Mode;
use revive_dt_format::{case::CaseIdx, input::StepIdx}; use revive_dt_format::{case::CaseIdx, steps::StepPath};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
define_wrapper_type!( define_wrapper_type!(
@@ -33,5 +33,5 @@ pub struct ExecutionSpecifier {
#[derive(Clone, Debug, PartialEq, Eq, Hash)] #[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct StepExecutionSpecifier { pub struct StepExecutionSpecifier {
pub execution_specifier: Arc<ExecutionSpecifier>, pub execution_specifier: Arc<ExecutionSpecifier>,
pub step_idx: StepIdx, pub step_idx: StepPath,
} }
+43 -5
View File
@@ -201,6 +201,10 @@
{ {
"description": "A special step for repeating a bunch of steps a certain number of times.", "description": "A special step for repeating a bunch of steps a certain number of times.",
"$ref": "#/$defs/RepeatStep" "$ref": "#/$defs/RepeatStep"
},
{
"description": "A step type that allows for a new account address to be allocated and to later on be used\nas the caller in another step.",
"$ref": "#/$defs/AllocateAccountStep"
} }
] ]
}, },
@@ -377,9 +381,13 @@
"properties": { "properties": {
"address": { "address": {
"description": "An optional field of the address of the emitter of the event.", "description": "An optional field of the address of the emitter of the event.",
"type": [ "anyOf": [
"string", {
"null" "$ref": "#/$defs/StepAddress"
},
{
"type": "null"
}
] ]
}, },
"topics": { "topics": {
@@ -399,6 +407,10 @@
"values" "values"
] ]
}, },
"StepAddress": {
"description": "An address type that might either be an address literal or a resolvable address.",
"type": "string"
},
"EtherValue": { "EtherValue": {
"description": "Defines an Ether value.\n\nThis is an unsigned 256 bit integer that's followed by some denomination which can either be\neth, ether, gwei, or wei.", "description": "Defines an Ether value.\n\nThis is an unsigned 256 bit integer that's followed by some denomination which can either be\neth, ether, gwei, or wei.",
"type": "string" "type": "string"
@@ -431,7 +443,7 @@
}, },
"address": { "address": {
"description": "The address that the balance assertion should be done on.\n\nThis is a string which will be resolved into an address when being processed. Therefore,\nthis could be a normal hex address, a variable such as `Test.address`, or perhaps even a\nfull on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are\nfollowed in the calldata.", "description": "The address that the balance assertion should be done on.\n\nThis is a string which will be resolved into an address when being processed. Therefore,\nthis could be a normal hex address, a variable such as `Test.address`, or perhaps even a\nfull on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are\nfollowed in the calldata.",
"type": "string" "$ref": "#/$defs/StepAddress"
}, },
"expected_balance": { "expected_balance": {
"description": "The amount of balance to assert that the account or contract has. This is a 256 bit string\nthat's serialized and deserialized into a decimal string.", "description": "The amount of balance to assert that the account or contract has. This is a 256 bit string\nthat's serialized and deserialized into a decimal string.",
@@ -456,7 +468,7 @@
}, },
"address": { "address": {
"description": "The address that the balance assertion should be done on.\n\nThis is a string which will be resolved into an address when being processed. Therefore,\nthis could be a normal hex address, a variable such as `Test.address`, or perhaps even a\nfull on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are\nfollowed in the calldata.", "description": "The address that the balance assertion should be done on.\n\nThis is a string which will be resolved into an address when being processed. Therefore,\nthis could be a normal hex address, a variable such as `Test.address`, or perhaps even a\nfull on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are\nfollowed in the calldata.",
"type": "string" "$ref": "#/$defs/StepAddress"
}, },
"is_storage_empty": { "is_storage_empty": {
"description": "A boolean of whether the storage of the address is empty or not.", "description": "A boolean of whether the storage of the address is empty or not.",
@@ -472,6 +484,13 @@
"description": "This represents a repetition step which is a special step type that allows for a sequence of\nsteps to be repeated (on different drivers) a certain number of times.", "description": "This represents a repetition step which is a special step type that allows for a sequence of\nsteps to be repeated (on different drivers) a certain number of times.",
"type": "object", "type": "object",
"properties": { "properties": {
"comment": {
"description": "An optional comment on the repetition step.",
"type": [
"string",
"null"
]
},
"repeat": { "repeat": {
"description": "The number of repetitions that the steps should be repeated for.", "description": "The number of repetitions that the steps should be repeated for.",
"type": "integer", "type": "integer",
@@ -491,6 +510,25 @@
"steps" "steps"
] ]
}, },
"AllocateAccountStep": {
"type": "object",
"properties": {
"comment": {
"description": "An optional comment on the account allocation step.",
"type": [
"string",
"null"
]
},
"allocate_account": {
"description": "An instruction to allocate a new account with the value being the variable name of that\naccount. This must start with `$VARIABLE:` and then be followed by the variable name of the\naccount.",
"type": "string"
}
},
"required": [
"allocate_account"
]
},
"ContractPathAndIdent": { "ContractPathAndIdent": {
"description": "Represents an identifier used for contracts.\n\nThe type supports serialization from and into the following string format:\n\n```text\n${path}:${contract_ident}\n```", "description": "Represents an identifier used for contracts.\n\nThe type supports serialization from and into the following string format:\n\n```text\n${path}:${contract_ident}\n```",
"type": "string" "type": "string"