diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4b52009..8f19505 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -91,10 +91,10 @@ jobs: with: rustflags: "" - - name: Add wasm32 target + - name: Add wasm32 target and formatting run: | rustup target add wasm32-unknown-unknown - rustup component add rust-src + rustup component add rust-src rustfmt clippy - name: Install Geth on Ubuntu if: matrix.os == 'ubuntu-24.04' @@ -141,6 +141,17 @@ jobs: chmod +x resolc sudo mv resolc /usr/local/bin + - name: Install Kurtosis on macOS + if: matrix.os == 'macos-14' + run: brew install kurtosis-tech/tap/kurtosis-cli + + - name: Install Kurtosis on Ubuntu + if: matrix.os == 'ubuntu-24.04' + run: | + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install kurtosis-cli + - name: Machete uses: bnjbvr/cargo-machete@v0.7.1 @@ -159,5 +170,22 @@ jobs: - name: Check resolc version run: resolc --version - - name: Test cargo workspace - run: make test + - name: Test Formatting + run: make format + + - name: Test Clippy + run: make clippy + + - name: Test Machete + run: make machete + + - name: Unit Tests + if: matrix.os == 'ubuntu-24.04' + run: cargo test --workspace -- --nocapture + + # We can't install docker in the MacOS image used in CI and therefore we need to skip the + # Kurtosis and lighthouse related tests when running the CI on MacOS. 
+ - name: Unit Tests + if: matrix.os == 'macos-14' + run: | + cargo test --workspace -- --nocapture --skip lighthouse_geth::tests:: diff --git a/Cargo.lock b/Cargo.lock index 299786f..bae4906 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -87,6 +87,7 @@ dependencies = [ "alloy-transport", "alloy-transport-http", "alloy-transport-ipc", + "alloy-transport-ws", ] [[package]] @@ -378,6 +379,7 @@ dependencies = [ "alloy-transport", "alloy-transport-http", "alloy-transport-ipc", + "alloy-transport-ws", "async-stream", "async-trait", "auto_impl", @@ -454,6 +456,7 @@ dependencies = [ "alloy-transport", "alloy-transport-http", "alloy-transport-ipc", + "alloy-transport-ws", "futures", "pin-project", "reqwest", @@ -709,6 +712,24 @@ dependencies = [ "tracing", ] +[[package]] +name = "alloy-transport-ws" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e915e1250dc129ad48d264573ccd08e4716fdda564a772fd217875b8459aff9" +dependencies = [ + "alloy-pubsub", + "alloy-transport", + "futures", + "http", + "rustls", + "serde_json", + "tokio", + "tokio-tungstenite", + "tracing", + "ws_stream_wasm", +] + [[package]] name = "alloy-trie" version = "0.9.0" @@ -1373,6 +1394,17 @@ dependencies = [ "syn 2.0.101", ] +[[package]] +name = "async_io_stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" +dependencies = [ + "futures", + "pharos", + "rustc_version 0.4.1", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -2021,6 +2053,12 @@ dependencies = [ "parking_lot_core", ] +[[package]] +name = "data-encoding" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" + [[package]] name = "der" version = "0.7.10" @@ -3956,6 +3994,16 @@ dependencies = [ "ucd-trie", ] +[[package]] +name = "pharos" +version = "0.5.3" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" +dependencies = [ + "futures", + "rustc_version 0.4.1", +] + [[package]] name = "pin-project" version = "1.1.10" @@ -4577,6 +4625,8 @@ dependencies = [ "revive-dt-node-interaction", "serde", "serde_json", + "serde_with", + "serde_yaml_ng", "sp-core", "sp-runtime", "temp-dir", @@ -4768,6 +4818,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" dependencies = [ "once_cell", + "ring", "rustls-pki-types", "rustls-webpki", "subtle", @@ -5055,6 +5106,12 @@ dependencies = [ "pest", ] +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" + [[package]] name = "serde" version = "1.0.219" @@ -5171,6 +5228,19 @@ dependencies = [ "syn 2.0.101", ] +[[package]] +name = "serde_yaml_ng" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4db627b98b36d4203a7b458cf3573730f2bb591b28871d916dfa9efabfd41f" +dependencies = [ + "indexmap 2.10.0", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + [[package]] name = "serdect" version = "0.2.0" @@ -6067,6 +6137,22 @@ dependencies = [ "tokio-util", ] +[[package]] +name = "tokio-tungstenite" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084" +dependencies = [ + "futures-util", + "log", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tungstenite", + "webpki-roots 0.26.11", +] + [[package]] name = "tokio-util" version = "0.7.15" @@ -6281,6 +6367,25 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" +[[package]] +name = "tungstenite" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4793cb5e56680ecbb1d843515b23b6de9a75eb04b66643e256a396d43be33c13" +dependencies = [ + "bytes", + "data-encoding", + "http", + "httparse", + "log", + "rand 0.9.2", + "rustls", + "rustls-pki-types", + "sha1", + "thiserror 2.0.12", + "utf-8", +] + [[package]] name = "tuplex" version = "0.1.2" @@ -6368,6 +6473,12 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + [[package]] name = "untrusted" version = "0.9.0" @@ -6385,6 +6496,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + [[package]] name = "utf8_iter" version = "1.0.4" @@ -6639,6 +6756,24 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.2", +] + +[[package]] +name = "webpki-roots" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "widestring" version = "1.2.0" @@ -6977,6 +7112,25 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" +[[package]] +name = "ws_stream_wasm" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c173014acad22e83f16403ee360115b38846fe754e735c5d9d3803fe70c6abc" +dependencies = [ + "async_io_stream", + "futures", + "js-sys", + "log", + "pharos", + "rustc_version 0.4.1", + "send_wrapper", + "thiserror 2.0.12", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "wyz" version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index 048e8dc..32c59f2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,7 +45,8 @@ serde_json = { version = "1.0", default-features = false, features = [ "std", "unbounded_depth", ] } -serde_with = { version = "3.14.0" } +serde_with = { version = "3.14.0", features = ["hex"] } +serde_yaml_ng = { version = "0.10.0" } sha2 = { version = "0.10.9" } sp-core = "36.1.0" sp-runtime = "41.1.0" @@ -80,6 +81,7 @@ features = [ "json-abi", "providers", "provider-ipc", + "provider-ws", "provider-debug-api", "reqwest", "rpc-types", diff --git a/README.md b/README.md index 6f6c2d1..75a2e51 100644 --- a/README.md +++ b/README.md @@ -44,6 +44,7 @@ This section describes the required dependencies that this framework requires to - ETH-RPC - All communication with Kitchensink is done through the ETH RPC. - Solc - This is actually a transitive dependency, while this tool doesn't require solc as it downloads the versions that it requires, resolc requires that Solc is installed and available in the path. - Resolc - This is required to compile the contracts to PolkaVM bytecode. +- Kurtosis - The Kurtosis CLI tool is required for the production Ethereum mainnet-like node configuration with Geth as the execution layer and lighthouse as the consensus layer. Kurtosis also requires docker to be installed since it runs everything inside of docker containers. 
All of the above need to be installed and available in the path in order for the tool to work. diff --git a/dev-genesis.json b/assets/dev-genesis.json similarity index 100% rename from dev-genesis.json rename to assets/dev-genesis.json diff --git a/crates/common/src/types/identifiers.rs b/crates/common/src/types/identifiers.rs index d642f98..349081c 100644 --- a/crates/common/src/types/identifiers.rs +++ b/crates/common/src/types/identifiers.rs @@ -29,6 +29,8 @@ use strum::{AsRefStr, Display, EnumString, IntoStaticStr}; pub enum PlatformIdentifier { /// The Go-ethereum reference full node EVM implementation with the solc compiler. GethEvmSolc, + /// The Lighthouse Go-ethereum reference full node EVM implementation with the solc compiler. + LighthouseGethEvmSolc, /// The kitchensink node with the PolkaVM backend with the resolc compiler. KitchensinkPolkavmResolc, /// The kitchensink node with the REVM backend with the solc compiler. @@ -87,6 +89,8 @@ pub enum CompilerIdentifier { pub enum NodeIdentifier { /// The go-ethereum node implementation. Geth, + /// The go-ethereum node implementation with Lighthouse as the consensus layer. + LighthouseGeth, /// The Kitchensink node implementation. Kitchensink, /// The revive dev node implementation. diff --git a/crates/config/src/lib.rs b/crates/config/src/lib.rs index f55b8cd..b1551bc 100644 --- a/crates/config/src/lib.rs +++ b/crates/config/src/lib.rs @@ -79,6 +79,15 @@ impl AsRef for Context { } } +impl AsRef for Context { + fn as_ref(&self) -> &KurtosisConfiguration { + match self { + Self::ExecuteTests(context) => context.as_ref().as_ref(), + Self::ExportJsonSchema => unreachable!(), + } + } +} + impl AsRef for Context { fn as_ref(&self) -> &KitchensinkConfiguration { match self { @@ -190,6 +199,10 @@ pub struct TestExecutionContext { #[clap(flatten, next_help_heading = "Geth Configuration")] pub geth_configuration: GethConfiguration, + /// Configuration parameters for the lighthouse node. 
+ #[clap(flatten, next_help_heading = "Lighthouse Configuration")] + pub lighthouse_configuration: KurtosisConfiguration, + /// Configuration parameters for the Kitchensink. #[clap(flatten, next_help_heading = "Kitchensink Configuration")] pub kitchensink_configuration: KitchensinkConfiguration, @@ -253,6 +266,12 @@ impl AsRef for TestExecutionContext { } } +impl AsRef for TestExecutionContext { + fn as_ref(&self) -> &KurtosisConfiguration { + &self.lighthouse_configuration + } +} + impl AsRef for TestExecutionContext { fn as_ref(&self) -> &KitchensinkConfiguration { &self.kitchensink_configuration @@ -335,12 +354,27 @@ pub struct GethConfiguration { #[clap( id = "geth.start-timeout-ms", long = "geth.start-timeout-ms", - default_value = "5000", + default_value = "30000", value_parser = parse_duration )] pub start_timeout_ms: Duration, } +/// A set of configuration parameters for kurtosis. +#[derive(Clone, Debug, Parser, Serialize)] +pub struct KurtosisConfiguration { + /// Specifies the path of the kurtosis binary to be used by the tool. + /// + /// If this is not specified, then the tool assumes that it should use the kurtosis binary that's + /// provided in the user's $PATH. + #[clap( + id = "kurtosis.path", + long = "kurtosis.path", + default_value = "kurtosis" + )] + pub path: PathBuf, +} + /// A set of configuration parameters for Kitchensink. 
#[derive(Clone, Debug, Parser, Serialize)] pub struct KitchensinkConfiguration { @@ -359,7 +393,7 @@ pub struct KitchensinkConfiguration { #[clap( id = "kitchensink.start-timeout-ms", long = "kitchensink.start-timeout-ms", - default_value = "5000", + default_value = "30000", value_parser = parse_duration )] pub start_timeout_ms: Duration, @@ -387,7 +421,7 @@ pub struct ReviveDevNodeConfiguration { #[clap( id = "revive-dev-node.start-timeout-ms", long = "revive-dev-node.start-timeout-ms", - default_value = "5000", + default_value = "30000", value_parser = parse_duration )] pub start_timeout_ms: Duration, @@ -407,7 +441,7 @@ pub struct EthRpcConfiguration { #[clap( id = "eth-rpc.start-timeout-ms", long = "eth-rpc.start-timeout-ms", - default_value = "5000", + default_value = "30000", value_parser = parse_duration )] pub start_timeout_ms: Duration, @@ -431,7 +465,7 @@ pub struct GenesisConfiguration { impl GenesisConfiguration { pub fn genesis(&self) -> anyhow::Result<&Genesis> { static DEFAULT_GENESIS: LazyLock = LazyLock::new(|| { - let genesis = include_str!("../../../dev-genesis.json"); + let genesis = include_str!("../../../assets/dev-genesis.json"); serde_json::from_str(genesis).unwrap() }); @@ -465,7 +499,7 @@ pub struct WalletConfiguration { /// This argument controls which private keys the nodes should have access to and be added to /// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set /// of the node. - #[clap(long = "wallet.additional-keys", default_value_t = 100_000)] + #[clap(long = "wallet.additional-keys", default_value_t = 200)] additional_keys: usize, /// The wallet object that will be used. 
diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index 8bc93e2..dbf576b 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -13,7 +13,9 @@ use anyhow::Context as _; use revive_dt_common::types::*; use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc}; use revive_dt_config::*; -use revive_dt_node::{Node, geth::GethNode, substrate::SubstrateNode}; +use revive_dt_node::{ + Node, geth::GethNode, lighthouse_geth::LighthouseGethNode, substrate::SubstrateNode, +}; use revive_dt_node_interaction::EthereumNode; use tracing::info; @@ -104,6 +106,51 @@ impl Platform for GethEvmSolcPlatform { } } +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] +pub struct LighthouseGethEvmSolcPlatform; + +impl Platform for LighthouseGethEvmSolcPlatform { + fn platform_identifier(&self) -> PlatformIdentifier { + PlatformIdentifier::LighthouseGethEvmSolc + } + + fn node_identifier(&self) -> NodeIdentifier { + NodeIdentifier::LighthouseGeth + } + + fn vm_identifier(&self) -> VmIdentifier { + VmIdentifier::Evm + } + + fn compiler_identifier(&self) -> CompilerIdentifier { + CompilerIdentifier::Solc + } + + fn new_node( + &self, + context: Context, + ) -> anyhow::Result>>> { + let genesis_configuration = AsRef::::as_ref(&context); + let genesis = genesis_configuration.genesis()?.clone(); + Ok(thread::spawn(move || { + let node = LighthouseGethNode::new(context); + let node = spawn_node::(node, genesis)?; + Ok(Box::new(node) as Box<_>) + })) + } + + fn new_compiler( + &self, + context: Context, + version: Option, + ) -> Pin>>>> { + Box::pin(async move { + let compiler = Solc::new(context, version).await; + compiler.map(|compiler| Box::new(compiler) as Box) + }) + } +} + #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] pub struct KitchensinkPolkavmResolcPlatform; @@ -316,6 +363,9 @@ impl From for Box { fn from(value: PlatformIdentifier) -> Self { match value { PlatformIdentifier::GethEvmSolc 
=> Box::new(GethEvmSolcPlatform) as Box<_>, + PlatformIdentifier::LighthouseGethEvmSolc => { + Box::new(LighthouseGethEvmSolcPlatform) as Box<_> + } PlatformIdentifier::KitchensinkPolkavmResolc => { Box::new(KitchensinkPolkavmResolcPlatform) as Box<_> } @@ -336,6 +386,9 @@ impl From for &dyn Platform { fn from(value: PlatformIdentifier) -> Self { match value { PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform as &dyn Platform, + PlatformIdentifier::LighthouseGethEvmSolc => { + &LighthouseGethEvmSolcPlatform as &dyn Platform + } PlatformIdentifier::KitchensinkPolkavmResolc => { &KitchensinkPolkavmResolcPlatform as &dyn Platform } diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index b895165..d482621 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -22,6 +22,8 @@ revive-dt-node-interaction = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } +serde_with = { workspace = true } +serde_yaml_ng = { workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index bcf6ab6..5cbedf8 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -6,6 +6,7 @@ use revive_dt_node_interaction::EthereumNode; pub mod common; pub mod constants; pub mod geth; +pub mod lighthouse_geth; pub mod process; pub mod substrate; diff --git a/crates/node/src/lighthouse_geth.rs b/crates/node/src/lighthouse_geth.rs new file mode 100644 index 0000000..7029c68 --- /dev/null +++ b/crates/node/src/lighthouse_geth.rs @@ -0,0 +1,1033 @@ +//! This module implements an Ethereum network that's identical to mainnet in all ways where Geth is +//! used as the execution engine and doesn't perform any kind of consensus, which now happens on the +//! beacon chain and between the beacon nodes. We're using lighthouse as the consensus node in this +//! case with 12 second block slots which is identical to Ethereum mainnet. +//! +//! 
This implementation uses the Kurtosis tool to spawn the various nodes that are needed which means +//! that this tool is now a requirement that needs to be installed in order for this target to be +//! used. Additionally, the Kurtosis tool uses Docker and therefore docker is another dependency +//! that the tool has. + +use std::{ + collections::BTreeMap, + fs::{File, create_dir_all}, + io::Read, + ops::ControlFlow, + path::PathBuf, + pin::Pin, + process::{Command, Stdio}, + sync::{ + Arc, + atomic::{AtomicU32, Ordering}, + }, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; + +use alloy::{ + eips::BlockNumberOrTag, + genesis::{Genesis, GenesisAccount}, + network::{Ethereum, EthereumWallet, NetworkWallet}, + primitives::{ + Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256, address, + }, + providers::{ + Provider, ProviderBuilder, + ext::DebugApi, + fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller}, + }, + rpc::types::{ + EIP1186AccountProofResponse, TransactionRequest, + trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame}, + }, +}; +use anyhow::Context as _; +use revive_common::EVMVersion; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use serde_with::serde_as; +use tracing::{Instrument, instrument}; + +use revive_dt_common::{ + fs::clear_directory, + futures::{PollingWaitBehavior, poll}, +}; +use revive_dt_config::*; +use revive_dt_format::traits::ResolverApi; +use revive_dt_node_interaction::EthereumNode; + +use crate::{ + Node, + common::FallbackGasFiller, + constants::INITIAL_BALANCE, + process::{Process, ProcessReadinessWaitBehavior}, +}; + +static NODE_COUNT: AtomicU32 = AtomicU32::new(0); + +/// The lighthouse and go-ethereum network implementation, spawned and managed through Kurtosis. +/// +/// Implements helpers to initialize, spawn and wait the network. +/// +/// Connects to the geth execution layer over WebSocket (the HTTP and WS APIs are enabled). +/// +/// Prunes the child process and the base directory on drop. 
+#[derive(Debug)] +#[allow(clippy::type_complexity)] +pub struct LighthouseGethNode { + /* Node Identifier */ + id: u32, + connection_string: String, + enclave_name: String, + + /* Directory Paths */ + base_directory: PathBuf, + logs_directory: PathBuf, + + /* File Paths */ + config_file_path: PathBuf, + + /* Binary Paths & Timeouts */ + kurtosis_binary_path: PathBuf, + + /* Spawned Processes */ + process: Option, + + /* Provider Related Fields */ + wallet: Arc, + nonce_manager: CachedNonceManager, + chain_id_filler: ChainIdFiller, +} + +impl LighthouseGethNode { + const BASE_DIRECTORY: &str = "lighthouse"; + const LOGS_DIRECTORY: &str = "logs"; + + const IPC_FILE_NAME: &str = "geth.ipc"; + const CONFIG_FILE_NAME: &str = "config.yaml"; + + const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress"; + const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet"; + + const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60); + const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60); + + const VALIDATOR_MNEMONIC: &str = "giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete"; + + pub fn new( + context: impl AsRef + + AsRef + + AsRef + + Clone, + ) -> Self { + let working_directory_configuration = + AsRef::::as_ref(&context); + let wallet_configuration = AsRef::::as_ref(&context); + let kurtosis_configuration = AsRef::::as_ref(&context); + + let geth_directory = working_directory_configuration + .as_path() + .join(Self::BASE_DIRECTORY); + let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); + let base_directory = geth_directory.join(id.to_string()); + + let wallet = wallet_configuration.wallet(); + + Self { + /* Node Identifier */ + id, + connection_string: base_directory + .join(Self::IPC_FILE_NAME) + .display() + .to_string(), + enclave_name: format!( + "enclave-{}-{}", + SystemTime::now() + 
.duration_since(UNIX_EPOCH) + .expect("Must not fail") + .as_nanos(), + id + ), + + /* File Paths */ + config_file_path: base_directory.join(Self::CONFIG_FILE_NAME), + + /* Directory Paths */ + logs_directory: base_directory.join(Self::LOGS_DIRECTORY), + base_directory, + + /* Binary Paths & Timeouts */ + kurtosis_binary_path: kurtosis_configuration.path.clone(), + + /* Spawned Processes */ + process: None, + + /* Provider Related Fields */ + wallet: wallet.clone(), + nonce_manager: Default::default(), + chain_id_filler: Default::default(), + } + } + + /// Create the node directory and call `geth init` to configure the genesis. + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> { + self.init_directories() + .context("Failed to initialize the directories of the Lighthouse Geth node.")?; + self.init_kurtosis_config_file() + .context("Failed to write the config file to the FS")?; + + Ok(self) + } + + fn init_directories(&self) -> anyhow::Result<()> { + let _ = clear_directory(&self.base_directory); + let _ = clear_directory(&self.logs_directory); + + create_dir_all(&self.base_directory) + .context("Failed to create base directory for geth node")?; + create_dir_all(&self.logs_directory) + .context("Failed to create logs directory for geth node")?; + + Ok(()) + } + + fn init_kurtosis_config_file(&self) -> anyhow::Result<()> { + let config = KurtosisNetworkConfig { + participants: vec![ParticipantParameters { + execution_layer_type: ExecutionLayerType::Geth, + consensus_layer_type: ConsensusLayerType::Lighthouse, + execution_layer_extra_parameters: vec![ + "--nodiscover".to_string(), + "--cache=4096".to_string(), + "--txpool.globalslots=100000".to_string(), + "--txpool.globalqueue=100000".to_string(), + "--txpool.accountslots=128".to_string(), + "--txpool.accountqueue=1024".to_string(), + "--http.api=admin,engine,net,eth,web3,debug,txpool".to_string(), + 
"--http.addr=0.0.0.0".to_string(), + "--ws".to_string(), + "--ws.addr=0.0.0.0".to_string(), + "--ws.port=8546".to_string(), + "--ws.api=eth,net,web3,txpool,engine".to_string(), + "--ws.origins=*".to_string(), + ], + consensus_layer_extra_parameters: vec![ + "--disable-deposit-contract-sync".to_string(), + ], + }], + network_parameters: NetworkParameters { + preset: NetworkPreset::Mainnet, + seconds_per_slot: 12, + network_id: 420420420, + deposit_contract_address: address!("0x00000000219ab540356cBB839Cbe05303d7705Fa"), + altair_fork_epoch: 0, + bellatrix_fork_epoch: 0, + capella_fork_epoch: 0, + deneb_fork_epoch: 0, + electra_fork_epoch: 0, + preregistered_validator_keys_mnemonic: Self::VALIDATOR_MNEMONIC.to_string(), + num_validator_keys_per_node: 64, + genesis_delay: 10, + prefunded_accounts: { + let map = NetworkWallet::::signer_addresses(&self.wallet) + .map(|address| { + ( + address, + GenesisAccount::default() + .with_balance(INITIAL_BALANCE.try_into().unwrap()), + ) + }) + .collect::>(); + serde_json::to_string(&map).unwrap() + }, + }, + wait_for_finalization: false, + port_publisher: Some(PortPublisherParameters { + execution_layer_port_publisher_parameters: Some( + PortPublisherSingleItemParameters { + enabled: Some(true), + public_port_start: Some(32000 + self.id as u16 * 1000), + }, + ), + consensus_layer_port_publisher_parameters: Default::default(), + }), + }; + + let file = File::create(self.config_file_path.as_path()) + .context("Failed to open the config yaml file")?; + serde_yaml_ng::to_writer(file, &config) + .context("Failed to write the config to the yaml file")?; + + Ok(()) + } + + /// Spawn the go-ethereum node child process. 
+ #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn spawn_process(&mut self) -> anyhow::Result<&mut Self> { + let process = Process::new( + None, + self.logs_directory.as_path(), + self.kurtosis_binary_path.as_path(), + |command, stdout, stderr| { + command + .arg("run") + .arg("--enclave") + .arg(self.enclave_name.as_str()) + .arg("github.com/ethpandaops/ethereum-package") + .arg("--args-file") + .arg(self.config_file_path.as_path()) + .stdout(stdout) + .stderr(stderr); + }, + ProcessReadinessWaitBehavior::TimeBoundedWaitFunction { + max_wait_duration: Duration::from_secs(15 * 60), + check_function: Box::new(|stdout, stderr| { + for line in [stdout, stderr].iter().flatten() { + if line.to_lowercase().contains("error encountered") { + anyhow::bail!("Encountered an error when starting Kurtosis") + } else if line.contains("RUNNING") { + return Ok(true); + } + } + Ok(false) + }), + }, + ) + .inspect_err(|err| { + tracing::error!(?err, "Failed to spawn Kurtosis"); + self.shutdown().expect("Failed to shutdown kurtosis"); + })?; + self.process = Some(process); + + let child = Command::new(self.kurtosis_binary_path.as_path()) + .arg("enclave") + .arg("inspect") + .arg(self.enclave_name.as_str()) + .stdout(Stdio::piped()) + .spawn() + .context("Failed to spawn the kurtosis enclave inspect process")?; + + let stdout = { + let mut stdout = String::default(); + child + .stdout + .expect("Should be piped") + .read_to_string(&mut stdout) + .context("Failed to read stdout of kurtosis inspect to string")?; + stdout + }; + + self.connection_string = stdout + .split("el-1-geth-lighthouse") + .nth(1) + .and_then(|str| str.split("ws").nth(1)) + .and_then(|str| str.split("->").nth(1)) + .and_then(|str| str.split("\n").next()) + .map(|str| format!("ws://{}", str.trim())) + .context("Failed to find the WS connection string of Kurtosis")?; + + Ok(self) + } + + async fn provider( + &self, + ) -> anyhow::Result, impl Provider, Ethereum>> + { + 
ProviderBuilder::new() + .disable_recommended_fillers() + .filler(FallbackGasFiller::new( + 25_000_000, + 1_000_000_000, + 1_000_000_000, + )) + .filler(self.chain_id_filler.clone()) + .filler(NonceFiller::new(self.nonce_manager.clone())) + .wallet(self.wallet.clone()) + .connect(&self.connection_string) + .await + .context("Failed to create the provider for Kurtosis") + } +} + +impl EthereumNode for LighthouseGethNode { + fn id(&self) -> usize { + self.id as _ + } + + fn connection_string(&self) -> &str { + &self.connection_string + } + + #[instrument( + level = "info", + skip_all, + fields(geth_node_id = self.id, connection_string = self.connection_string), + err, + )] + fn execute_transaction( + &self, + transaction: TransactionRequest, + ) -> Pin> + '_>> + { + Box::pin(async move { + let provider = self + .provider() + .await + .context("Failed to create provider for transaction submission")?; + + let pending_transaction = provider + .send_transaction(transaction) + .await + .inspect_err(|err| { + tracing::error!( + %err, + "Encountered an error when submitting the transaction" + ) + }) + .context("Failed to submit transaction to geth node")?; + let transaction_hash = *pending_transaction.tx_hash(); + + // The following is a fix for the "transaction indexing is in progress" error that we + // used to get. You can find more information on this in the following GH issue in geth + // https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on, + // before we can get the receipt of the transaction it needs to have been indexed by the + // node's indexer. Just because the transaction has been confirmed it doesn't mean that + // it has been indexed. When we call alloy's `get_receipt` it checks if the transaction + // was confirmed. If it has been, then it will call `eth_getTransactionReceipt` method + // which _might_ return the above error if the tx has not yet been indexed yet. 
So, we + // need to implement a retry mechanism for the receipt to keep retrying to get it until + // it eventually works, but we only do that if the error we get back is the "transaction + // indexing is in progress" error or if the receipt is None. + // + // Getting the transaction indexed and taking a receipt can take a long time especially + // when a lot of transactions are being submitted to the node. Thus, while initially we + // only allowed for 60 seconds of waiting with a 1 second delay in polling, we need to + // allow for a larger wait time. Therefore, in here we allow for 5 minutes of waiting + // with exponential backoff each time we attempt to get the receipt and find that it's + // not available. + let provider = Arc::new(provider); + poll( + Self::RECEIPT_POLLING_DURATION, + PollingWaitBehavior::Constant(Duration::from_millis(200)), + move || { + let provider = provider.clone(); + async move { + match provider.get_transaction_receipt(transaction_hash).await { + Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)), + Ok(None) => Ok(ControlFlow::Continue(())), + Err(error) => { + let error_string = error.to_string(); + match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) { + true => Ok(ControlFlow::Continue(())), + false => Err(error.into()), + } + } + } + } + }, + ) + .instrument(tracing::info_span!( + "Awaiting transaction receipt", + ?transaction_hash + )) + .await + }) + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn trace_transaction( + &self, + tx_hash: TxHash, + trace_options: GethDebugTracingOptions, + ) -> Pin> + '_>> + { + Box::pin(async move { + let provider = Arc::new( + self.provider() + .await + .context("Failed to create provider for tracing")?, + ); + poll( + Self::TRACE_POLLING_DURATION, + PollingWaitBehavior::Constant(Duration::from_millis(200)), + move || { + let provider = provider.clone(); + let trace_options = trace_options.clone(); + async move { + match provider + 
.debug_trace_transaction(tx_hash, trace_options) + .await + { + Ok(trace) => Ok(ControlFlow::Break(trace)), + Err(error) => { + let error_string = error.to_string(); + match error_string.contains(Self::TRANSACTION_TRACING_ERROR) { + true => Ok(ControlFlow::Continue(())), + false => Err(error.into()), + } + } + } + } + }, + ) + .await + }) + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn state_diff( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { + diff_mode: Some(true), + disable_code: None, + disable_storage: None, + }); + match self + .trace_transaction(tx_hash, trace_options) + .await + .context("Failed to trace transaction for prestate diff")? + .try_into_pre_state_frame() + .context("Failed to convert trace into pre-state frame")? + { + PreStateFrame::Diff(diff) => Ok(diff), + _ => anyhow::bail!("expected a diff mode trace"), + } + }) + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn balance_of( + &self, + address: Address, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider() + .await + .context("Failed to get the Geth provider")? + .get_balance(address) + .await + .map_err(Into::into) + }) + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn latest_state_proof( + &self, + address: Address, + keys: Vec, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider() + .await + .context("Failed to get the Geth provider")? 
+ .get_proof(address, keys) + .latest() + .await + .map_err(Into::into) + }) + } + + // #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn resolver( + &self, + ) -> Pin>> + '_>> { + Box::pin(async move { + let id = self.id; + let provider = self.provider().await?; + Ok(Arc::new(LighthouseGethNodeResolver { id, provider }) as Arc) + }) + } + + fn evm_version(&self) -> EVMVersion { + EVMVersion::Cancun + } +} + +pub struct LighthouseGethNodeResolver, P: Provider> { + id: u32, + provider: FillProvider, +} + +impl, P: Provider> ResolverApi + for LighthouseGethNodeResolver +{ + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn chain_id( + &self, + ) -> Pin> + '_>> { + Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) }) + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn transaction_gas_price( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_transaction_receipt(tx_hash) + .await? + .context("Failed to get the transaction receipt") + .map(|receipt| receipt.effective_gas_price) + }) + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn block_gas_limit( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") + .map(|block| block.header.gas_limit as _) + }) + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn block_coinbase( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? 
+ .context("Failed to get the Geth block, perhaps there are no blocks?") + .map(|block| block.header.beneficiary) + }) + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn block_difficulty( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") + .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) + }) + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn block_base_fee( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") + .and_then(|block| { + block + .header + .base_fee_per_gas + .context("Failed to get the base fee per gas") + }) + }) + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn block_hash( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") + .map(|block| block.header.hash) + }) + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn block_timestamp( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? 
+ .context("Failed to get the Geth block, perhaps there are no blocks?") + .map(|block| block.header.timestamp) + }) + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn last_block_number(&self) -> Pin> + '_>> { + Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) }) + } +} + +impl Node for LighthouseGethNode { + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn shutdown(&mut self) -> anyhow::Result<()> { + if !Command::new(self.kurtosis_binary_path.as_path()) + .arg("enclave") + .arg("rm") + .arg("-f") + .arg(self.enclave_name.as_str()) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .spawn() + .expect("Failed to spawn the enclave kill command") + .wait() + .expect("Failed to wait for the enclave kill command") + .success() + { + panic!("Failed to shut down the enclave {}", self.enclave_name) + } + + drop(self.process.take()); + + Ok(()) + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> { + self.init(genesis)?.spawn_process()?; + Ok(()) + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn version(&self) -> anyhow::Result { + let output = Command::new(&self.kurtosis_binary_path) + .arg("version") + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::null()) + .spawn() + .context("Failed to spawn geth --version process")? + .wait_with_output() + .context("Failed to wait for geth --version output")? 
+ .stdout; + Ok(String::from_utf8_lossy(&output).into()) + } +} + +impl Drop for LighthouseGethNode { + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn drop(&mut self) { + self.shutdown().expect("Failed to shutdown") + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +struct KurtosisNetworkConfig { + pub participants: Vec, + + #[serde(rename = "network_params")] + pub network_parameters: NetworkParameters, + + pub wait_for_finalization: bool, + + #[serde(skip_serializing_if = "Option::is_none")] + pub port_publisher: Option, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +struct ParticipantParameters { + #[serde(rename = "el_type")] + pub execution_layer_type: ExecutionLayerType, + + #[serde(rename = "el_extra_params")] + pub execution_layer_extra_parameters: Vec, + + #[serde(rename = "cl_type")] + pub consensus_layer_type: ConsensusLayerType, + + #[serde(rename = "cl_extra_params")] + pub consensus_layer_extra_parameters: Vec, +} + +#[derive(Clone, Copy, Debug, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +enum ExecutionLayerType { + Geth, +} + +#[derive(Clone, Copy, Debug, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +enum ConsensusLayerType { + Lighthouse, +} + +#[serde_as] +#[derive(Clone, Debug, Serialize, Deserialize)] +struct NetworkParameters { + pub preset: NetworkPreset, + + pub seconds_per_slot: u64, + + #[serde_as(as = "serde_with::DisplayFromStr")] + pub network_id: u64, + + pub deposit_contract_address: Address, + + pub altair_fork_epoch: u64, + pub bellatrix_fork_epoch: u64, + pub capella_fork_epoch: u64, + pub deneb_fork_epoch: u64, + pub electra_fork_epoch: u64, + + pub preregistered_validator_keys_mnemonic: String, + + pub num_validator_keys_per_node: u64, + + pub genesis_delay: u64, + + pub prefunded_accounts: String, +} + +#[derive(Clone, Copy, Debug, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +enum NetworkPreset { + Mainnet, +} + +#[derive(Clone, Debug, 
Serialize, Deserialize)] +struct PortPublisherParameters { + #[serde(rename = "el", skip_serializing_if = "Option::is_none")] + pub execution_layer_port_publisher_parameters: Option, + + #[serde(rename = "cl", skip_serializing_if = "Option::is_none")] + pub consensus_layer_port_publisher_parameters: Option, +} + +#[derive(Clone, Copy, Debug, Serialize, Deserialize)] +struct PortPublisherSingleItemParameters { + #[serde(skip_serializing_if = "Option::is_none")] + pub enabled: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + pub public_port_start: Option, +} + +/// Custom serializer/deserializer for u128 values as 0x-prefixed hex strings +pub struct HexPrefixedU128; + +impl serde_with::SerializeAs for HexPrefixedU128 { + fn serialize_as(source: &u128, serializer: S) -> Result + where + S: Serializer, + { + let hex_string = format!("0x{source:x}"); + serializer.serialize_str(&hex_string) + } +} + +impl<'de> serde_with::DeserializeAs<'de, u128> for HexPrefixedU128 { + fn deserialize_as(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let hex_string = String::deserialize(deserializer)?; + if let Some(hex_part) = hex_string.strip_prefix("0x") { + u128::from_str_radix(hex_part, 16).map_err(serde::de::Error::custom) + } else { + Err(serde::de::Error::custom("Expected 0x-prefixed hex string")) + } + } +} + +#[cfg(test)] +mod tests { + use std::sync::Mutex; + + use super::*; + + fn test_config() -> TestExecutionContext { + TestExecutionContext::default() + } + + fn new_node() -> (TestExecutionContext, LighthouseGethNode) { + // Note: When we run the tests in the CI we found that if they're all + // run in parallel then the CI is unable to start all of the nodes in + // time and their start up times-out. Therefore, we want all of the + // nodes to be started in series and not in parallel. To do this, we use + // a dummy mutex here such that there can only be a single node being + // started up at any point of time. 
This will make our tests run slower + // but it will allow the node startup to not timeout. + // + // Note: an alternative to starting all of the nodes in series and not + // in parallel would be for us to reuse the same node between tests + // which is not the best thing to do in my opinion as it removes all + // of the isolation between tests and makes them depend on what other + // tests do. For example, if one test checks what the block number is + // and another test submits a transaction then the tx test would have + // side effects that affect the block number test. + static NODE_START_MUTEX: Mutex<()> = Mutex::new(()); + let _guard = NODE_START_MUTEX.lock().unwrap(); + + let context = test_config(); + let mut node = LighthouseGethNode::new(&context); + node.init(context.genesis_configuration.genesis().unwrap().clone()) + .expect("Failed to initialize the node") + .spawn_process() + .expect("Failed to spawn the node process"); + (context, node) + } + + #[tokio::test] + async fn node_mines_simple_transfer_transaction_and_returns_receipt() { + // Arrange + let (context, node) = new_node(); + + let account_address = context + .wallet_configuration + .wallet() + .default_signer() + .address(); + let transaction = TransactionRequest::default() + .to(account_address) + .value(U256::from(100_000_000_000_000u128)); + + // Act + let receipt = node.execute_transaction(transaction).await; + + // Assert + let _ = receipt.expect("Failed to send the transfer transaction"); + } + + #[test] + fn version_works() { + // Arrange + let (_context, node) = new_node(); + + // Act + let version = node.version(); + + // Assert + let version = version.expect("Failed to get the version"); + assert!( + version.starts_with("CLI Version"), + "expected version string, got: '{version}'" + ); + } + + #[tokio::test] + async fn can_get_chain_id_from_node() { + // Arrange + let (_context, node) = new_node(); + + // Act + let chain_id = node.resolver().await.unwrap().chain_id().await; + + // 
Assert + let chain_id = chain_id.expect("Failed to get the chain id"); + assert_eq!(chain_id, 420_420_420); + } + + #[tokio::test] + async fn can_get_gas_limit_from_node() { + // Arrange + let (_context, node) = new_node(); + + // Act + let gas_limit = node + .resolver() + .await + .unwrap() + .block_gas_limit(BlockNumberOrTag::Latest) + .await; + + // Assert + let _ = gas_limit.expect("Failed to get the gas limit"); + } + + #[tokio::test] + async fn can_get_coinbase_from_node() { + // Arrange + let (_context, node) = new_node(); + + // Act + let coinbase = node + .resolver() + .await + .unwrap() + .block_coinbase(BlockNumberOrTag::Latest) + .await; + + // Assert + let _ = coinbase.expect("Failed to get the coinbase"); + } + + #[tokio::test] + async fn can_get_block_difficulty_from_node() { + // Arrange + let (_context, node) = new_node(); + + // Act + let block_difficulty = node + .resolver() + .await + .unwrap() + .block_difficulty(BlockNumberOrTag::Latest) + .await; + + // Assert + let _ = block_difficulty.expect("Failed to get the block difficulty"); + } + + #[tokio::test] + async fn can_get_block_hash_from_node() { + // Arrange + let (_context, node) = new_node(); + + // Act + let block_hash = node + .resolver() + .await + .unwrap() + .block_hash(BlockNumberOrTag::Latest) + .await; + + // Assert + let _ = block_hash.expect("Failed to get the block hash"); + } + + #[tokio::test] + async fn can_get_block_timestamp_from_node() { + // Arrange + let (_context, node) = new_node(); + + // Act + let block_timestamp = node + .resolver() + .await + .unwrap() + .block_timestamp(BlockNumberOrTag::Latest) + .await; + + // Assert + let _ = block_timestamp.expect("Failed to get the block timestamp"); + } + + #[tokio::test] + async fn can_get_block_number_from_node() { + // Arrange + let (_context, node) = new_node(); + + // Act + let block_number = node.resolver().await.unwrap().last_block_number().await; + + // Assert + let _ = block_number.expect("Failed to get the block 
number"); + } +} diff --git a/crates/node/src/process.rs b/crates/node/src/process.rs index ea1d29b..23057f0 100644 --- a/crates/node/src/process.rs +++ b/crates/node/src/process.rs @@ -68,7 +68,7 @@ impl Process { command_building_callback(&mut command, stdout_logs_file, stderr_logs_file); command }; - let child = command + let mut child = command .spawn() .context("Failed to spawn the built command")?; @@ -93,10 +93,22 @@ impl Process { let mut stdout_lines = BufReader::new(stdout_logs_file).lines(); let mut stderr_lines = BufReader::new(stderr_logs_file).lines(); + let mut stdout = String::new(); + let mut stderr = String::new(); + loop { let stdout_line = stdout_lines.next().and_then(Result::ok); let stderr_line = stderr_lines.next().and_then(Result::ok); + if let Some(stdout_line) = stdout_line.as_ref() { + stdout.push_str(stdout_line); + stdout.push('\n'); + } + if let Some(stderr_line) = stderr_line.as_ref() { + stderr.push_str(stderr_line); + stderr.push('\n'); + } + let check_result = check_function(stdout_line.as_deref(), stderr_line.as_deref()) .context("Failed to wait for the process to be ready")?; @@ -106,10 +118,21 @@ impl Process { } if Instant::now().duration_since(spawn_time) > max_wait_duration { - bail!("Waited for the process to start but it failed to start in time") + bail!( + "Waited for the process to start but it failed to start in time. stderr {stderr} - stdout {stdout}" + ) } } } + ProcessReadinessWaitBehavior::WaitForCommandToExit => { + if !child + .wait() + .context("Failed waiting for kurtosis run process to finish")? + .success() + { + anyhow::bail!("Failed to initialize kurtosis network",); + } + } } Ok(Self { @@ -137,6 +160,9 @@ pub enum ProcessReadinessWaitBehavior { /// straight away. NoStartupWait, + /// Waits for the command to exit. + WaitForCommandToExit, + /// The process does require some amount of wait duration after it's been started. WaitDuration(Duration),