Compare commits

..

20 Commits

Author SHA1 Message Date
Omar Abdulla 372cd5c52b Update tests 2025-09-19 18:55:50 +03:00
Omar Abdulla e122bbd996 Update the default values of the cli 2025-09-19 18:15:50 +03:00
Omar Abdulla 6313ccb9b5 Resolve merge conflicts 2025-09-18 22:59:11 +03:00
Omar Abdulla 6b2516f639 Final set of renames 2025-09-18 22:44:39 +03:00
Omar Abdulla d4869deb68 Update the default values for the platforms 2025-09-18 20:16:57 +03:00
Omar Abdulla 52b21f8982 Remove an un-needed dependency 2025-09-18 20:11:33 +03:00
Omar Abdulla 13a5b5a7ee Remove the old traits 2025-09-18 20:10:32 +03:00
Omar Abdulla b962d032b9 Remoe all references to leader and follower 2025-09-18 20:03:33 +03:00
Omar Abdulla 496bc9a0ec Replace infra with the dyn infra 2025-09-18 19:59:52 +03:00
Omar Abdulla 92fc7894c0 Add a way to convert platform identifier into a platform 2025-09-17 21:27:33 +03:00
Omar Abdulla d7f69449af Add all of the platforms that we support 2025-09-17 21:06:29 +03:00
Omar Abdulla f0f59ad024 Provide a common node implementation for substrate chains 2025-09-17 20:23:31 +03:00
Omar Abdulla ac0f4e0cf2 Introduce a geth platform 2025-09-17 19:54:50 +03:00
Omar Abdulla 9e4f2e95f1 Support the dyn compiler in the builder pattern 2025-09-17 19:31:12 +03:00
Omar Abdulla 7aadd0a7f7 Implement the dyn compiler trait for compilers 2025-09-17 19:29:23 +03:00
Omar Abdulla 1a25c8e0ab Add more identifiers to the platform 2025-09-17 06:25:35 +03:00
Omar Abdulla 01d8042841 Allow for compilers to be created in the dyn trait 2025-09-17 06:10:44 +03:00
Omar Abdulla 8a05f8e6e8 Make the ethereum node trait object compatible 2025-09-17 06:01:13 +03:00
Omar Abdulla 9fc74aeea0 Groundwork for dyn traits 2025-09-17 05:47:13 +03:00
Omar Abdulla 49cbc51546 Generate schema for the metadata file 2025-09-08 17:09:35 +03:00
19 changed files with 464 additions and 857 deletions
+1 -2
View File
@@ -12,5 +12,4 @@ profile.json.gz
resolc-compiler-tests
workdir
!/schema.json
!/dev-genesis.json
!/schema.json
Generated
-2
View File
@@ -4467,8 +4467,6 @@ dependencies = [
name = "revive-dt-common"
version = "0.1.0"
dependencies = [
"alloy",
"alloy-primitives",
"anyhow",
"clap",
"moka",
-2
View File
@@ -9,8 +9,6 @@ repository.workspace = true
rust-version.workspace = true
[dependencies]
alloy = { workspace = true }
alloy-primitives = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }
moka = { workspace = true, features = ["sync"] }
-2
View File
@@ -1,9 +1,7 @@
mod identifiers;
mod mode;
mod private_key_allocator;
mod version_or_requirement;
pub use identifiers::*;
pub use mode::*;
pub use private_key_allocator::*;
pub use version_or_requirement::*;
@@ -1,35 +0,0 @@
use alloy::signers::local::PrivateKeySigner;
use alloy_primitives::U256;
use anyhow::{Result, bail};
/// This is a sequential private key allocator. When instantiated, it allocated private keys in
/// sequentially and in order until the maximum private key specified is reached.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PrivateKeyAllocator {
/// The next private key to be returned by the allocator when requested.
next_private_key: U256,
/// The highest private key (exclusive) that can be returned by this allocator.
highest_private_key_exclusive: U256,
}
impl PrivateKeyAllocator {
/// Creates a new instance of the private key allocator.
pub fn new(highest_private_key_exclusive: U256) -> Self {
Self {
next_private_key: U256::ZERO,
highest_private_key_exclusive,
}
}
/// Allocates a new private key and errors out if the maximum private key has been reached.
pub fn allocate(&mut self) -> Result<PrivateKeySigner> {
if self.next_private_key >= self.highest_private_key_exclusive {
bail!("Attempted to allocate a private key but failed since all have been allocated");
};
let private_key =
PrivateKeySigner::from_slice(self.next_private_key.to_be_bytes::<32>().as_slice())?;
self.next_private_key += U256::ONE;
Ok(private_key)
}
}
+1 -5
View File
@@ -431,7 +431,7 @@ pub struct GenesisConfiguration {
impl GenesisConfiguration {
pub fn genesis(&self) -> anyhow::Result<&Genesis> {
static DEFAULT_GENESIS: LazyLock<Genesis> = LazyLock::new(|| {
let genesis = include_str!("../../../dev-genesis.json");
let genesis = include_str!("../../../genesis.json");
serde_json::from_str(genesis).unwrap()
});
@@ -490,10 +490,6 @@ impl WalletConfiguration {
})
.clone()
}
pub fn highest_private_key_exclusive(&self) -> U256 {
U256::try_from(self.additional_keys).unwrap()
}
}
fn serialize_private_key<S>(value: &PrivateKeySigner, serializer: S) -> Result<S::Ok, S::Error>
+75 -131
View File
@@ -2,7 +2,6 @@
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use alloy::consensus::EMPTY_ROOT_HASH;
use alloy::hex;
@@ -18,27 +17,25 @@ use alloy::{
primitives::Address,
rpc::types::{TransactionRequest, trace::geth::DiffMode},
};
use anyhow::{Context as _, bail};
use anyhow::Context as _;
use futures::{TryStreamExt, future::try_join_all};
use indexmap::IndexMap;
use revive_dt_common::types::{PlatformIdentifier, PrivateKeyAllocator};
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_format::traits::{ResolutionContext, ResolverApi};
use revive_dt_report::ExecutionSpecificReporter;
use semver::Version;
use revive_dt_format::case::Case;
use revive_dt_format::metadata::{ContractIdent, ContractInstance, ContractPathAndIdent};
use revive_dt_format::steps::{
BalanceAssertionStep, Calldata, EtherValue, Expected, ExpectedOutput, FunctionCallStep, Method,
StepIdx, StepPath, StorageEmptyAssertionStep,
use revive_dt_format::input::{
BalanceAssertion, Calldata, EtherValue, Expected, ExpectedOutput, Input, Method, StepIdx,
StorageEmptyAssertion,
};
use revive_dt_format::{metadata::Metadata, steps::Step};
use revive_dt_format::metadata::{ContractIdent, ContractInstance, ContractPathAndIdent};
use revive_dt_format::{input::Step, metadata::Metadata};
use revive_dt_node_interaction::EthereumNode;
use tokio::sync::Mutex;
use tokio::try_join;
use tracing::{Instrument, info, info_span, instrument};
#[derive(Clone)]
pub struct CaseState {
/// A map of all of the compiled contracts for the given metadata file.
compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
@@ -55,10 +52,6 @@ pub struct CaseState {
/// The execution reporter.
execution_reporter: ExecutionSpecificReporter,
/// The private key allocator used for this case state. This is an Arc Mutex to allow for the
/// state to be cloned and for all of the clones to refer to the same allocator.
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
}
impl CaseState {
@@ -67,7 +60,6 @@ impl CaseState {
compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
execution_reporter: ExecutionSpecificReporter,
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
) -> Self {
Self {
compiled_contracts,
@@ -75,7 +67,6 @@ impl CaseState {
variables: Default::default(),
compiler_version,
execution_reporter,
private_key_allocator,
}
}
@@ -83,7 +74,6 @@ impl CaseState {
&mut self,
metadata: &Metadata,
step: &Step,
step_path: &StepPath,
node: &dyn EthereumNode,
) -> anyhow::Result<StepOutput> {
match step {
@@ -106,24 +96,6 @@ impl CaseState {
.context("Failed to handle storage empty assertion step")?;
Ok(StepOutput::StorageEmptyAssertion)
}
Step::Repeat(repetition_step) => {
self.handle_repeat(
metadata,
repetition_step.repeat,
&repetition_step.steps,
step_path,
node,
)
.await
.context("Failed to handle the repetition step")?;
Ok(StepOutput::Repetition)
}
Step::AllocateAccount(account_allocation) => {
self.handle_account_allocation(account_allocation.variable_name.as_str())
.await
.context("Failed to allocate account")?;
Ok(StepOutput::AccountAllocation)
}
}
.inspect(|_| info!("Step Succeeded"))
}
@@ -132,7 +104,7 @@ impl CaseState {
pub async fn handle_input(
&mut self,
metadata: &Metadata,
input: &FunctionCallStep,
input: &Input,
node: &dyn EthereumNode,
) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> {
let resolver = node.resolver().await?;
@@ -168,7 +140,7 @@ impl CaseState {
pub async fn handle_balance_assertion(
&mut self,
metadata: &Metadata,
balance_assertion: &BalanceAssertionStep,
balance_assertion: &BalanceAssertion,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
self.handle_balance_assertion_contract_deployment(metadata, balance_assertion, node)
@@ -184,7 +156,7 @@ impl CaseState {
pub async fn handle_storage_empty(
&mut self,
metadata: &Metadata,
storage_empty: &StorageEmptyAssertionStep,
storage_empty: &StorageEmptyAssertion,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
self.handle_storage_empty_assertion_contract_deployment(metadata, storage_empty, node)
@@ -196,50 +168,12 @@ impl CaseState {
Ok(())
}
#[instrument(level = "info", name = "Handling Repetition", skip_all)]
pub async fn handle_repeat(
&mut self,
metadata: &Metadata,
repetitions: usize,
steps: &[Step],
step_path: &StepPath,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
let tasks = (0..repetitions).map(|_| {
let mut state = self.clone();
async move {
for (step_idx, step) in steps.iter().enumerate() {
let step_path = step_path.append(step_idx);
state.handle_step(metadata, step, &step_path, node).await?;
}
Ok::<(), anyhow::Error>(())
}
});
try_join_all(tasks).await?;
Ok(())
}
#[instrument(level = "info", name = "Handling Account Allocation", skip_all)]
pub async fn handle_account_allocation(&mut self, variable_name: &str) -> anyhow::Result<()> {
let Some(variable_name) = variable_name.strip_prefix("$VARIABLE:") else {
bail!("Account allocation must start with $VARIABLE:");
};
let private_key = self.private_key_allocator.lock().await.allocate()?;
let account = private_key.address();
let variable = U256::from_be_slice(account.0.as_slice());
self.variables.insert(variable_name.to_string(), variable);
Ok(())
}
/// Handles the contract deployment for a given input performing it if it needs to be performed.
#[instrument(level = "info", skip_all)]
async fn handle_input_contract_deployment(
&mut self,
metadata: &Metadata,
input: &FunctionCallStep,
input: &Input,
node: &dyn EthereumNode,
) -> anyhow::Result<HashMap<ContractInstance, TransactionReceipt>> {
let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
@@ -260,16 +194,15 @@ impl CaseState {
.then_some(input.value)
.flatten();
let caller = {
let context = self.default_resolution_context();
let resolver = node.resolver().await?;
input
.caller
.resolve_address(resolver.as_ref(), context)
.await?
};
if let (_, _, Some(receipt)) = self
.get_or_deploy_contract_instance(&instance, metadata, caller, calldata, value, node)
.get_or_deploy_contract_instance(
&instance,
metadata,
input.caller,
calldata,
value,
node,
)
.await
.context("Failed to get or deploy contract instance during input execution")?
{
@@ -284,7 +217,7 @@ impl CaseState {
#[instrument(level = "info", skip_all)]
async fn handle_input_execution(
&mut self,
input: &FunctionCallStep,
input: &Input,
mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
node: &dyn EthereumNode,
) -> anyhow::Result<TransactionReceipt> {
@@ -348,7 +281,7 @@ impl CaseState {
#[instrument(level = "info", skip_all)]
fn handle_input_variable_assignment(
&mut self,
input: &FunctionCallStep,
input: &Input,
tracing_result: &CallFrame,
) -> anyhow::Result<()> {
let Some(ref assignments) = input.variable_assignments else {
@@ -379,26 +312,26 @@ impl CaseState {
#[instrument(level = "info", skip_all)]
async fn handle_input_expectations(
&self,
input: &FunctionCallStep,
input: &Input,
execution_receipt: &TransactionReceipt,
resolver: &(impl ResolverApi + ?Sized),
tracing_result: &CallFrame,
) -> anyhow::Result<()> {
// Resolving the `input.expected` into a series of expectations that we can then assert on.
let mut expectations = match input {
FunctionCallStep {
Input {
expected: Some(Expected::Calldata(calldata)),
..
} => vec![ExpectedOutput::new().with_calldata(calldata.clone())],
FunctionCallStep {
Input {
expected: Some(Expected::Expected(expected)),
..
} => vec![expected.clone()],
FunctionCallStep {
Input {
expected: Some(Expected::ExpectedMany(expected)),
..
} => expected.clone(),
FunctionCallStep { expected: None, .. } => vec![ExpectedOutput::new().with_success()],
Input { expected: None, .. } => vec![ExpectedOutput::new().with_success()],
};
// This is a bit of a special case and we have to support it separately on it's own. If it's
@@ -499,9 +432,13 @@ impl CaseState {
{
// Handling the emitter assertion.
if let Some(ref expected_address) = expected_event.address {
let expected = expected_address
.resolve_address(resolver, resolution_context)
.await?;
let expected = Address::from_slice(
Calldata::new_compound([expected_address])
.calldata(resolver, resolution_context)
.await?
.get(12..32)
.expect("Can't fail"),
);
let actual = actual_event.address();
if actual != expected {
tracing::error!(
@@ -595,20 +532,20 @@ impl CaseState {
pub async fn handle_balance_assertion_contract_deployment(
&mut self,
metadata: &Metadata,
balance_assertion: &BalanceAssertionStep,
balance_assertion: &BalanceAssertion,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
let Some(address) = balance_assertion.address.as_resolvable_address() else {
let Some(instance) = balance_assertion
.address
.strip_suffix(".address")
.map(ContractInstance::new)
else {
return Ok(());
};
let Some(instance) = address.strip_suffix(".address").map(ContractInstance::new) else {
return Ok(());
};
self.get_or_deploy_contract_instance(
&instance,
metadata,
FunctionCallStep::default_caller_address(),
Input::default_caller(),
None,
None,
node,
@@ -620,17 +557,21 @@ impl CaseState {
#[instrument(level = "info", skip_all)]
pub async fn handle_balance_assertion_execution(
&mut self,
BalanceAssertionStep {
address,
BalanceAssertion {
address: address_string,
expected_balance: amount,
..
}: &BalanceAssertionStep,
}: &BalanceAssertion,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
let resolver = node.resolver().await?;
let address = address
.resolve_address(resolver.as_ref(), self.default_resolution_context())
.await?;
let address = Address::from_slice(
Calldata::new_compound([address_string])
.calldata(resolver.as_ref(), self.default_resolution_context())
.await?
.get(12..32)
.expect("Can't fail"),
);
let balance = node.balance_of(address).await?;
@@ -642,7 +583,7 @@ impl CaseState {
"Balance assertion failed - Expected {} but got {} for {} resolved to {}",
expected,
actual,
address,
address_string,
address,
)
}
@@ -654,20 +595,20 @@ impl CaseState {
pub async fn handle_storage_empty_assertion_contract_deployment(
&mut self,
metadata: &Metadata,
storage_empty_assertion: &StorageEmptyAssertionStep,
storage_empty_assertion: &StorageEmptyAssertion,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
let Some(address) = storage_empty_assertion.address.as_resolvable_address() else {
let Some(instance) = storage_empty_assertion
.address
.strip_suffix(".address")
.map(ContractInstance::new)
else {
return Ok(());
};
let Some(instance) = address.strip_suffix(".address").map(ContractInstance::new) else {
return Ok(());
};
self.get_or_deploy_contract_instance(
&instance,
metadata,
FunctionCallStep::default_caller_address(),
Input::default_caller(),
None,
None,
node,
@@ -679,17 +620,21 @@ impl CaseState {
#[instrument(level = "info", skip_all)]
pub async fn handle_storage_empty_assertion_execution(
&mut self,
StorageEmptyAssertionStep {
address,
StorageEmptyAssertion {
address: address_string,
is_storage_empty,
..
}: &StorageEmptyAssertionStep,
}: &StorageEmptyAssertion,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
let resolver = node.resolver().await?;
let address = address
.resolve_address(resolver.as_ref(), self.default_resolution_context())
.await?;
let address = Address::from_slice(
Calldata::new_compound([address_string])
.calldata(resolver.as_ref(), self.default_resolution_context())
.await?
.get(12..32)
.expect("Can't fail"),
);
let storage = node.latest_state_proof(address, Default::default()).await?;
let is_empty = storage.storage_hash == EMPTY_ROOT_HASH;
@@ -703,7 +648,7 @@ impl CaseState {
"Storage Empty Assertion failed - Expected {} but got {} for {} resolved to {}",
expected,
actual,
address,
address_string,
address,
)
};
@@ -846,31 +791,32 @@ impl<'a> CaseDriver<'a> {
.enumerate()
.map(|(idx, v)| (StepIdx::new(idx), v))
{
// Run this step concurrently across all platforms; short-circuit on first failure
let metadata = self.metadata;
let step_futures =
let step_futs =
self.platform_state
.iter_mut()
.map(|(node, platform_id, case_state)| {
let platform_id = *platform_id;
let node_ref = *node;
let step = step.clone();
let step_clone = step.clone();
let span = info_span!(
"Handling Step",
%step_idx,
platform = %platform_id,
);
async move {
let step_path = StepPath::from_iterator([step_idx]);
case_state
.handle_step(metadata, &step, &step_path, node_ref)
.handle_step(metadata, &step_clone, node_ref)
.await
.map_err(|e| (platform_id, e))
}
.instrument(span)
});
match try_join_all(step_futures).await {
match try_join_all(step_futs).await {
Ok(_outputs) => {
// All platforms succeeded for this step
steps_executed += 1;
}
Err((platform_id, error)) => {
@@ -895,6 +841,4 @@ pub enum StepOutput {
FunctionCall(TransactionReceipt, GethTrace, DiffMode),
BalanceAssertion,
StorageEmptyAssertion,
Repetition,
AccountAllocation,
}
+5 -19
View File
@@ -26,14 +26,10 @@ use revive_dt_report::{
};
use schemars::schema_for;
use serde_json::{Value, json};
use tokio::sync::Mutex;
use tracing::{debug, error, info, info_span, instrument};
use tracing_subscriber::{EnvFilter, FmtSubscriber};
use revive_dt_common::{
iterators::EitherIter,
types::{Mode, PrivateKeyAllocator},
};
use revive_dt_common::{iterators::EitherIter, types::Mode};
use revive_dt_compiler::SolidityCompiler;
use revive_dt_config::{Context, *};
use revive_dt_core::{
@@ -43,9 +39,9 @@ use revive_dt_core::{
use revive_dt_format::{
case::{Case, CaseIdx},
corpus::Corpus,
input::{Input, Step},
metadata::{ContractPathAndIdent, Metadata, MetadataFile},
mode::ParsedMode,
steps::{FunctionCallStep, Step},
};
use crate::cached_compiler::CachedCompiler;
@@ -330,13 +326,8 @@ async fn start_driver_task<'a>(
.expect("Can't fail");
}
let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new(
context.wallet_configuration.highest_private_key_exclusive(),
)));
let reporter = test.reporter.clone();
let result =
handle_case_driver(&test, cached_compiler, private_key_allocator).await;
let result = handle_case_driver(&test, cached_compiler).await;
match result {
Ok(steps_executed) => reporter
@@ -447,7 +438,6 @@ async fn start_cli_reporting_task(reporter: Reporter) {
async fn handle_case_driver<'a>(
test: &Test<'a>,
cached_compiler: Arc<CachedCompiler<'a>>,
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
) -> anyhow::Result<usize> {
let platform_state = stream::iter(test.platforms.iter())
// Compiling the pre-link contracts.
@@ -521,14 +511,12 @@ async fn handle_case_driver<'a>(
.steps
.iter()
.filter_map(|step| match step {
Step::FunctionCall(input) => input.caller.as_address().copied(),
Step::FunctionCall(input) => Some(input.caller),
Step::BalanceAssertion(..) => None,
Step::StorageEmptyAssertion(..) => None,
Step::Repeat(..) => None,
Step::AllocateAccount(..) => None,
})
.next()
.unwrap_or(FunctionCallStep::default_caller_address());
.unwrap_or(Input::default_caller());
let tx = TransactionBuilder::<Ethereum>::with_deploy_code(
TransactionRequest::default().from(deployer_address),
code,
@@ -575,7 +563,6 @@ async fn handle_case_driver<'a>(
.filter_map(
|(test, platform, node, compiler, reporter, _, deployed_libraries)| {
let cached_compiler = cached_compiler.clone();
let private_key_allocator = private_key_allocator.clone();
async move {
let compiler_output = cached_compiler
@@ -603,7 +590,6 @@ async fn handle_case_driver<'a>(
compiler_output.contracts,
deployed_libraries.unwrap_or_default(),
reporter.clone(),
private_key_allocator,
);
Some((*node, platform.platform_identifier(), case_state))
+2 -19
View File
@@ -4,8 +4,8 @@ use serde::{Deserialize, Serialize};
use revive_dt_common::{macros::define_wrapper_type, types::Mode};
use crate::{
input::{Expected, Step},
mode::ParsedMode,
steps::{Expected, RepeatStep, Step},
};
#[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)]
@@ -55,6 +55,7 @@ pub struct Case {
}
impl Case {
#[allow(irrefutable_let_patterns)]
pub fn steps_iterator(&self) -> impl Iterator<Item = Step> {
let steps_len = self.steps.len();
self.steps
@@ -83,24 +84,6 @@ impl Case {
})
}
pub fn steps_iterator_for_benchmarks(
&self,
default_repeat_count: usize,
) -> Box<dyn Iterator<Item = Step> + '_> {
let contains_repeat = self
.steps_iterator()
.any(|step| matches!(&step, Step::Repeat(..)));
if contains_repeat {
Box::new(self.steps_iterator()) as Box<_>
} else {
Box::new(std::iter::once(Step::Repeat(Box::new(RepeatStep {
comment: None,
repeat: default_repeat_count,
steps: self.steps_iterator().collect(),
})))) as Box<_>
}
}
pub fn solc_modes(&self) -> Vec<Mode> {
match &self.modes {
Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(),
@@ -1,4 +1,4 @@
use std::{collections::HashMap, fmt::Display, str::FromStr};
use std::collections::HashMap;
use alloy::{
eips::BlockNumberOrTag,
@@ -28,104 +28,33 @@ use crate::{metadata::ContractInstance, traits::ResolutionContext};
#[serde(untagged)]
pub enum Step {
/// A function call or an invocation to some function on some smart contract.
FunctionCall(Box<FunctionCallStep>),
FunctionCall(Box<Input>),
/// A step for performing a balance assertion on some account or contract.
BalanceAssertion(Box<BalanceAssertionStep>),
BalanceAssertion(Box<BalanceAssertion>),
/// A step for asserting that the storage of some contract or account is empty.
StorageEmptyAssertion(Box<StorageEmptyAssertionStep>),
/// A special step for repeating a bunch of steps a certain number of times.
Repeat(Box<RepeatStep>),
/// A step type that allows for a new account address to be allocated and to later on be used
/// as the caller in another step.
AllocateAccount(Box<AllocateAccountStep>),
StorageEmptyAssertion(Box<StorageEmptyAssertion>),
}
define_wrapper_type!(
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct StepIdx(usize) impl Display, FromStr;
pub struct StepIdx(usize) impl Display;
);
define_wrapper_type!(
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(try_from = "String", into = "String")]
pub struct StepPath(Vec<StepIdx>);
);
impl StepPath {
pub fn from_iterator(path: impl IntoIterator<Item = impl Into<StepIdx>>) -> Self {
Self(path.into_iter().map(|value| value.into()).collect())
}
pub fn increment(&self) -> Self {
let mut this = self.clone();
if let Some(last) = this.last_mut() {
last.0 += 1
}
this
}
pub fn append(&self, step_idx: impl Into<StepIdx>) -> Self {
let mut this = self.clone();
this.0.push(step_idx.into());
this
}
}
impl Display for StepPath {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0
.iter()
.map(|idx| idx.to_string())
.collect::<Vec<_>>()
.join(".")
.fmt(f)
}
}
impl FromStr for StepPath {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
s.split(".")
.map(StepIdx::from_str)
.collect::<anyhow::Result<Vec<_>>>()
.map(Self)
}
}
impl From<StepPath> for String {
fn from(value: StepPath) -> Self {
value.to_string()
}
}
impl TryFrom<String> for StepPath {
type Error = anyhow::Error;
fn try_from(value: String) -> Result<Self, Self::Error> {
value.parse()
}
}
/// This is an input step which is a transaction description that the framework translates into a
/// transaction and executes on the nodes.
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct FunctionCallStep {
pub struct Input {
/// The address of the account performing the call and paying the fees for it.
#[serde(default = "FunctionCallStep::default_caller")]
#[serde(default = "Input::default_caller")]
#[schemars(with = "String")]
pub caller: StepAddress,
pub caller: Address,
/// An optional comment on the step which has no impact on the execution in any way.
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
/// The contract instance that's being called in this transaction step.
#[serde(default = "FunctionCallStep::default_instance")]
#[serde(default = "Input::default_instance")]
pub instance: ContractInstance,
/// The method that's being called in this step.
@@ -155,8 +84,8 @@ pub struct FunctionCallStep {
/// This represents a balance assertion step where the framework needs to query the balance of some
/// account or contract and assert that it's some amount.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct BalanceAssertionStep {
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct BalanceAssertion {
/// An optional comment on the balance assertion.
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
@@ -167,7 +96,7 @@ pub struct BalanceAssertionStep {
/// this could be a normal hex address, a variable such as `Test.address`, or perhaps even a
/// full on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are
/// followed in the calldata.
pub address: StepAddress,
pub address: String,
/// The amount of balance to assert that the account or contract has. This is a 256 bit string
/// that's serialized and deserialized into a decimal string.
@@ -175,10 +104,8 @@ pub struct BalanceAssertionStep {
pub expected_balance: U256,
}
/// This represents an assertion for the storage of some contract or account and whether it's empty
/// or not.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct StorageEmptyAssertionStep {
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct StorageEmptyAssertion {
/// An optional comment on the storage empty assertion.
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
@@ -189,40 +116,12 @@ pub struct StorageEmptyAssertionStep {
/// this could be a normal hex address, a variable such as `Test.address`, or perhaps even a
/// full on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are
/// followed in the calldata.
pub address: StepAddress,
pub address: String,
/// A boolean of whether the storage of the address is empty or not.
pub is_storage_empty: bool,
}
/// This represents a repetition step which is a special step type that allows for a sequence of
/// steps to be repeated (on different drivers) a certain number of times.
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct RepeatStep {
/// An optional comment on the repetition step.
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
/// The number of repetitions that the steps should be repeated for.
pub repeat: usize,
/// The sequence of steps to repeat for the above defined number of repetitions.
pub steps: Vec<Step>,
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
pub struct AllocateAccountStep {
/// An optional comment on the account allocation step.
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
/// An instruction to allocate a new account with the value being the variable name of that
/// account. This must start with `$VARIABLE:` and then be followed by the variable name of the
/// account.
#[serde(rename = "allocate_account")]
pub variable_name: String,
}
/// A set of expectations and assertions to make about the transaction after it ran.
///
/// If this is not specified then the only assertion that will be ran is that the transaction
@@ -263,7 +162,7 @@ pub struct ExpectedOutput {
pub struct Event {
/// An optional field of the address of the emitter of the event.
#[serde(skip_serializing_if = "Option::is_none")]
pub address: Option<StepAddress>,
pub address: Option<String>,
/// The set of topics to expect the event to have.
pub topics: Vec<String>,
@@ -396,74 +295,13 @@ pub struct VariableAssignments {
pub return_data: Vec<String>,
}
/// An address type that might either be an address literal or a resolvable address.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, JsonSchema)]
#[schemars(with = "String")]
#[serde(untagged)]
pub enum StepAddress {
Address(Address),
ResolvableAddress(String),
}
impl Default for StepAddress {
fn default() -> Self {
Self::Address(Default::default())
}
}
impl Display for StepAddress {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
StepAddress::Address(address) => Display::fmt(address, f),
StepAddress::ResolvableAddress(address) => Display::fmt(address, f),
}
}
}
impl StepAddress {
pub fn as_address(&self) -> Option<&Address> {
match self {
StepAddress::Address(address) => Some(address),
StepAddress::ResolvableAddress(_) => None,
}
}
pub fn as_resolvable_address(&self) -> Option<&str> {
match self {
StepAddress::ResolvableAddress(address) => Some(address),
StepAddress::Address(..) => None,
}
}
pub async fn resolve_address(
&self,
resolver: &(impl ResolverApi + ?Sized),
context: ResolutionContext<'_>,
) -> anyhow::Result<Address> {
match self {
StepAddress::Address(address) => Ok(*address),
StepAddress::ResolvableAddress(address) => Ok(Address::from_slice(
Calldata::new_compound([address])
.calldata(resolver, context)
.await?
.get(12..32)
.expect("Can't fail"),
)),
}
}
}
impl FunctionCallStep {
pub const fn default_caller_address() -> Address {
impl Input {
pub const fn default_caller() -> Address {
Address(FixedBytes(alloy::hex!(
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1"
)))
}
pub const fn default_caller() -> StepAddress {
StepAddress::Address(Self::default_caller_address())
}
fn default_instance() -> ContractInstance {
ContractInstance::new("Test")
}
@@ -546,8 +384,7 @@ impl FunctionCallStep {
.encoded_input(resolver, context)
.await
.context("Failed to encode input bytes for transaction request")?;
let caller = self.caller.resolve_address(resolver, context).await?;
let transaction_request = TransactionRequest::default().from(caller).value(
let transaction_request = TransactionRequest::default().from(self.caller).value(
self.value
.map(|value| value.into_inner())
.unwrap_or_default(),
@@ -1053,7 +890,7 @@ mod tests {
.selector()
.0;
let input = FunctionCallStep {
let input = Input {
instance: ContractInstance::new("Contract"),
method: Method::FunctionName("store".to_owned()),
calldata: Calldata::new_compound(["42"]),
@@ -1097,7 +934,7 @@ mod tests {
.selector()
.0;
let input: FunctionCallStep = FunctionCallStep {
let input: Input = Input {
instance: "Contract".to_owned().into(),
method: Method::FunctionName("send(address)".to_owned()),
calldata: Calldata::new_compound(["0x1000000000000000000000000000000000000001"]),
@@ -1144,7 +981,7 @@ mod tests {
.selector()
.0;
let input: FunctionCallStep = FunctionCallStep {
let input: Input = Input {
instance: ContractInstance::new("Contract"),
method: Method::FunctionName("send".to_owned()),
calldata: Calldata::new_compound(["0x1000000000000000000000000000000000000001"]),
+1 -1
View File
@@ -2,7 +2,7 @@
pub mod case;
pub mod corpus;
pub mod input;
pub mod metadata;
pub mod mode;
pub mod steps;
pub mod traits;
+150 -89
View File
@@ -1,17 +1,17 @@
//! The go-ethereum node implementation.
use std::{
fs::{File, create_dir_all, remove_dir_all},
io::Read,
fs::{File, OpenOptions, create_dir_all, remove_dir_all},
io::{BufRead, BufReader, Read, Write},
ops::ControlFlow,
path::PathBuf,
pin::Pin,
process::{Command, Stdio},
process::{Child, Command, Stdio},
sync::{
Arc,
atomic::{AtomicU32, Ordering},
},
time::Duration,
time::{Duration, Instant},
};
use alloy::{
@@ -41,12 +41,7 @@ use revive_dt_config::*;
use revive_dt_format::traits::ResolverApi;
use revive_dt_node_interaction::EthereumNode;
use crate::{
Node,
common::FallbackGasFiller,
constants::INITIAL_BALANCE,
process::{Process, ProcessReadinessWaitBehavior},
};
use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE};
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
@@ -66,11 +61,16 @@ pub struct GethNode {
logs_directory: PathBuf,
geth: PathBuf,
id: u32,
handle: Option<Process>,
handle: Option<Child>,
start_timeout: Duration,
wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager,
chain_id_filler: ChainIdFiller,
/// This vector stores [`File`] objects that we use for logging which we want to flush when the
/// node object is dropped. We do not store them in a structured fashion at the moment (in
/// separate fields) as the logic that we need to apply to them is all the same regardless of
/// what it belongs to; we just want to flush them on [`Drop`] of the node.
logs_file_to_flush: Vec<File>,
}
impl GethNode {
@@ -84,6 +84,9 @@ impl GethNode {
const READY_MARKER: &str = "IPC endpoint opened";
const ERROR_MARKER: &str = "Fatal:";
const GETH_STDOUT_LOG_FILE_NAME: &str = "node_stdout.log";
const GETH_STDERR_LOG_FILE_NAME: &str = "node_stderr.log";
const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";
@@ -121,6 +124,9 @@ impl GethNode {
wallet: wallet.clone(),
chain_id_filler: Default::default(),
nonce_manager: Default::default(),
// We know that we only need to be storing 2 files so we can specify that when creating
// the vector. It's the stdout and stderr of the geth node.
logs_file_to_flush: Vec::with_capacity(2),
}
}
@@ -188,63 +194,118 @@ impl GethNode {
/// [Instance::init] must be called prior.
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn spawn_process(&mut self) -> anyhow::Result<&mut Self> {
let process = Process::new(
None,
self.logs_directory.as_path(),
self.geth.as_path(),
|command, stdout_file, stderr_file| {
command
.arg("--dev")
.arg("--datadir")
.arg(&self.data_directory)
.arg("--ipcpath")
.arg(&self.connection_string)
.arg("--nodiscover")
.arg("--maxpeers")
.arg("0")
.arg("--txlookuplimit")
.arg("0")
.arg("--cache.blocklogs")
.arg("512")
.arg("--state.scheme")
.arg("hash")
.arg("--syncmode")
.arg("full")
.arg("--gcmode")
.arg("archive")
.stderr(stderr_file)
.stdout(stdout_file);
},
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration: self.start_timeout,
check_function: Box::new(|_, stderr_line| match stderr_line {
Some(line) => {
if line.contains(Self::ERROR_MARKER) {
anyhow::bail!("Failed to start geth {line}");
} else if line.contains(Self::READY_MARKER) {
Ok(true)
} else {
Ok(false)
}
}
None => Ok(false),
}),
},
);
// This is the `OpenOptions` that we wish to use for all of the log files that we will be
// opening in this method. We need to construct it in this way to:
// 1. Be consistent
    // 2. Be less verbose and more DRY
// 3. Because the builder pattern uses mutable references so we need to get around that.
let open_options = {
let mut options = OpenOptions::new();
options.create(true).truncate(true).write(true);
options
};
match process {
Ok(process) => self.handle = Some(process),
Err(err) => {
tracing::error!(?err, "Failed to start geth, shutting down gracefully");
self.shutdown()
.context("Failed to gracefully shutdown after geth start error")?;
return Err(err);
}
let stdout_logs_file = open_options
.clone()
.open(self.geth_stdout_log_file_path())
.context("Failed to open geth stdout logs file")?;
let stderr_logs_file = open_options
.open(self.geth_stderr_log_file_path())
.context("Failed to open geth stderr logs file")?;
self.handle = Command::new(&self.geth)
.arg("--dev")
.arg("--datadir")
.arg(&self.data_directory)
.arg("--ipcpath")
.arg(&self.connection_string)
.arg("--nodiscover")
.arg("--maxpeers")
.arg("0")
.arg("--txlookuplimit")
.arg("0")
.arg("--cache.blocklogs")
.arg("512")
.arg("--state.scheme")
.arg("hash")
.arg("--syncmode")
.arg("full")
.arg("--gcmode")
.arg("archive")
.stderr(
stderr_logs_file
.try_clone()
.context("Failed to clone geth stderr log file handle")?,
)
.stdout(
stdout_logs_file
.try_clone()
.context("Failed to clone geth stdout log file handle")?,
)
.spawn()
.context("Failed to spawn geth node process")?
.into();
if let Err(error) = self.wait_ready() {
tracing::error!(?error, "Failed to start geth, shutting down gracefully");
self.shutdown()
.context("Failed to gracefully shutdown after geth start error")?;
return Err(error);
}
self.logs_file_to_flush
.extend([stderr_logs_file, stdout_logs_file]);
Ok(self)
}
/// Wait for the go-ethereum node child process to become ready.
///
/// [Instance::spawn_process] must be called prior.
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn wait_ready(&mut self) -> anyhow::Result<&mut Self> {
let start_time = Instant::now();
let logs_file = OpenOptions::new()
.read(true)
.write(false)
.append(false)
.truncate(false)
.open(self.geth_stderr_log_file_path())
.context("Failed to open geth stderr logs file for readiness check")?;
let maximum_wait_time = self.start_timeout;
let mut stderr = BufReader::new(logs_file).lines();
let mut lines = vec![];
loop {
if let Some(Ok(line)) = stderr.next() {
if line.contains(Self::ERROR_MARKER) {
anyhow::bail!("Failed to start geth {line}");
}
if line.contains(Self::READY_MARKER) {
return Ok(self);
}
lines.push(line);
}
if Instant::now().duration_since(start_time) > maximum_wait_time {
anyhow::bail!(
"Timeout in starting geth: took longer than {}ms. stdout:\n\n{}\n",
self.start_timeout.as_millis(),
lines.join("\n")
);
}
}
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn geth_stdout_log_file_path(&self) -> PathBuf {
self.logs_directory.join(Self::GETH_STDOUT_LOG_FILE_NAME)
}
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn geth_stderr_log_file_path(&self) -> PathBuf {
self.logs_directory.join(Self::GETH_STDERR_LOG_FILE_NAME)
}
async fn provider(
&self,
) -> anyhow::Result<FillProvider<impl TxFiller<Ethereum>, impl Provider<Ethereum>, Ethereum>>
@@ -589,7 +650,17 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi for GethNodeResol
impl Node for GethNode {
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn shutdown(&mut self) -> anyhow::Result<()> {
drop(self.handle.take());
// Terminate the processes in a graceful manner to allow for the output to be flushed.
if let Some(mut child) = self.handle.take() {
child
.kill()
.map_err(|error| anyhow::anyhow!("Failed to kill the geth process: {error:?}"))?;
}
// Flushing the files that we're using for keeping the logs before shutdown.
for file in self.logs_file_to_flush.iter_mut() {
file.flush()?
}
// Remove the node's database so that subsequent runs do not run on the same database. We
// ignore the error just in case the directory didn't exist in the first place and therefore
@@ -630,8 +701,6 @@ impl Drop for GethNode {
#[cfg(test)]
mod tests {
use std::sync::LazyLock;
use super::*;
fn test_config() -> TestExecutionContext {
@@ -648,21 +717,9 @@ mod tests {
(context, node)
}
fn shared_node() -> &'static GethNode {
static NODE: LazyLock<(TestExecutionContext, GethNode)> = LazyLock::new(new_node);
&NODE.1
}
#[test]
fn version_works() {
// Arrange
let node = shared_node();
// Act
let version = node.version();
// Assert
let version = version.expect("Failed to get the version");
let version = GethNode::new(&test_config()).version().unwrap();
assert!(
version.starts_with("geth version"),
"expected version string, got: '{version}'"
@@ -672,7 +729,7 @@ mod tests {
#[tokio::test]
async fn can_get_chain_id_from_node() {
// Arrange
let node = shared_node();
let (_context, node) = new_node();
// Act
let chain_id = node.resolver().await.unwrap().chain_id().await;
@@ -685,7 +742,7 @@ mod tests {
#[tokio::test]
async fn can_get_gas_limit_from_node() {
// Arrange
let node = shared_node();
let (_context, node) = new_node();
// Act
let gas_limit = node
@@ -696,13 +753,14 @@ mod tests {
.await;
// Assert
let _ = gas_limit.expect("Failed to get the gas limit");
let gas_limit = gas_limit.expect("Failed to get the gas limit");
assert_eq!(gas_limit, u32::MAX as u128)
}
#[tokio::test]
async fn can_get_coinbase_from_node() {
// Arrange
let node = shared_node();
let (_context, node) = new_node();
// Act
let coinbase = node
@@ -713,13 +771,14 @@ mod tests {
.await;
// Assert
let _ = coinbase.expect("Failed to get the coinbase");
let coinbase = coinbase.expect("Failed to get the coinbase");
assert_eq!(coinbase, Address::new([0xFF; 20]))
}
#[tokio::test]
async fn can_get_block_difficulty_from_node() {
// Arrange
let node = shared_node();
let (_context, node) = new_node();
// Act
let block_difficulty = node
@@ -730,13 +789,14 @@ mod tests {
.await;
// Assert
let _ = block_difficulty.expect("Failed to get the block difficulty");
let block_difficulty = block_difficulty.expect("Failed to get the block difficulty");
assert_eq!(block_difficulty, U256::ZERO)
}
#[tokio::test]
async fn can_get_block_hash_from_node() {
// Arrange
let node = shared_node();
let (_context, node) = new_node();
// Act
let block_hash = node
@@ -753,7 +813,7 @@ mod tests {
#[tokio::test]
async fn can_get_block_timestamp_from_node() {
// Arrange
let node = shared_node();
let (_context, node) = new_node();
// Act
let block_timestamp = node
@@ -770,12 +830,13 @@ mod tests {
#[tokio::test]
async fn can_get_block_number_from_node() {
// Arrange
let node = shared_node();
let (_context, node) = new_node();
// Act
let block_number = node.resolver().await.unwrap().last_block_number().await;
// Assert
let _ = block_number.expect("Failed to get the block number");
let block_number = block_number.expect("Failed to get the block number");
assert_eq!(block_number, 0)
}
}
-1
View File
@@ -6,7 +6,6 @@ use revive_dt_node_interaction::EthereumNode;
pub mod common;
pub mod constants;
pub mod geth;
pub mod process;
pub mod substrate;
/// An abstract interface for testing nodes.
-162
View File
@@ -1,162 +0,0 @@
use std::{
fs::{File, OpenOptions},
io::{BufRead, BufReader, Write},
path::Path,
process::{Child, Command},
time::{Duration, Instant},
};
use anyhow::{Context, Result, bail};
/// A wrapper around processes which allows for their stdout and stderr to be logged and flushed
/// when the process is dropped.
#[derive(Debug)]
pub struct Process {
/// The handle of the child process.
child: Child,
/// The file that stdout is being logged to.
stdout_logs_file: File,
/// The file that stderr is being logged to.
stderr_logs_file: File,
}
impl Process {
pub fn new(
log_file_prefix: impl Into<Option<&'static str>>,
logs_directory: impl AsRef<Path>,
binary_path: impl AsRef<Path>,
command_building_callback: impl FnOnce(&mut Command, File, File),
process_readiness_wait_behavior: ProcessReadinessWaitBehavior,
) -> Result<Self> {
let log_file_prefix = log_file_prefix.into();
let (stdout_file_name, stderr_file_name) = match log_file_prefix {
Some(prefix) => (
format!("{prefix}_stdout.log"),
format!("{prefix}_stderr.log"),
),
None => ("stdout.log".to_string(), "stderr.log".to_string()),
};
let stdout_logs_file_path = logs_directory.as_ref().join(stdout_file_name);
let stderr_logs_file_path = logs_directory.as_ref().join(stderr_file_name);
let stdout_logs_file = OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(stdout_logs_file_path.as_path())
.context("Failed to open the stdout logs file")?;
let stderr_logs_file = OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(stderr_logs_file_path.as_path())
.context("Failed to open the stderr logs file")?;
let mut command = {
let stdout_logs_file = stdout_logs_file
.try_clone()
.context("Failed to clone the stdout logs file")?;
let stderr_logs_file = stderr_logs_file
.try_clone()
.context("Failed to clone the stderr logs file")?;
let mut command = Command::new(binary_path.as_ref());
command_building_callback(&mut command, stdout_logs_file, stderr_logs_file);
command
};
let child = command
.spawn()
.context("Failed to spawn the built command")?;
match process_readiness_wait_behavior {
ProcessReadinessWaitBehavior::NoStartupWait => {}
ProcessReadinessWaitBehavior::WaitDuration(duration) => std::thread::sleep(duration),
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration,
mut check_function,
} => {
let spawn_time = Instant::now();
let stdout_logs_file = OpenOptions::new()
.read(true)
.open(stdout_logs_file_path)
.context("Failed to open the stdout logs file")?;
let stderr_logs_file = OpenOptions::new()
.read(true)
.open(stderr_logs_file_path)
.context("Failed to open the stderr logs file")?;
let mut stdout_lines = BufReader::new(stdout_logs_file).lines();
let mut stderr_lines = BufReader::new(stderr_logs_file).lines();
loop {
let stdout_line = stdout_lines.next().and_then(Result::ok);
let stderr_line = stderr_lines.next().and_then(Result::ok);
let check_result =
check_function(stdout_line.as_deref(), stderr_line.as_deref())
.context("Failed to wait for the process to be ready")?;
if check_result {
break;
}
if Instant::now().duration_since(spawn_time) > max_wait_duration {
bail!("Waited for the process to start but it failed to start in time")
}
}
}
}
Ok(Self {
child,
stdout_logs_file,
stderr_logs_file,
})
}
}
impl Drop for Process {
fn drop(&mut self) {
self.child.kill().expect("Failed to kill the process");
self.stdout_logs_file
.flush()
.expect("Failed to flush the stdout logs file");
self.stderr_logs_file
.flush()
.expect("Failed to flush the stderr logs file");
}
}
pub enum ProcessReadinessWaitBehavior {
/// The process does not require any kind of wait after it's been spawned and can be used
/// straight away.
NoStartupWait,
/// The process does require some amount of wait duration after it's been started.
WaitDuration(Duration),
/// The process requires a time bounded wait function which is a function of the lines that
/// appear in the log files.
TimeBoundedWaitFunction {
/// The maximum amount of time to wait for the check function to return true.
max_wait_duration: Duration,
/// The function to use to check if the process spawned is ready to use or not. This
/// function should return the following in the following cases:
///
/// - `Ok(true)`: Returned when the condition the process is waiting for has been fulfilled
/// and the wait is completed.
/// - `Ok(false)`: The process is not ready yet but it might be ready in the future.
/// - `Err`: The process is not ready yet and will not be ready in the future as it appears
/// that it has encountered an error when it was being spawned.
///
/// The first argument is a line from stdout and the second argument is a line from stderr.
#[allow(clippy::type_complexity)]
check_function: Box<dyn FnMut(Option<&str>, Option<&str>) -> anyhow::Result<bool>>,
},
}
+192 -102
View File
@@ -1,8 +1,9 @@
use std::{
fs::{create_dir_all, remove_dir_all},
path::PathBuf,
fs::{File, OpenOptions, create_dir_all, remove_dir_all},
io::{BufRead, Write},
path::{Path, PathBuf},
pin::Pin,
process::{Command, Stdio},
process::{Child, Command, Stdio},
sync::{
Arc,
atomic::{AtomicU32, Ordering},
@@ -46,12 +47,7 @@ use revive_dt_config::*;
use revive_dt_node_interaction::EthereumNode;
use tracing::instrument;
use crate::{
Node,
common::FallbackGasFiller,
constants::INITIAL_BALANCE,
process::{Process, ProcessReadinessWaitBehavior},
};
use crate::{Node, common::FallbackGasFiller, constants::INITIAL_BALANCE};
static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
@@ -67,11 +63,12 @@ pub struct SubstrateNode {
rpc_url: String,
base_directory: PathBuf,
logs_directory: PathBuf,
substrate_process: Option<Process>,
eth_proxy_process: Option<Process>,
process_substrate: Option<Child>,
process_proxy: Option<Child>,
wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager,
chain_id_filler: ChainIdFiller,
logs_file_to_flush: Vec<File>,
}
impl SubstrateNode {
@@ -88,6 +85,12 @@ impl SubstrateNode {
const SUBSTRATE_LOG_ENV: &str = "error,evm=debug,sc_rpc_server=info,runtime::revive=debug";
const PROXY_LOG_ENV: &str = "info,eth-rpc=debug";
const SUBSTRATE_STDOUT_LOG_FILE_NAME: &str = "node_stdout.log";
const SUBSTRATE_STDERR_LOG_FILE_NAME: &str = "node_stderr.log";
const PROXY_STDOUT_LOG_FILE_NAME: &str = "proxy_stdout.log";
const PROXY_STDERR_LOG_FILE_NAME: &str = "proxy_stderr.log";
pub const KITCHENSINK_EXPORT_CHAINSPEC_COMMAND: &str = "export-chain-spec";
pub const REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND: &str = "build-spec";
@@ -118,16 +121,16 @@ impl SubstrateNode {
rpc_url: String::new(),
base_directory,
logs_directory,
substrate_process: None,
eth_proxy_process: None,
process_substrate: None,
process_proxy: None,
wallet: wallet.clone(),
chain_id_filler: Default::default(),
nonce_manager: Default::default(),
logs_file_to_flush: Vec::with_capacity(4),
}
}
fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
let _ = remove_dir_all(self.base_directory.as_path());
let _ = clear_directory(&self.base_directory);
let _ = clear_directory(&self.logs_directory);
@@ -210,88 +213,120 @@ impl SubstrateNode {
let substrate_rpc_port = Self::BASE_SUBSTRATE_RPC_PORT + self.id as u16;
let proxy_rpc_port = Self::BASE_PROXY_RPC_PORT + self.id as u16;
let chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);
self.rpc_url = format!("http://127.0.0.1:{proxy_rpc_port}");
let substrate_process = Process::new(
"node",
self.logs_directory.as_path(),
self.node_binary.as_path(),
|command, stdout_file, stderr_file| {
command
.arg("--dev")
.arg("--chain")
.arg(chainspec_path)
.arg("--base-path")
.arg(&self.base_directory)
.arg("--rpc-port")
.arg(substrate_rpc_port.to_string())
.arg("--name")
.arg(format!("revive-substrate-{}", self.id))
.arg("--force-authoring")
.arg("--rpc-methods")
.arg("Unsafe")
.arg("--rpc-cors")
.arg("all")
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.env("RUST_LOG", Self::SUBSTRATE_LOG_ENV)
.stdout(stdout_file)
.stderr(stderr_file);
},
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration: Duration::from_secs(30),
check_function: Box::new(|_, stderr_line| match stderr_line {
Some(line) => Ok(line.contains(Self::SUBSTRATE_READY_MARKER)),
None => Ok(false),
}),
},
);
match substrate_process {
Ok(process) => self.substrate_process = Some(process),
Err(err) => {
tracing::error!(?err, "Failed to start substrate, shutting down gracefully");
self.shutdown()
.context("Failed to gracefully shutdown after substrate start error")?;
return Err(err);
}
}
let chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE);
let eth_proxy_process = Process::new(
"proxy",
self.logs_directory.as_path(),
self.eth_proxy_binary.as_path(),
|command, stdout_file, stderr_file| {
command
.arg("--dev")
.arg("--rpc-port")
.arg(proxy_rpc_port.to_string())
.arg("--node-rpc-url")
.arg(format!("ws://127.0.0.1:{substrate_rpc_port}"))
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.env("RUST_LOG", Self::PROXY_LOG_ENV)
.stdout(stdout_file)
.stderr(stderr_file);
},
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration: Duration::from_secs(30),
check_function: Box::new(|_, stderr_line| match stderr_line {
Some(line) => Ok(line.contains(Self::ETH_PROXY_READY_MARKER)),
None => Ok(false),
}),
},
);
match eth_proxy_process {
Ok(process) => self.eth_proxy_process = Some(process),
Err(err) => {
tracing::error!(?err, "Failed to start eth proxy, shutting down gracefully");
self.shutdown()
.context("Failed to gracefully shutdown after eth proxy start error")?;
return Err(err);
}
}
// This is the `OpenOptions` that we wish to use for all of the log files that we will be
// opening in this method. We need to construct it in this way to:
// 1. Be consistent
        // 2. Be less verbose and more DRY
// 3. Because the builder pattern uses mutable references so we need to get around that.
let open_options = {
let mut options = OpenOptions::new();
options.create(true).truncate(true).write(true);
options
};
// Start Substrate node
let substrate_stdout_logs_file = open_options
.clone()
.open(self.substrate_stdout_log_file_path())
.context("Failed to open substrate stdout logs file")?;
let substrate_stderr_logs_file = open_options
.clone()
.open(self.substrate_stderr_log_file_path())
.context("Failed to open substrate stderr logs file")?;
let node_binary_path = self.node_binary.as_path();
self.process_substrate = Command::new(node_binary_path)
.arg("--dev")
.arg("--chain")
.arg(chainspec_path)
.arg("--base-path")
.arg(&self.base_directory)
.arg("--rpc-port")
.arg(substrate_rpc_port.to_string())
.arg("--name")
.arg(format!("revive-substrate-{}", self.id))
.arg("--force-authoring")
.arg("--rpc-methods")
.arg("Unsafe")
.arg("--rpc-cors")
.arg("all")
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.env("RUST_LOG", Self::SUBSTRATE_LOG_ENV)
.stdout(
substrate_stdout_logs_file
.try_clone()
.context("Failed to clone substrate stdout log file handle")?,
)
.stderr(
substrate_stderr_logs_file
.try_clone()
.context("Failed to clone substrate stderr log file handle")?,
)
.spawn()
.context("Failed to spawn Substrate node process")?
.into();
// Give the node a moment to boot
if let Err(error) = Self::wait_ready(
self.substrate_stderr_log_file_path().as_path(),
Self::SUBSTRATE_READY_MARKER,
Duration::from_secs(60),
) {
self.shutdown()
.context("Failed to gracefully shutdown after Substrate start error")?;
return Err(error);
};
let eth_proxy_stdout_logs_file = open_options
.clone()
.open(self.proxy_stdout_log_file_path())
.context("Failed to open eth-proxy stdout logs file")?;
let eth_proxy_stderr_logs_file = open_options
.open(self.proxy_stderr_log_file_path())
.context("Failed to open eth-proxy stderr logs file")?;
self.process_proxy = Command::new(&self.eth_proxy_binary)
.arg("--dev")
.arg("--rpc-port")
.arg(proxy_rpc_port.to_string())
.arg("--node-rpc-url")
.arg(format!("ws://127.0.0.1:{substrate_rpc_port}"))
.arg("--rpc-max-connections")
.arg(u32::MAX.to_string())
.env("RUST_LOG", Self::PROXY_LOG_ENV)
.stdout(
eth_proxy_stdout_logs_file
.try_clone()
.context("Failed to clone eth-proxy stdout log file handle")?,
)
.stderr(
eth_proxy_stderr_logs_file
.try_clone()
.context("Failed to clone eth-proxy stderr log file handle")?,
)
.spawn()
.context("Failed to spawn eth-proxy process")?
.into();
if let Err(error) = Self::wait_ready(
self.proxy_stderr_log_file_path().as_path(),
Self::ETH_PROXY_READY_MARKER,
Duration::from_secs(60),
) {
self.shutdown()
.context("Failed to gracefully shutdown after eth-proxy start error")?;
return Err(error);
};
self.logs_file_to_flush.extend([
substrate_stdout_logs_file,
substrate_stderr_logs_file,
eth_proxy_stdout_logs_file,
eth_proxy_stderr_logs_file,
]);
Ok(())
}
@@ -321,6 +356,29 @@ impl SubstrateNode {
account_id.to_ss58check()
}
fn wait_ready(logs_file_path: &Path, marker: &str, timeout: Duration) -> anyhow::Result<()> {
let start_time = std::time::Instant::now();
let logs_file = OpenOptions::new()
.read(true)
.write(false)
.append(false)
.truncate(false)
.open(logs_file_path)?;
let mut lines = std::io::BufReader::new(logs_file).lines();
loop {
if let Some(Ok(line)) = lines.next() {
if line.contains(marker) {
return Ok(());
}
}
if start_time.elapsed() > timeout {
anyhow::bail!("Timeout waiting for process readiness: {marker}");
}
}
}
pub fn eth_rpc_version(&self) -> anyhow::Result<String> {
let output = Command::new(&self.eth_proxy_binary)
.arg("--version")
@@ -333,6 +391,24 @@ impl SubstrateNode {
Ok(String::from_utf8_lossy(&output).trim().to_string())
}
fn substrate_stdout_log_file_path(&self) -> PathBuf {
self.logs_directory
.join(Self::SUBSTRATE_STDOUT_LOG_FILE_NAME)
}
fn substrate_stderr_log_file_path(&self) -> PathBuf {
self.logs_directory
.join(Self::SUBSTRATE_STDERR_LOG_FILE_NAME)
}
fn proxy_stdout_log_file_path(&self) -> PathBuf {
self.logs_directory.join(Self::PROXY_STDOUT_LOG_FILE_NAME)
}
fn proxy_stderr_log_file_path(&self) -> PathBuf {
self.logs_directory.join(Self::PROXY_STDERR_LOG_FILE_NAME)
}
async fn provider(
&self,
) -> anyhow::Result<
@@ -597,8 +673,22 @@ impl<F: TxFiller<ReviveNetwork>, P: Provider<ReviveNetwork>> ResolverApi
impl Node for SubstrateNode {
fn shutdown(&mut self) -> anyhow::Result<()> {
drop(self.substrate_process.take());
drop(self.eth_proxy_process.take());
// Terminate the processes in a graceful manner to allow for the output to be flushed.
if let Some(mut child) = self.process_proxy.take() {
child
.kill()
.map_err(|error| anyhow::anyhow!("Failed to kill the proxy process: {error:?}"))?;
}
if let Some(mut child) = self.process_substrate.take() {
child.kill().map_err(|error| {
anyhow::anyhow!("Failed to kill the Substrate process: {error:?}")
})?;
}
// Flushing the files that we're using for keeping the logs before shutdown.
for file in self.logs_file_to_flush.iter_mut() {
file.flush()?
}
// Remove the node's database so that subsequent runs do not run on the same database. We
// ignore the error just in case the directory didn't exist in the first place and therefore
@@ -1105,19 +1195,19 @@ mod tests {
(context, node)
}
fn shared_state() -> &'static (TestExecutionContext, SubstrateNode) {
static STATE: LazyLock<(TestExecutionContext, SubstrateNode)> = LazyLock::new(new_node);
&STATE
}
/// A shared node that multiple tests can use. It starts up once.
fn shared_node() -> &'static SubstrateNode {
&shared_state().1
static NODE: LazyLock<(TestExecutionContext, SubstrateNode)> = LazyLock::new(|| {
let (context, node) = new_node();
(context, node)
});
&NODE.1
}
#[tokio::test]
async fn node_mines_simple_transfer_transaction_and_returns_receipt() {
// Arrange
let (context, node) = shared_state();
let (context, node) = new_node();
let provider = node.provider().await.expect("Failed to create provider");
+2 -2
View File
@@ -4,7 +4,7 @@ use std::{path::PathBuf, sync::Arc};
use revive_dt_common::{define_wrapper_type, types::PlatformIdentifier};
use revive_dt_compiler::Mode;
use revive_dt_format::{case::CaseIdx, steps::StepPath};
use revive_dt_format::{case::CaseIdx, input::StepIdx};
use serde::{Deserialize, Serialize};
define_wrapper_type!(
@@ -33,5 +33,5 @@ pub struct ExecutionSpecifier {
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct StepExecutionSpecifier {
pub execution_specifier: Arc<ExecutionSpecifier>,
pub step_idx: StepPath,
pub step_idx: StepIdx,
}
View File
+1
View File
@@ -95,6 +95,7 @@ RUST_LOG="info" cargo run --release -- execute-tests \
--corpus "$CORPUS_FILE" \
--working-directory "$WORKDIR" \
--concurrency.number-of-nodes 5 \
--concurrency.ignore-concurrency-limit \
--kitchensink.path "$SUBSTRATE_NODE_BIN" \
--revive-dev-node.path "$REVIVE_DEV_NODE_BIN" \
--eth-rpc.path "$ETH_RPC_BIN" \
+12 -98
View File
@@ -25,7 +25,7 @@
"null"
],
"items": {
"$ref": "#/$defs/VmIdentifier"
"type": "string"
}
},
"cases": {
@@ -95,26 +95,6 @@
"cases"
],
"$defs": {
"VmIdentifier": {
"description": "An enum representing the identifiers of the supported VMs.",
"oneOf": [
{
"description": "The ethereum virtual machine.",
"type": "string",
"const": "evm"
},
{
"description": "The EraVM virtual machine.",
"type": "string",
"const": "eravm"
},
{
"description": "Polkadot's PolaVM Risc-v based virtual machine.",
"type": "string",
"const": "polkavm"
}
]
},
"Case": {
"type": "object",
"properties": {
@@ -188,27 +168,19 @@
"anyOf": [
{
"description": "A function call or an invocation to some function on some smart contract.",
"$ref": "#/$defs/FunctionCallStep"
"$ref": "#/$defs/Input"
},
{
"description": "A step for performing a balance assertion on some account or contract.",
"$ref": "#/$defs/BalanceAssertionStep"
"$ref": "#/$defs/BalanceAssertion"
},
{
"description": "A step for asserting that the storage of some contract or account is empty.",
"$ref": "#/$defs/StorageEmptyAssertionStep"
},
{
"description": "A special step for repeating a bunch of steps a certain number of times.",
"$ref": "#/$defs/RepeatStep"
},
{
"description": "A step type that allows for a new account address to be allocated and to later on be used\nas the caller in another step.",
"$ref": "#/$defs/AllocateAccountStep"
"$ref": "#/$defs/StorageEmptyAssertion"
}
]
},
"FunctionCallStep": {
"Input": {
"description": "This is an input step which is a transaction description that the framework translates into a\ntransaction and executes on the nodes.",
"type": "object",
"properties": {
@@ -381,13 +353,9 @@
"properties": {
"address": {
"description": "An optional field of the address of the emitter of the event.",
"anyOf": [
{
"$ref": "#/$defs/StepAddress"
},
{
"type": "null"
}
"type": [
"string",
"null"
]
},
"topics": {
@@ -407,10 +375,6 @@
"values"
]
},
"StepAddress": {
"description": "An address type that might either be an address literal or a resolvable address.",
"type": "string"
},
"EtherValue": {
"description": "Defines an Ether value.\n\nThis is an unsigned 256 bit integer that's followed by some denomination which can either be\neth, ether, gwei, or wei.",
"type": "string"
@@ -430,7 +394,7 @@
"return_data"
]
},
"BalanceAssertionStep": {
"BalanceAssertion": {
"description": "This represents a balance assertion step where the framework needs to query the balance of some\naccount or contract and assert that it's some amount.",
"type": "object",
"properties": {
@@ -443,7 +407,7 @@
},
"address": {
"description": "The address that the balance assertion should be done on.\n\nThis is a string which will be resolved into an address when being processed. Therefore,\nthis could be a normal hex address, a variable such as `Test.address`, or perhaps even a\nfull on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are\nfollowed in the calldata.",
"$ref": "#/$defs/StepAddress"
"type": "string"
},
"expected_balance": {
"description": "The amount of balance to assert that the account or contract has. This is a 256 bit string\nthat's serialized and deserialized into a decimal string.",
@@ -455,8 +419,7 @@
"expected_balance"
]
},
"StorageEmptyAssertionStep": {
"description": "This represents an assertion for the storage of some contract or account and whether it's empty\nor not.",
"StorageEmptyAssertion": {
"type": "object",
"properties": {
"comment": {
@@ -468,7 +431,7 @@
},
"address": {
"description": "The address that the balance assertion should be done on.\n\nThis is a string which will be resolved into an address when being processed. Therefore,\nthis could be a normal hex address, a variable such as `Test.address`, or perhaps even a\nfull on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are\nfollowed in the calldata.",
"$ref": "#/$defs/StepAddress"
"type": "string"
},
"is_storage_empty": {
"description": "A boolean of whether the storage of the address is empty or not.",
@@ -480,55 +443,6 @@
"is_storage_empty"
]
},
"RepeatStep": {
"description": "This represents a repetition step which is a special step type that allows for a sequence of\nsteps to be repeated (on different drivers) a certain number of times.",
"type": "object",
"properties": {
"comment": {
"description": "An optional comment on the repetition step.",
"type": [
"string",
"null"
]
},
"repeat": {
"description": "The number of repetitions that the steps should be repeated for.",
"type": "integer",
"format": "uint",
"minimum": 0
},
"steps": {
"description": "The sequence of steps to repeat for the above defined number of repetitions.",
"type": "array",
"items": {
"$ref": "#/$defs/Step"
}
}
},
"required": [
"repeat",
"steps"
]
},
"AllocateAccountStep": {
"type": "object",
"properties": {
"comment": {
"description": "An optional comment on the account allocation step.",
"type": [
"string",
"null"
]
},
"allocate_account": {
"description": "An instruction to allocate a new account with the value being the variable name of that\naccount. This must start with `$VARIABLE:` and then be followed by the variable name of the\naccount.",
"type": "string"
}
},
"required": [
"allocate_account"
]
},
"ContractPathAndIdent": {
"description": "Represents an identifier used for contracts.\n\nThe type supports serialization from and into the following string format:\n\n```text\n${path}:${contract_ident}\n```",
"type": "string"