Mirror of https://github.com/pezkuwichain/revive-differential-tests.git, synced 2026-04-28 17:48:00 +00:00
Replace infra with the dyn infra
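Editorial note (not part of the diff): this commit replaces the statically-typed leader/follower pair — generic `Platform` type parameters threaded through the driver — with a runtime list of platforms behind trait objects (`&dyn DynPlatform`, `&dyn EthereumNode`, `Box<dyn DynSolidityCompiler>`). A minimal, self-contained sketch of the shape of that change, with simplified stand-in types rather than the crate's real definitions:

// Sketch only: illustrates the generic-to-trait-object move seen in the diff below.
trait EthereumNode {
    fn id(&self) -> usize;
}

trait DynPlatform {
    fn platform_identifier(&self) -> &'static str;
}

struct GethEvmSolcPlatform;
impl DynPlatform for GethEvmSolcPlatform {
    fn platform_identifier(&self) -> &'static str {
        "geth-evm-solc"
    }
}

// Before: one leader and one follower, fixed at compile time via generics.
// After: any number of (node, platform) pairs chosen at runtime, so a single
// non-generic code path can drive every configured platform.
struct CaseDriver<'a> {
    platform_state: Vec<(&'a dyn EthereumNode, &'a dyn DynPlatform)>,
}

impl<'a> CaseDriver<'a> {
    fn platform_names(&self) -> impl Iterator<Item = &'static str> + '_ {
        self.platform_state
            .iter()
            .map(|(_, platform)| platform.platform_identifier())
    }
}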
Generated
+4
@@ -4471,6 +4471,7 @@ dependencies = [
"clap",
"moka",
"once_cell",
"schemars 1.0.4",
"semver 1.0.26",
"serde",
"strum",
@@ -4505,6 +4506,7 @@ dependencies = [
"alloy",
"anyhow",
"clap",
"revive-dt-common",
"semver 1.0.26",
"serde",
"serde_json",
@@ -4586,6 +4588,8 @@ version = "0.1.0"
dependencies = [
"alloy",
"anyhow",
"revive-common",
"revive-dt-common",
"revive-dt-format",
]

@@ -15,6 +15,7 @@ moka = { workspace = true, features = ["sync"] }
once_cell = { workspace = true }
semver = { workspace = true }
serde = { workspace = true }
schemars = { workspace = true }
strum = { workspace = true }
tokio = { workspace = true, default-features = false, features = ["time"] }

@@ -1,4 +1,5 @@
use clap::ValueEnum;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use strum::{AsRefStr, Display, EnumString, IntoStaticStr};

@@ -19,6 +20,7 @@ use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
Display,
AsRefStr,
IntoStaticStr,
JsonSchema,
)]
#[serde(rename = "kebab-case")]
#[strum(serialize_all = "kebab-case")]
@@ -52,9 +54,8 @@ pub enum PlatformIdentifier {
Display,
AsRefStr,
IntoStaticStr,
JsonSchema,
)]
#[serde(rename = "kebab-case")]
#[strum(serialize_all = "kebab-case")]
pub enum CompilerIdentifier {
/// The solc compiler.
Solc,
@@ -79,9 +80,8 @@ pub enum CompilerIdentifier {
Display,
AsRefStr,
IntoStaticStr,
JsonSchema,
)]
#[serde(rename = "kebab-case")]
#[strum(serialize_all = "kebab-case")]
pub enum NodeIdentifier {
/// The go-ethereum node implementation.
Geth,
@@ -108,12 +108,15 @@ pub enum NodeIdentifier {
Display,
AsRefStr,
IntoStaticStr,
JsonSchema,
)]
#[serde(rename = "kebab-case")]
#[strum(serialize_all = "kebab-case")]
#[serde(rename = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum VmIdentifier {
/// The ethereum virtual machine.
Evm,
/// The EraVM virtual machine.
EraVM,
/// Polkadot's PolaVM Risc-v based virtual machine.
Polkavm,
PolkaVM,
}

@@ -9,6 +9,8 @@ repository.workspace = true
rust-version.workspace = true

[dependencies]
revive-dt-common = { workspace = true }

alloy = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }

@@ -18,6 +18,7 @@ use alloy::{
signers::local::PrivateKeySigner,
};
use clap::{Parser, ValueEnum, ValueHint};
use revive_dt_common::types::PlatformIdentifier;
use semver::Version;
use serde::{Serialize, Serializer};
use strum::{AsRefStr, Display, EnumString, IntoStaticStr};
@@ -165,13 +166,9 @@ pub struct ExecutionContext {
)]
pub working_directory: WorkingDirectoryConfiguration,

/// The differential testing leader node implementation.
#[arg(short, long = "leader", default_value_t = TestingPlatform::Geth)]
pub leader: TestingPlatform,

/// The differential testing follower node implementation.
#[arg(short, long = "follower", default_value_t = TestingPlatform::Kitchensink)]
pub follower: TestingPlatform,
/// The set of platforms that the differential tests should run on.
#[arg(short = 'p', long = "platform")]
pub platforms: Vec<PlatformIdentifier>,

/// A list of test corpus JSON files to be tested.
#[arg(long = "corpus", short)]

@@ -9,9 +9,9 @@ use std::{
};

use futures::FutureExt;
use revive_dt_common::iterators::FilesWithExtensionIterator;
use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
use revive_dt_config::TestingPlatform;
use revive_dt_common::{iterators::FilesWithExtensionIterator, types::CompilerIdentifier};
use revive_dt_compiler::{Compiler, CompilerOutput, DynSolidityCompiler, Mode};
use revive_dt_core::DynPlatform;
use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};

use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
@@ -22,8 +22,6 @@ use serde::{Deserialize, Serialize};
use tokio::sync::{Mutex, RwLock};
use tracing::{Instrument, debug, debug_span, instrument};

use crate::Platform;

pub struct CachedCompiler<'a> {
/// The cache that stores the compiled contracts.
artifacts_cache: ArtifactsCache,
@@ -57,21 +55,22 @@ impl<'a> CachedCompiler<'a> {
fields(
metadata_file_path = %metadata_file_path.display(),
%mode,
platform = P::config_id().to_string()
platform = %platform.platform_identifier()
),
err
)]
pub async fn compile_contracts<P: Platform>(
pub async fn compile_contracts(
&self,
metadata: &'a Metadata,
metadata_file_path: &'a Path,
mode: Cow<'a, Mode>,
deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
compiler: &P::Compiler,
compiler: &dyn DynSolidityCompiler,
platform: &dyn DynPlatform,
reporter: &ExecutionSpecificReporter,
) -> Result<CompilerOutput> {
let cache_key = CacheKey {
platform_key: P::config_id(),
compiler_identifier: platform.compiler_identifier(),
compiler_version: compiler.version().clone(),
metadata_file_path,
solc_mode: mode.clone(),
@@ -79,7 +78,7 @@ impl<'a> CachedCompiler<'a> {

let compilation_callback = || {
async move {
compile_contracts::<P>(
compile_contracts(
metadata
.directory()
.context("Failed to get metadata directory while preparing compilation")?,
@@ -96,7 +95,7 @@ impl<'a> CachedCompiler<'a> {
}
.instrument(debug_span!(
"Running compilation for the cache key",
cache_key.platform_key = %cache_key.platform_key,
cache_key.compiler_identifier = %cache_key.compiler_identifier,
cache_key.compiler_version = %cache_key.compiler_version,
cache_key.metadata_file_path = %cache_key.metadata_file_path.display(),
cache_key.solc_mode = %cache_key.solc_mode,
@@ -179,12 +178,12 @@ impl<'a> CachedCompiler<'a> {
}
}

async fn compile_contracts<P: Platform>(
async fn compile_contracts(
metadata_directory: impl AsRef<Path>,
mut files_to_compile: impl Iterator<Item = PathBuf>,
mode: &Mode,
deployed_libraries: Option<&HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>>,
compiler: &P::Compiler,
compiler: &dyn DynSolidityCompiler,
reporter: &ExecutionSpecificReporter,
) -> Result<CompilerOutput> {
let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref())
@@ -218,7 +217,7 @@ async fn compile_contracts<P: Platform>(
});

let input = compilation.input().clone();
let output = compilation.try_build(compiler).await;
let output = compilation.dyn_try_build(compiler).await;

match (output.as_ref(), deployed_libraries.is_some()) {
(Ok(output), true) => {
@@ -332,9 +331,8 @@ impl ArtifactsCache {

#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
struct CacheKey<'a> {
/// The platform name that this artifact was compiled for. For example, this could be EVM or
/// PVM.
platform_key: &'a TestingPlatform,
/// The identifier of the used compiler.
compiler_identifier: CompilerIdentifier,

/// The version of the compiler that was used to compile the artifacts.
compiler_version: Version,

@@ -1,7 +1,6 @@
//! The test driver handles the compilation and execution of the test cases.

use std::collections::HashMap;
use std::marker::PhantomData;
use std::path::PathBuf;

use alloy::consensus::EMPTY_ROOT_HASH;
@@ -19,8 +18,9 @@ use alloy::{
rpc::types::{TransactionRequest, trace::geth::DiffMode},
};
use anyhow::Context as _;
use futures::TryStreamExt;
use futures::{TryStreamExt, future::try_join_all};
use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_format::traits::{ResolutionContext, ResolverApi};
use revive_dt_report::ExecutionSpecificReporter;
use semver::Version;
@@ -36,9 +36,7 @@ use revive_dt_node_interaction::EthereumNode;
use tokio::try_join;
use tracing::{Instrument, info, info_span, instrument};

use crate::Platform;

pub struct CaseState<T: Platform> {
pub struct CaseState {
/// A map of all of the compiled contracts for the given metadata file.
compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,

@@ -54,14 +52,9 @@ pub struct CaseState<T: Platform> {

/// The execution reporter.
execution_reporter: ExecutionSpecificReporter,

phantom: PhantomData<T>,
}

impl<T> CaseState<T>
where
T: Platform,
{
impl CaseState {
pub fn new(
compiler_version: Version,
compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
@@ -74,7 +67,6 @@ where
variables: Default::default(),
compiler_version,
execution_reporter,
phantom: PhantomData,
}
}

@@ -82,7 +74,7 @@ where
&mut self,
metadata: &Metadata,
step: &Step,
node: &T::Blockchain,
node: &dyn EthereumNode,
) -> anyhow::Result<StepOutput> {
match step {
Step::FunctionCall(input) => {
@@ -113,8 +105,10 @@ where
&mut self,
metadata: &Metadata,
input: &Input,
node: &T::Blockchain,
node: &dyn EthereumNode,
) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> {
let resolver = node.resolver().await?;

let deployment_receipts = self
.handle_input_contract_deployment(metadata, input, node)
.await
@@ -130,7 +124,12 @@ where
self.handle_input_variable_assignment(input, &tracing_result)
.context("Failed to assign variables from callframe output")?;
let (_, (geth_trace, diff_mode)) = try_join!(
self.handle_input_expectations(input, &execution_receipt, node, &tracing_result),
self.handle_input_expectations(
input,
&execution_receipt,
resolver.as_ref(),
&tracing_result
),
self.handle_input_diff(execution_receipt.transaction_hash, node)
)
.context("Failed while evaluating expectations and diffs in parallel")?;
@@ -142,7 +141,7 @@ where
&mut self,
metadata: &Metadata,
balance_assertion: &BalanceAssertion,
node: &T::Blockchain,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
self.handle_balance_assertion_contract_deployment(metadata, balance_assertion, node)
.await
@@ -158,7 +157,7 @@ where
&mut self,
metadata: &Metadata,
storage_empty: &StorageEmptyAssertion,
node: &T::Blockchain,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
self.handle_storage_empty_assertion_contract_deployment(metadata, storage_empty, node)
.await
@@ -175,7 +174,7 @@ where
&mut self,
metadata: &Metadata,
input: &Input,
node: &T::Blockchain,
node: &dyn EthereumNode,
) -> anyhow::Result<HashMap<ContractInstance, TransactionReceipt>> {
let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
for instance in input.find_all_contract_instances().into_iter() {
@@ -220,7 +219,7 @@ where
&mut self,
input: &Input,
mut deployment_receipts: HashMap<ContractInstance, TransactionReceipt>,
node: &T::Blockchain,
node: &dyn EthereumNode,
) -> anyhow::Result<TransactionReceipt> {
match input.method {
// This input was already executed when `handle_input` was called. We just need to
@@ -229,8 +228,9 @@ where
.remove(&input.instance)
.context("Failed to find deployment receipt for constructor call"),
Method::Fallback | Method::FunctionName(_) => {
let resolver = node.resolver().await?;
let tx = match input
.legacy_transaction(node, self.default_resolution_context())
.legacy_transaction(resolver.as_ref(), self.default_resolution_context())
.await
{
Ok(tx) => tx,
@@ -251,7 +251,7 @@ where
async fn handle_input_call_frame_tracing(
&self,
tx_hash: TxHash,
node: &T::Blockchain,
node: &dyn EthereumNode,
) -> anyhow::Result<CallFrame> {
node.trace_transaction(
tx_hash,
@@ -314,7 +314,7 @@ where
&self,
input: &Input,
execution_receipt: &TransactionReceipt,
resolver: &impl ResolverApi,
resolver: &(impl ResolverApi + ?Sized),
tracing_result: &CallFrame,
) -> anyhow::Result<()> {
// Resolving the `input.expected` into a series of expectations that we can then assert on.
@@ -362,7 +362,7 @@ where
async fn handle_input_expectation_item(
&self,
execution_receipt: &TransactionReceipt,
resolver: &impl ResolverApi,
resolver: &(impl ResolverApi + ?Sized),
expectation: ExpectedOutput,
tracing_result: &CallFrame,
) -> anyhow::Result<()> {
@@ -508,7 +508,7 @@ where
async fn handle_input_diff(
&self,
tx_hash: TxHash,
node: &T::Blockchain,
node: &dyn EthereumNode,
) -> anyhow::Result<(GethTrace, DiffMode)> {
let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig {
diff_mode: Some(true),
@@ -533,7 +533,7 @@ where
&mut self,
metadata: &Metadata,
balance_assertion: &BalanceAssertion,
node: &T::Blockchain,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
let Some(instance) = balance_assertion
.address
@@ -562,11 +562,12 @@ where
expected_balance: amount,
..
}: &BalanceAssertion,
node: &T::Blockchain,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
let resolver = node.resolver().await?;
let address = Address::from_slice(
Calldata::new_compound([address_string])
.calldata(node, self.default_resolution_context())
.calldata(resolver.as_ref(), self.default_resolution_context())
.await?
.get(12..32)
.expect("Can't fail"),
@@ -595,7 +596,7 @@ where
&mut self,
metadata: &Metadata,
storage_empty_assertion: &StorageEmptyAssertion,
node: &T::Blockchain,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
let Some(instance) = storage_empty_assertion
.address
@@ -624,11 +625,12 @@ where
is_storage_empty,
..
}: &StorageEmptyAssertion,
node: &T::Blockchain,
node: &dyn EthereumNode,
) -> anyhow::Result<()> {
let resolver = node.resolver().await?;
let address = Address::from_slice(
Calldata::new_compound([address_string])
.calldata(node, self.default_resolution_context())
.calldata(resolver.as_ref(), self.default_resolution_context())
.await?
.get(12..32)
.expect("Can't fail"),
@@ -667,7 +669,7 @@ where
deployer: Address,
calldata: Option<&Calldata>,
value: Option<EtherValue>,
node: &T::Blockchain,
node: &dyn EthereumNode,
) -> anyhow::Result<(Address, JsonAbi, Option<TransactionReceipt>)> {
if let Some((_, address, abi)) = self.deployed_contracts.get(contract_instance) {
return Ok((*address, abi.clone(), None));
@@ -710,8 +712,9 @@ where
};

if let Some(calldata) = calldata {
let resolver = node.resolver().await?;
let calldata = calldata
.calldata(node, self.default_resolution_context())
.calldata(resolver.as_ref(), self.default_resolution_context())
.await?;
code.extend(calldata);
}
@@ -728,11 +731,7 @@ where
let receipt = match node.execute_transaction(tx).await {
Ok(receipt) => receipt,
Err(error) => {
tracing::error!(
node = std::any::type_name::<T>(),
?error,
"Contract deployment transaction failed."
);
tracing::error!(?error, "Contract deployment transaction failed.");
return Err(error);
}
};
@@ -763,36 +762,23 @@ where
}
}

pub struct CaseDriver<'a, Leader: Platform, Follower: Platform> {
pub struct CaseDriver<'a> {
metadata: &'a Metadata,
case: &'a Case,
leader_node: &'a Leader::Blockchain,
follower_node: &'a Follower::Blockchain,
leader_state: CaseState<Leader>,
follower_state: CaseState<Follower>,
platform_state: Vec<(&'a dyn EthereumNode, PlatformIdentifier, CaseState)>,
}

impl<'a, L, F> CaseDriver<'a, L, F>
where
L: Platform,
F: Platform,
{
impl<'a> CaseDriver<'a> {
#[allow(clippy::too_many_arguments)]
pub fn new(
metadata: &'a Metadata,
case: &'a Case,
leader_node: &'a L::Blockchain,
follower_node: &'a F::Blockchain,
leader_state: CaseState<L>,
follower_state: CaseState<F>,
) -> CaseDriver<'a, L, F> {
platform_state: Vec<(&'a dyn EthereumNode, PlatformIdentifier, CaseState)>,
) -> CaseDriver<'a> {
Self {
metadata,
case,
leader_node,
follower_node,
leader_state,
follower_state,
platform_state,
}
}

@@ -805,42 +791,44 @@ where
.enumerate()
.map(|(idx, v)| (StepIdx::new(idx), v))
{
let (leader_step_output, follower_step_output) = try_join!(
self.leader_state
.handle_step(self.metadata, &step, self.leader_node)
.instrument(info_span!(
"Handling Step",
%step_idx,
target = "Leader",
)),
self.follower_state
.handle_step(self.metadata, &step, self.follower_node)
.instrument(info_span!(
"Handling Step",
%step_idx,
target = "Follower",
))
)?;
// Run this step concurrently across all platforms; short-circuit on first failure
let metadata = self.metadata;
let step_futs =
self.platform_state
.iter_mut()
.map(|(node, platform_id, case_state)| {
let platform_id = *platform_id;
let node_ref = *node;
let step_clone = step.clone();
let span = info_span!(
"Handling Step",
%step_idx,
platform = %platform_id,
);
async move {
case_state
.handle_step(metadata, &step_clone, node_ref)
.await
.map_err(|e| (platform_id, e))
}
.instrument(span)
});

match (leader_step_output, follower_step_output) {
(StepOutput::FunctionCall(..), StepOutput::FunctionCall(..)) => {
// TODO: We need to actually work out how/if we will compare the diff between
// the leader and the follower. The diffs are almost guaranteed to be different
// from leader and follower and therefore without an actual strategy for this
// we have something that's guaranteed to fail. Even a simple call to some
// contract will produce two non-equal diffs because on the leader the contract
// has address X and on the follower it has address Y. On the leader contract X
// contains address A in the state and on the follower it contains address B. So
// this isn't exactly a straightforward thing to do and I'm not even sure that
// it's possible to do. Once we have an actual strategy for doing the diffs we
// will implement it here. Until then, this remains empty.
match try_join_all(step_futs).await {
Ok(_outputs) => {
// All platforms succeeded for this step
steps_executed += 1;
}
Err((platform_id, error)) => {
tracing::error!(
%step_idx,
platform = %platform_id,
?error,
"Step failed on platform",
);
return Err(error);
}
(StepOutput::BalanceAssertion, StepOutput::BalanceAssertion) => {}
(StepOutput::StorageEmptyAssertion, StepOutput::StorageEmptyAssertion) => {}
_ => unreachable!("The two step outputs can not be of a different kind"),
}

steps_executed += 1;
}

Ok(steps_executed)

+27
-2
@@ -103,6 +103,7 @@ pub trait DynPlatform {
) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn DynSolidityCompiler>>>>>;
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct GethEvmSolcPlatform;

impl DynPlatform for GethEvmSolcPlatform {
@@ -147,6 +148,7 @@ impl DynPlatform for GethEvmSolcPlatform {
}
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct KitchensinkPolkavmResolcPlatform;

impl DynPlatform for KitchensinkPolkavmResolcPlatform {
@@ -159,7 +161,7 @@ impl DynPlatform for KitchensinkPolkavmResolcPlatform {
}

fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::Polkavm
VmIdentifier::PolkaVM
}

fn compiler_identifier(&self) -> CompilerIdentifier {
@@ -198,6 +200,7 @@ impl DynPlatform for KitchensinkPolkavmResolcPlatform {
}
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct KitchensinkRevmSolcPlatform;

impl DynPlatform for KitchensinkRevmSolcPlatform {
@@ -249,6 +252,7 @@ impl DynPlatform for KitchensinkRevmSolcPlatform {
}
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct ReviveDevNodePolkavmResolcPlatform;

impl DynPlatform for ReviveDevNodePolkavmResolcPlatform {
@@ -261,7 +265,7 @@ impl DynPlatform for ReviveDevNodePolkavmResolcPlatform {
}

fn vm_identifier(&self) -> VmIdentifier {
VmIdentifier::Polkavm
VmIdentifier::PolkaVM
}

fn compiler_identifier(&self) -> CompilerIdentifier {
@@ -300,6 +304,7 @@ impl DynPlatform for ReviveDevNodePolkavmResolcPlatform {
}
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)]
pub struct ReviveDevNodeRevmSolcPlatform;

impl DynPlatform for ReviveDevNodeRevmSolcPlatform {
@@ -371,6 +376,26 @@ impl From<PlatformIdentifier> for Box<dyn DynPlatform> {
}
}

impl From<PlatformIdentifier> for &dyn DynPlatform {
fn from(value: PlatformIdentifier) -> Self {
match value {
PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform as &dyn DynPlatform,
PlatformIdentifier::KitchensinkPolkavmResolc => {
&KitchensinkPolkavmResolcPlatform as &dyn DynPlatform
}
PlatformIdentifier::KitchensinkRevmSolc => {
&KitchensinkRevmSolcPlatform as &dyn DynPlatform
}
PlatformIdentifier::ReviveDevNodePolkavmResolc => {
&ReviveDevNodePolkavmResolcPlatform as &dyn DynPlatform
}
PlatformIdentifier::ReviveDevNodeRevmSolc => {
&ReviveDevNodeRevmSolcPlatform as &dyn DynPlatform
}
}
}
}

fn spawn_node<T: Node + EthereumNode + Send + Sync>(
mut node: T,
genesis: Genesis,

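Editorial note (not part of the diff): the platform structs above are zero-sized unit types, so the new `From<PlatformIdentifier> for &dyn DynPlatform` impl can hand out references to shared instances, and the CLI-selected identifiers can be deduplicated and converted up front before the driver starts. A hedged, self-contained sketch of that pattern with simplified stand-in types:

use std::collections::BTreeSet;

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum PlatformIdentifier {
    GethEvmSolc,
    KitchensinkPolkavmResolc,
}

trait DynPlatform {
    fn name(&self) -> &'static str;
}

struct GethEvmSolcPlatform;
impl DynPlatform for GethEvmSolcPlatform {
    fn name(&self) -> &'static str { "geth-evm-solc" }
}

struct KitchensinkPolkavmResolcPlatform;
impl DynPlatform for KitchensinkPolkavmResolcPlatform {
    fn name(&self) -> &'static str { "kitchensink-polkavm-resolc" }
}

// Unit structs carry no state, so a promoted &'static reference is enough.
impl From<PlatformIdentifier> for &'static dyn DynPlatform {
    fn from(value: PlatformIdentifier) -> Self {
        match value {
            PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform,
            PlatformIdentifier::KitchensinkPolkavmResolc => &KitchensinkPolkavmResolcPlatform,
        }
    }
}

fn main() {
    // Deduplicate the CLI selection, then convert to trait objects for the driver.
    let selected = [PlatformIdentifier::GethEvmSolc, PlatformIdentifier::GethEvmSolc];
    let platforms: Vec<&'static dyn DynPlatform> =
        BTreeSet::from(selected).into_iter().map(Into::into).collect();
    assert_eq!(platforms.len(), 1);
    println!("{}", platforms[0].name());
}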
+285
-342
@@ -1,8 +1,9 @@
mod cached_compiler;
mod pool;

use std::{
borrow::Cow,
collections::{BTreeMap, HashMap},
collections::{BTreeSet, HashMap},
io::{BufWriter, Write, stderr},
path::Path,
sync::Arc,
@@ -20,20 +21,19 @@ use futures::{Stream, StreamExt};
use indexmap::{IndexMap, indexmap};
use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{
NodeDesignation, ReportAggregator, Reporter, ReporterEvent, TestCaseStatus,
ExecutionSpecificReporter, ReportAggregator, Reporter, ReporterEvent, TestCaseStatus,
TestSpecificReporter, TestSpecifier,
};
use schemars::schema_for;
use serde_json::{Value, json};
use tokio::try_join;
use tracing::{debug, error, info, info_span, instrument};
use tracing_subscriber::{EnvFilter, FmtSubscriber};

use revive_dt_common::{iterators::EitherIter, types::Mode};
use revive_dt_compiler::{CompilerOutput, SolidityCompiler};
use revive_dt_compiler::DynSolidityCompiler;
use revive_dt_config::{Context, *};
use revive_dt_core::{
Geth, Kitchensink, Platform,
DynPlatform,
driver::{CaseDriver, CaseState},
};
use revive_dt_format::{
@@ -43,9 +43,9 @@ use revive_dt_format::{
metadata::{ContractPathAndIdent, Metadata, MetadataFile},
mode::ParsedMode,
};
use revive_dt_node::{Node, pool::NodePool};

use crate::cached_compiler::CachedCompiler;
use crate::pool::NodePool;

fn main() -> anyhow::Result<()> {
let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default()
@@ -133,32 +133,28 @@ fn collect_corpora(
Ok(corpora)
}

async fn run_driver<L, F>(
async fn run_driver(
context: ExecutionContext,
metadata_files: &[MetadataFile],
reporter: Reporter,
report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
) -> anyhow::Result<()>
where
L: Platform,
F: Platform,
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{
let leader_nodes = NodePool::<L::Blockchain>::new(context.clone())
.context("Failed to initialize leader node pool")?;
let follower_nodes = NodePool::<F::Blockchain>::new(context.clone())
.context("Failed to initialize follower node pool")?;
platforms: Vec<&dyn DynPlatform>,
) -> anyhow::Result<()> {
let mut nodes = Vec::<(&dyn DynPlatform, NodePool)>::new();
for platform in platforms.into_iter() {
let pool = NodePool::new(Context::ExecuteTests(Box::new(context.clone())), platform)
.context("Failed to initialize follower node pool")?;
nodes.push((platform, pool));
}

let tests_stream = tests_stream(
&context,
metadata_files.iter(),
&leader_nodes,
&follower_nodes,
nodes.as_slice(),
reporter.clone(),
)
.await;
let driver_task = start_driver_task::<L, F>(&context, tests_stream)
let driver_task = start_driver_task(&context, tests_stream)
.await
.context("Failed to start driver task")?;
let cli_reporting_task = start_cli_reporting_task(reporter);
@@ -169,19 +165,12 @@ where
Ok(())
}

async fn tests_stream<'a, L, F>(
async fn tests_stream<'a>(
args: &ExecutionContext,
metadata_files: impl IntoIterator<Item = &'a MetadataFile> + Clone,
leader_node_pool: &'a NodePool<L::Blockchain>,
follower_node_pool: &'a NodePool<F::Blockchain>,
nodes: &'a [(&dyn DynPlatform, NodePool)],
reporter: Reporter,
) -> impl Stream<Item = Test<'a, L, F>>
where
L: Platform,
F: Platform,
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
{
) -> impl Stream<Item = Test<'a>> {
let tests = metadata_files
.into_iter()
.flat_map(|metadata_file| {
@@ -231,35 +220,36 @@ where
stream::iter(tests.into_iter())
.filter_map(
move |(metadata_file, case_idx, case, mode, reporter)| async move {
let leader_compiler = <L::Compiler as SolidityCompiler>::new(
args,
mode.version.clone().map(Into::into),
)
.await
.inspect_err(|err| error!(?err, "Failed to instantiate the leader compiler"))
.ok()?;
let mut platforms = Vec::new();
for (platform, node_pool) in nodes.iter() {
let node = node_pool.round_robbin();
let compiler = platform
.new_compiler(
Context::ExecuteTests(Box::new(args.clone())),
mode.version.clone().map(Into::into),
)
.await
.inspect_err(|err| {
error!(
?err,
platform_identifier = %platform.platform_identifier(),
"Failed to instantiate the compiler"
)
})
.ok()?;

let follower_compiler = <F::Compiler as SolidityCompiler>::new(
args,
mode.version.clone().map(Into::into),
)
.await
.inspect_err(|err| error!(?err, "Failed to instantiate the follower compiler"))
.ok()?;
let reporter = reporter
.execution_specific_reporter(node.id(), platform.platform_identifier());
platforms.push((*platform, node, compiler, reporter));
}

let leader_node = leader_node_pool.round_robbin();
let follower_node = follower_node_pool.round_robbin();

Some(Test::<L, F> {
Some(Test {
metadata: metadata_file,
metadata_file_path: metadata_file.metadata_file_path.as_path(),
mode: mode.clone(),
case_idx: CaseIdx::new(case_idx),
case,
leader_node,
follower_node,
leader_compiler,
follower_compiler,
platforms,
reporter,
})
},
@@ -293,18 +283,10 @@ where
})
}

async fn start_driver_task<'a, L, F>(
async fn start_driver_task<'a>(
context: &ExecutionContext,
tests: impl Stream<Item = Test<'a, L, F>>,
) -> anyhow::Result<impl Future<Output = ()>>
where
L: Platform,
F: Platform,
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
L::Compiler: 'a,
F::Compiler: 'a,
{
tests: impl Stream<Item = Test<'a>>,
) -> anyhow::Result<impl Future<Output = ()>> {
info!("Starting driver task");

let cached_compiler = Arc::new(
@@ -327,23 +309,18 @@ where
let cached_compiler = cached_compiler.clone();

async move {
test.reporter
.report_leader_node_assigned_event(
test.leader_node.id(),
*L::config_id(),
test.leader_node.connection_string(),
)
.expect("Can't fail");
test.reporter
.report_follower_node_assigned_event(
test.follower_node.id(),
*F::config_id(),
test.follower_node.connection_string(),
)
.expect("Can't fail");
for (platform, node, _, _) in test.platforms.iter() {
test.reporter
.report_node_assigned_event(
node.id(),
platform.platform_identifier(),
node.connection_string(),
)
.expect("Can't fail");
}

let reporter = test.reporter.clone();
let result = handle_case_driver::<L, F>(test, cached_compiler).await;
let result = handle_case_driver(&test, cached_compiler).await;

match result {
Ok(steps_executed) => reporter
@@ -449,230 +426,174 @@ async fn start_cli_reporting_task(reporter: Reporter) {
mode = %test.mode,
case_idx = %test.case_idx,
case_name = test.case.name.as_deref().unwrap_or("Unnamed Case"),
leader_node = test.leader_node.id(),
follower_node = test.follower_node.id(),
)
)]
async fn handle_case_driver<'a, L, F>(
test: Test<'a, L, F>,
async fn handle_case_driver<'a>(
test: &Test<'a>,
cached_compiler: Arc<CachedCompiler<'a>>,
) -> anyhow::Result<usize>
where
L: Platform,
F: Platform,
L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
L::Compiler: 'a,
F::Compiler: 'a,
{
let leader_reporter = test
.reporter
.execution_specific_reporter(test.leader_node.id(), NodeDesignation::Leader);
let follower_reporter = test
.reporter
.execution_specific_reporter(test.follower_node.id(), NodeDesignation::Follower);
) -> anyhow::Result<usize> {
let platform_state = stream::iter(test.platforms.iter())
// Compiling the pre-link contracts.
.filter_map(|(platform, node, compiler, reporter)| {
let cached_compiler = cached_compiler.clone();

let (
CompilerOutput {
contracts: leader_pre_link_contracts,
},
CompilerOutput {
contracts: follower_pre_link_contracts,
},
) = try_join!(
cached_compiler.compile_contracts::<L>(
test.metadata,
test.metadata_file_path,
test.mode.clone(),
None,
&test.leader_compiler,
&leader_reporter,
),
cached_compiler.compile_contracts::<F>(
test.metadata,
test.metadata_file_path,
test.mode.clone(),
None,
&test.follower_compiler,
&follower_reporter
)
)
.context("Failed to compile pre-link contracts for leader/follower in parallel")?;

let mut leader_deployed_libraries = None::<HashMap<_, _>>;
let mut follower_deployed_libraries = None::<HashMap<_, _>>;
let mut contract_sources = test
.metadata
.contract_sources()
.context("Failed to retrieve contract sources from metadata")?;
for library_instance in test
.metadata
.libraries
.iter()
.flatten()
.flat_map(|(_, map)| map.values())
{
debug!(%library_instance, "Deploying Library Instance");

let ContractPathAndIdent {
contract_source_path: library_source_path,
contract_ident: library_ident,
} = contract_sources
.remove(library_instance)
.context("Failed to find the contract source")?;

let (leader_code, leader_abi) = leader_pre_link_contracts
.get(&library_source_path)
.and_then(|contracts| contracts.get(library_ident.as_str()))
.context("Declared library was not compiled")?;
let (follower_code, follower_abi) = follower_pre_link_contracts
.get(&library_source_path)
.and_then(|contracts| contracts.get(library_ident.as_str()))
.context("Declared library was not compiled")?;

let leader_code = match alloy::hex::decode(leader_code) {
Ok(code) => code,
Err(error) => {
anyhow::bail!("Failed to hex-decode the byte code {}", error)
async move {
let compiler_output = cached_compiler
.compile_contracts(
test.metadata,
test.metadata_file_path,
test.mode.clone(),
None,
compiler.as_ref(),
*platform,
reporter,
)
.await
.inspect_err(|err| {
error!(
%err,
platform_identifier = %platform.platform_identifier(),
"Pre-linking compilation failed"
)
})
.ok()?;
Some((test, platform, node, compiler, reporter, compiler_output))
}
};
let follower_code = match alloy::hex::decode(follower_code) {
Ok(code) => code,
Err(error) => {
anyhow::bail!("Failed to hex-decode the byte code {}", error)
}
};
})
// Deploying the libraries for the platform.
.filter_map(
|(test, platform, node, compiler, reporter, compiler_output)| async move {
let mut deployed_libraries = None::<HashMap<_, _>>;
let mut contract_sources = test
.metadata
.contract_sources()
.inspect_err(|err| {
error!(
%err,
platform_identifier = %platform.platform_identifier(),
"Failed to retrieve contract sources from metadata"
)
})
.ok()?;
for library_instance in test
.metadata
.libraries
.iter()
.flatten()
.flat_map(|(_, map)| map.values())
{
debug!(%library_instance, "Deploying Library Instance");

// Getting the deployer address from the cases themselves. This is to ensure that we're
// doing the deployments from different accounts and therefore we're not slowed down by
// the nonce.
let deployer_address = test
.case
.steps
.iter()
.filter_map(|step| match step {
Step::FunctionCall(input) => Some(input.caller),
Step::BalanceAssertion(..) => None,
Step::StorageEmptyAssertion(..) => None,
})
.next()
.unwrap_or(Input::default_caller());
let leader_tx = TransactionBuilder::<Ethereum>::with_deploy_code(
TransactionRequest::default().from(deployer_address),
leader_code,
);
let follower_tx = TransactionBuilder::<Ethereum>::with_deploy_code(
TransactionRequest::default().from(deployer_address),
follower_code,
);
let ContractPathAndIdent {
contract_source_path: library_source_path,
contract_ident: library_ident,
} = contract_sources.remove(library_instance)?;

let (leader_receipt, follower_receipt) = try_join!(
test.leader_node.execute_transaction(leader_tx),
test.follower_node.execute_transaction(follower_tx)
)?;
let (code, leader_abi) = compiler_output
.contracts
.get(&library_source_path)
.and_then(|contracts| contracts.get(library_ident.as_str()))?;

debug!(
?library_instance,
library_address = ?leader_receipt.contract_address,
"Deployed library to leader"
);
debug!(
?library_instance,
library_address = ?follower_receipt.contract_address,
"Deployed library to follower"
);
let code = alloy::hex::decode(code).ok()?;

let leader_library_address = leader_receipt
.contract_address
.context("Contract deployment didn't return an address")?;
let follower_library_address = follower_receipt
.contract_address
.context("Contract deployment didn't return an address")?;
// Getting the deployer address from the cases themselves. This is to ensure
// that we're doing the deployments from different accounts and therefore we're
// not slowed down by the nonce.
let deployer_address = test
.case
.steps
.iter()
.filter_map(|step| match step {
Step::FunctionCall(input) => Some(input.caller),
Step::BalanceAssertion(..) => None,
Step::StorageEmptyAssertion(..) => None,
})
.next()
.unwrap_or(Input::default_caller());
let tx = TransactionBuilder::<Ethereum>::with_deploy_code(
TransactionRequest::default().from(deployer_address),
code,
);
let receipt = node
.execute_transaction(tx)
.await
.inspect_err(|err| {
error!(
%err,
%library_instance,
platform_identifier = %platform.platform_identifier(),
"Failed to deploy the library"
)
})
.ok()?;

leader_deployed_libraries.get_or_insert_default().insert(
library_instance.clone(),
(
library_ident.clone(),
leader_library_address,
leader_abi.clone(),
),
);
follower_deployed_libraries.get_or_insert_default().insert(
library_instance.clone(),
(
library_ident,
follower_library_address,
follower_abi.clone(),
),
);
}
if let Some(ref leader_deployed_libraries) = leader_deployed_libraries {
leader_reporter.report_libraries_deployed_event(
leader_deployed_libraries
.clone()
.into_iter()
.map(|(key, (_, address, _))| (key, address))
.collect::<BTreeMap<_, _>>(),
)?;
}
if let Some(ref follower_deployed_libraries) = follower_deployed_libraries {
follower_reporter.report_libraries_deployed_event(
follower_deployed_libraries
.clone()
.into_iter()
.map(|(key, (_, address, _))| (key, address))
.collect::<BTreeMap<_, _>>(),
)?;
}
debug!(
?library_instance,
platform_identifier = %platform.platform_identifier(),
"Deployed library"
);

let (
CompilerOutput {
contracts: leader_post_link_contracts,
},
CompilerOutput {
contracts: follower_post_link_contracts,
},
) = try_join!(
cached_compiler.compile_contracts::<L>(
test.metadata,
test.metadata_file_path,
test.mode.clone(),
leader_deployed_libraries.as_ref(),
&test.leader_compiler,
&leader_reporter,
),
cached_compiler.compile_contracts::<F>(
test.metadata,
test.metadata_file_path,
test.mode.clone(),
follower_deployed_libraries.as_ref(),
&test.follower_compiler,
&follower_reporter
let library_address = receipt.contract_address?;

deployed_libraries.get_or_insert_default().insert(
library_instance.clone(),
(library_ident.clone(), library_address, leader_abi.clone()),
);
}

Some((
test,
platform,
node,
compiler,
reporter,
compiler_output,
deployed_libraries,
))
},
)
)
.context("Failed to compile post-link contracts for leader/follower in parallel")?;
// Compiling the post-link contracts.
.filter_map(
|(test, platform, node, compiler, reporter, _, deployed_libraries)| {
let cached_compiler = cached_compiler.clone();

let leader_state = CaseState::<L>::new(
test.leader_compiler.version().clone(),
leader_post_link_contracts,
leader_deployed_libraries.unwrap_or_default(),
leader_reporter,
);
let follower_state = CaseState::<F>::new(
test.follower_compiler.version().clone(),
follower_post_link_contracts,
follower_deployed_libraries.unwrap_or_default(),
follower_reporter,
);
async move {
let compiler_output = cached_compiler
.compile_contracts(
test.metadata,
test.metadata_file_path,
test.mode.clone(),
deployed_libraries.as_ref(),
compiler.as_ref(),
*platform,
reporter,
)
.await
.inspect_err(|err| {
error!(
%err,
platform_identifier = %platform.platform_identifier(),
"Pre-linking compilation failed"
)
})
.ok()?;

let mut driver = CaseDriver::<L, F>::new(
test.metadata,
test.case,
test.leader_node,
test.follower_node,
leader_state,
follower_state,
);
let case_state = CaseState::new(
compiler.version().clone(),
compiler_output.contracts,
deployed_libraries.unwrap_or_default(),
reporter.clone(),
);

Some((*node, platform.platform_identifier(), case_state))
}
},
)
// Collect
.collect::<Vec<_>>()
.await;

let mut driver = CaseDriver::new(test.metadata, test.case, platform_state);
driver
.execute()
.await
@@ -685,36 +606,38 @@ async fn execute_corpus(
reporter: Reporter,
report_aggregator_task: impl Future<Output = anyhow::Result<()>>,
) -> anyhow::Result<()> {
match (&context.leader, &context.follower) {
(TestingPlatform::Geth, TestingPlatform::Kitchensink) => {
run_driver::<Geth, Kitchensink>(context, tests, reporter, report_aggregator_task)
.await?
}
(TestingPlatform::Geth, TestingPlatform::Geth) => {
run_driver::<Geth, Geth>(context, tests, reporter, report_aggregator_task).await?
}
_ => unimplemented!(),
}
let platforms = context
.platforms
.iter()
.copied()
.collect::<BTreeSet<_>>()
.into_iter()
.map(Into::<&dyn DynPlatform>::into)
.collect::<Vec<_>>();

run_driver(context, tests, reporter, report_aggregator_task, platforms).await?;

Ok(())
}

/// this represents a single "test"; a mode, path and collection of cases.
#[derive(Clone)]
struct Test<'a, L: Platform, F: Platform> {
#[allow(clippy::type_complexity)]
struct Test<'a> {
metadata: &'a MetadataFile,
metadata_file_path: &'a Path,
mode: Cow<'a, Mode>,
case_idx: CaseIdx,
case: &'a Case,
leader_node: &'a <L as Platform>::Blockchain,
follower_node: &'a <F as Platform>::Blockchain,
leader_compiler: L::Compiler,
follower_compiler: F::Compiler,
platforms: Vec<(
&'a dyn DynPlatform,
&'a dyn EthereumNode,
Box<dyn DynSolidityCompiler>,
ExecutionSpecificReporter,
)>,
reporter: TestSpecificReporter,
}

impl<'a, L: Platform, F: Platform> Test<'a, L, F> {
impl<'a> Test<'a> {
/// Checks if this test can be ran with the current configuration.
pub fn check_compatibility(&self) -> TestCheckFunctionResult {
self.check_metadata_file_ignored()?;
@@ -743,24 +666,39 @@ impl<'a, L: Platform, F: Platform> Test<'a, L, F> {
}
}

/// Checks if the leader and the follower both support the desired targets in the metadata file.
/// Checks if the platforms all support the desired targets in the metadata file.
fn check_target_compatibility(&self) -> TestCheckFunctionResult {
let leader_support =
<L::Blockchain as Node>::matches_target(self.metadata.targets.as_deref());
let follower_support =
<F::Blockchain as Node>::matches_target(self.metadata.targets.as_deref());
let is_allowed = leader_support && follower_support;
let mut error_map = indexmap! {
"test_desired_targets" => json!(self.metadata.targets.as_ref()),
};
let mut is_allowed = true;
for (platform, ..) in self.platforms.iter() {
let is_allowed_for_platform = match self.metadata.targets.as_ref() {
None => true,
Some(targets) => {
let mut target_matches = false;
for target in targets.iter() {
if &platform.vm_identifier() == target {
target_matches = true;
break;
}
}
target_matches
}
};
is_allowed &= is_allowed_for_platform;
error_map.insert(
platform.platform_identifier().into(),
json!(is_allowed_for_platform),
);
}

if is_allowed {
Ok(())
} else {
Err((
"Either the leader or the follower do not support the target desired by the test.",
indexmap! {
"test_desired_targets" => json!(self.metadata.targets.as_ref()),
"leader_support" => json!(leader_support),
"follower_support" => json!(follower_support),
},
"One of the platforms do do not support the targets allowed by the test.",
error_map,
))
}
}
@@ -771,46 +709,51 @@ impl<'a, L: Platform, F: Platform> Test<'a, L, F> {
return Ok(());
};

let leader_support = evm_version_requirement
.matches(&<L::Blockchain as revive_dt_node::Node>::evm_version());
let follower_support = evm_version_requirement
.matches(&<F::Blockchain as revive_dt_node::Node>::evm_version());
let is_allowed = leader_support && follower_support;
let mut error_map = indexmap! {
"test_desired_evm_version" => json!(self.metadata.required_evm_version),
};
let mut is_allowed = true;
for (platform, node, ..) in self.platforms.iter() {
let is_allowed_for_platform = evm_version_requirement.matches(&node.evm_version());
is_allowed &= is_allowed_for_platform;
error_map.insert(
platform.platform_identifier().into(),
json!(is_allowed_for_platform),
);
}

if is_allowed {
Ok(())
} else {
Err((
"EVM version is incompatible with either the leader or the follower.",
indexmap! {
"test_desired_evm_version" => json!(self.metadata.required_evm_version),
"leader_support" => json!(leader_support),
"follower_support" => json!(follower_support),
},
"EVM version is incompatible for the platforms specified",
error_map,
))
}
}

/// Checks if the leader and follower compilers support the mode that the test is for.
fn check_compiler_compatibility(&self) -> TestCheckFunctionResult {
let leader_support = self
.leader_compiler
.supports_mode(self.mode.optimize_setting, self.mode.pipeline);
let follower_support = self
.follower_compiler
.supports_mode(self.mode.optimize_setting, self.mode.pipeline);
let is_allowed = leader_support && follower_support;
let mut error_map = indexmap! {
"test_desired_evm_version" => json!(self.metadata.required_evm_version),
};
let mut is_allowed = true;
for (platform, _, compiler, ..) in self.platforms.iter() {
let is_allowed_for_platform =
compiler.supports_mode(self.mode.optimize_setting, self.mode.pipeline);
is_allowed &= is_allowed_for_platform;
error_map.insert(
platform.platform_identifier().into(),
json!(is_allowed_for_platform),
);
}

if is_allowed {
Ok(())
} else {
Err((
"Compilers do not support this mode either for the leader or for the follower.",
indexmap! {
"mode" => json!(self.mode),
"leader_support" => json!(leader_support),
"follower_support" => json!(follower_support),
},
"Compilers do not support this mode either for the provided platforms.",
error_map,
))
}
}

@@ -0,0 +1,52 @@
//! This crate implements concurrent handling of testing node.

use std::sync::atomic::{AtomicUsize, Ordering};

use anyhow::Context as _;
use revive_dt_config::*;
use revive_dt_core::DynPlatform;
use revive_dt_node_interaction::EthereumNode;

/// The node pool starts one or more [Node] which then can be accessed
/// in a round robbin fashion.
pub struct NodePool {
next: AtomicUsize,
nodes: Vec<Box<dyn EthereumNode + Send + Sync>>,
}

impl NodePool {
/// Create a new Pool. This will start as many nodes as there are workers in `config`.
pub fn new(context: Context, platform: &dyn DynPlatform) -> anyhow::Result<Self> {
let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
let nodes = concurrency_configuration.number_of_nodes;

let mut handles = Vec::with_capacity(nodes);
for _ in 0..nodes {
let context = context.clone();
handles.push(platform.new_node(context)?);
}

let mut nodes = Vec::with_capacity(nodes);
for handle in handles {
nodes.push(
handle
.join()
.map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))
.context("Failed to join node spawn thread")?
.map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))
.context("Node failed to spawn")?,
);
}

Ok(Self {
nodes,
next: Default::default(),
})
}

/// Get a handle to the next node.
pub fn round_robbin(&self) -> &dyn EthereumNode {
let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len();
self.nodes.get(current).unwrap().as_ref()
}
}
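Editorial note (not part of the diff): the new `pool.rs` gives each selected platform its own `NodePool`; `run_driver` builds one pool per platform and each test then takes a node with `round_robbin()`. A small, self-contained sketch of that round-robin pattern, using a simplified node type rather than the real `EthereumNode` trait objects:

use std::sync::atomic::{AtomicUsize, Ordering};

// Simplified stand-in for a spawned node handle.
struct Node {
    id: usize,
}

struct NodePool {
    next: AtomicUsize,
    nodes: Vec<Node>,
}

impl NodePool {
    fn new(count: usize) -> Self {
        Self {
            next: AtomicUsize::new(0),
            nodes: (0..count).map(|id| Node { id }).collect(),
        }
    }

    // Hand out nodes in round-robin order; the atomic counter makes this safe
    // to call from many concurrently running test futures.
    fn round_robbin(&self) -> &Node {
        let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len();
        &self.nodes[current]
    }
}

fn main() {
    let pool = NodePool::new(2);
    assert_eq!(pool.round_robbin().id, 0);
    assert_eq!(pool.round_robbin().id, 1);
    assert_eq!(pool.round_robbin().id, 0);
}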
@@ -308,7 +308,7 @@ impl Input {

pub async fn encoded_input(
&self,
resolver: &impl ResolverApi,
resolver: &(impl ResolverApi + ?Sized),
context: ResolutionContext<'_>,
) -> anyhow::Result<Bytes> {
match self.method {
@@ -377,7 +377,7 @@ impl Input {
/// Parse this input into a legacy transaction.
pub async fn legacy_transaction(
&self,
resolver: &impl ResolverApi,
resolver: &(impl ResolverApi + ?Sized),
context: ResolutionContext<'_>,
) -> anyhow::Result<TransactionRequest> {
let input_data = self
@@ -466,7 +466,7 @@ impl Calldata {

pub async fn calldata(
&self,
resolver: &impl ResolverApi,
resolver: &(impl ResolverApi + ?Sized),
context: ResolutionContext<'_>,
) -> anyhow::Result<Vec<u8>> {
let mut buffer = Vec::<u8>::with_capacity(self.size_requirement());
@@ -478,7 +478,7 @@ impl Calldata {
pub async fn calldata_into_slice(
&self,
buffer: &mut Vec<u8>,
resolver: &impl ResolverApi,
resolver: &(impl ResolverApi + ?Sized),
context: ResolutionContext<'_>,
) -> anyhow::Result<()> {
match self {
@@ -515,7 +515,7 @@ impl Calldata {
pub async fn is_equivalent(
&self,
other: &[u8],
resolver: &impl ResolverApi,
resolver: &(impl ResolverApi + ?Sized),
context: ResolutionContext<'_>,
) -> anyhow::Result<bool> {
match self {
@@ -557,7 +557,7 @@ impl CalldataItem {
#[instrument(level = "info", skip_all, err)]
async fn resolve(
&self,
resolver: &impl ResolverApi,
resolver: &(impl ResolverApi + ?Sized),
context: ResolutionContext<'_>,
) -> anyhow::Result<U256> {
let mut stack = Vec::<CalldataToken<U256>>::new();
@@ -662,7 +662,7 @@ impl<T: AsRef<str>> CalldataToken<T> {
/// https://github.com/matter-labs/era-compiler-tester/blob/0ed598a27f6eceee7008deab3ff2311075a2ec69/compiler_tester/src/test/case/input/value.rs#L43-L146
async fn resolve(
self,
resolver: &impl ResolverApi,
resolver: &(impl ResolverApi + ?Sized),
context: ResolutionContext<'_>,
) -> anyhow::Result<CalldataToken<U256>> {
match self {
@@ -1010,7 +1010,7 @@ mod tests {
async fn resolve_calldata_item(
input: &str,
deployed_contracts: &HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
resolver: &impl ResolverApi,
resolver: &(impl ResolverApi + ?Sized),
) -> anyhow::Result<U256> {
let context = ResolutionContext::default().with_deployed_contracts(deployed_contracts);
CalldataItem::new(input).resolve(resolver, context).await

@@ -13,8 +13,10 @@ use serde::{Deserialize, Serialize};
|
||||
|
||||
use revive_common::EVMVersion;
|
||||
use revive_dt_common::{
|
||||
cached_fs::read_to_string, iterators::FilesWithExtensionIterator, macros::define_wrapper_type,
|
||||
types::Mode,
|
||||
cached_fs::read_to_string,
|
||||
iterators::FilesWithExtensionIterator,
|
||||
macros::define_wrapper_type,
|
||||
types::{Mode, VmIdentifier},
|
||||
};
|
||||
use tracing::error;
|
||||
|
||||
@@ -81,7 +83,7 @@ pub struct Metadata {
|
||||
/// example, if we wish for the metadata file's cases to only be run on PolkaVM then we'd
|
||||
/// specify a target of "PolkaVM" in here.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub targets: Option<Vec<String>>,
|
||||
pub targets: Option<Vec<VmIdentifier>>,
|
||||
|
||||
/// A vector of the test cases and workloads contained within the metadata file. This is their
|
||||
/// primary description.
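
With `targets` now typed as `Option<Vec<VmIdentifier>>` instead of `Option<Vec<String>>`, an unknown target becomes a deserialization error rather than a string that silently never matches a node. A rough serde illustration with stand-in types (the crate's exact enum variants and rename rules may differ):

    use serde::Deserialize;

    #[derive(Debug, Deserialize, PartialEq)]
    #[serde(rename_all = "lowercase")]
    enum VmTarget {
        Evm,
        Eravm,
        Polkavm,
    }

    #[derive(Debug, Deserialize)]
    struct Metadata {
        // `None` keeps the old behaviour of "run on every platform".
        targets: Option<Vec<VmTarget>>,
    }

    fn main() -> Result<(), serde_json::Error> {
        let metadata: Metadata = serde_json::from_str(r#"{ "targets": ["polkavm"] }"#)?;
        assert_eq!(metadata.targets, Some(vec![VmTarget::Polkavm]));

        // A typo such as "polkvm" is now rejected at parse time instead of
        // never matching any node.
        assert!(serde_json::from_str::<Metadata>(r#"{ "targets": ["polkvm"] }"#).is_err());
        Ok(())
    }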

@@ -9,6 +9,9 @@ repository.workspace = true
rust-version.workspace = true

[dependencies]
revive-common = { workspace = true }

revive-dt-common = { workspace = true }
revive-dt-format = { workspace = true }

alloy = { workspace = true }

@@ -1,16 +1,24 @@
//! This crate implements all node interactions.

use std::pin::Pin;
use std::sync::Arc;

use alloy::primitives::{Address, StorageKey, TxHash, U256};
use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace};
use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest};
use anyhow::Result;

use revive_common::EVMVersion;
use revive_dt_format::traits::ResolverApi;

/// An interface for all interactions with Ethereum compatible nodes.
#[allow(clippy::type_complexity)]
pub trait EthereumNode {
    fn id(&self) -> usize;

    /// Returns the nodes connection string.
    fn connection_string(&self) -> &str;

    /// Execute the [TransactionRequest] and return a [TransactionReceipt].
    fn execute_transaction(
        &self,
@@ -38,5 +46,8 @@ pub trait EthereumNode {
    ) -> Pin<Box<dyn Future<Output = Result<EIP1186AccountProofResponse>> + '_>>;

    /// Returns the resolver that is to use with this ethereum node.
    fn resolver(&self) -> Pin<Box<dyn Future<Output = Result<Box<dyn ResolverApi + '_>>> + '_>>;
    fn resolver(&self) -> Pin<Box<dyn Future<Output = Result<Arc<dyn ResolverApi + '_>>> + '_>>;

    /// Returns the EVM version of the node.
    fn evm_version(&self) -> EVMVersion;
}
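
The signature change in `resolver` is the dyn-infra switch at the trait level: the future now yields an `Arc<dyn ResolverApi>` rather than a `Box<dyn ResolverApi>`, so one resolver can be handed out to several concurrent consumers instead of being owned by exactly one caller. A simplified sketch of why `Arc` is the better fit (stand-in trait and types, not the crate's API):

    use std::sync::Arc;

    trait Resolver: Send + Sync {
        fn chain_id(&self) -> u64;
    }

    struct NodeResolver {
        chain_id: u64,
    }

    impl Resolver for NodeResolver {
        fn chain_id(&self) -> u64 {
            self.chain_id
        }
    }

    // With `Box<dyn Resolver>` every consumer would need its own resolver instance;
    // with `Arc<dyn Resolver>` one instance is created and then cloned cheaply.
    fn make_resolver() -> Arc<dyn Resolver> {
        Arc::new(NodeResolver { chain_id: 1 })
    }

    fn main() {
        let resolver = make_resolver();
        let workers: Vec<_> = (0..4)
            .map(|_| {
                let resolver = Arc::clone(&resolver);
                std::thread::spawn(move || resolver.chain_id())
            })
            .collect();
        for worker in workers {
            assert_eq!(worker.join().unwrap(), 1);
        }
    }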

+14
-23
@@ -327,6 +327,14 @@ impl GethNode {
}

impl EthereumNode for GethNode {
    fn id(&self) -> usize {
        self.id as _
    }

    fn connection_string(&self) -> &str {
        &self.connection_string
    }

    #[instrument(
        level = "info",
        skip_all,
@@ -498,13 +506,17 @@ impl EthereumNode for GethNode {
    // #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn resolver(
        &self,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn ResolverApi + '_>>> + '_>> {
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Arc<dyn ResolverApi + '_>>> + '_>> {
        Box::pin(async move {
            let id = self.id;
            let provider = self.provider().await?;
            Ok(Box::new(GethNodeResolver { id, provider }) as Box<dyn ResolverApi>)
            Ok(Arc::new(GethNodeResolver { id, provider }) as Arc<dyn ResolverApi>)
        })
    }

    fn evm_version(&self) -> EVMVersion {
        EVMVersion::Cancun
    }
}

pub struct GethNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereum>> {
@@ -788,16 +800,6 @@ impl ResolverApi for GethNode {
}

impl Node for GethNode {
    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn id(&self) -> usize {
        self.id as _
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn connection_string(&self) -> String {
        self.connection_string.clone()
    }

    #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
    fn shutdown(&mut self) -> anyhow::Result<()> {
        // Terminate the processes in a graceful manner to allow for the output to be flushed.
@@ -840,17 +842,6 @@ impl Node for GethNode {
            .stdout;
        Ok(String::from_utf8_lossy(&output).into())
    }

    fn matches_target(targets: Option<&[String]>) -> bool {
        match targets {
            None => true,
            Some(targets) => targets.iter().any(|str| str.as_str() == "evm"),
        }
    }

    fn evm_version() -> EVMVersion {
        EVMVersion::Cancun
    }
}

impl Drop for GethNode {

@@ -1,20 +1,15 @@
//! This crate implements the testing nodes.

use alloy::genesis::Genesis;
use revive_common::EVMVersion;
use revive_dt_node_interaction::EthereumNode;

pub mod common;
pub mod constants;
pub mod geth;
pub mod pool;
pub mod substrate;

/// An abstract interface for testing nodes.
pub trait Node: EthereumNode {
    /// Returns the identifier of the node.
    fn id(&self) -> usize;

    /// Spawns a node configured according to the genesis json.
    ///
    /// Blocking until it's ready to accept transactions.
@@ -25,16 +20,6 @@ pub trait Node: EthereumNode {
    /// Blocking until it's completely stopped.
    fn shutdown(&mut self) -> anyhow::Result<()>;

    /// Returns the nodes connection string.
    fn connection_string(&self) -> String;

    /// Returns the node version.
    fn version(&self) -> anyhow::Result<String>;

    /// Given a list of targets from the metadata file, this function determines if the metadata
    /// file can be ran on this node or not.
    fn matches_target(targets: Option<&[String]>) -> bool;

    /// Returns the EVM version of the node.
    fn evm_version() -> EVMVersion;
}
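
`matches_target` disappears from this trait together with its stringly-typed `Option<&[String]>` argument; with the metadata `targets` field now carrying `VmIdentifier` values, the per-node comparisons against literal "evm"/"pvm" strings (removed from the Geth impl above and the Substrate impl further down) reduce to a typed lookup. One plausible shape of such a check, with stand-in types rather than the crate's real ones:

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum VmTarget {
        Evm,
        Polkavm,
    }

    // No `targets` field means the case runs everywhere; otherwise the node's VM
    // simply has to appear in the list -- no string spellings involved.
    fn matches_target(node_vm: VmTarget, targets: Option<&[VmTarget]>) -> bool {
        match targets {
            None => true,
            Some(targets) => targets.contains(&node_vm),
        }
    }

    fn main() {
        assert!(matches_target(VmTarget::Evm, None));
        assert!(matches_target(VmTarget::Polkavm, Some(&[VmTarget::Polkavm])));
        assert!(!matches_target(VmTarget::Evm, Some(&[VmTarget::Polkavm])));
    }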

@@ -1,96 +0,0 @@
//! This crate implements concurrent handling of testing node.

use std::{
    sync::atomic::{AtomicUsize, Ordering},
    thread,
};

use alloy::genesis::Genesis;
use anyhow::Context as _;
use revive_dt_config::{
    ConcurrencyConfiguration, EthRpcConfiguration, GenesisConfiguration, GethConfiguration,
    KitchensinkConfiguration, ReviveDevNodeConfiguration, WalletConfiguration,
    WorkingDirectoryConfiguration,
};

use crate::Node;

/// The node pool starts one or more [Node] which then can be accessed
/// in a round robbin fashion.
pub struct NodePool<T> {
    next: AtomicUsize,
    nodes: Vec<T>,
}

impl<T> NodePool<T>
where
    T: Node + Send + 'static,
{
    /// Create a new Pool. This will start as many nodes as there are workers in `config`.
    pub fn new(
        context: impl AsRef<WorkingDirectoryConfiguration>
            + AsRef<ConcurrencyConfiguration>
            + AsRef<GenesisConfiguration>
            + AsRef<WalletConfiguration>
            + AsRef<GethConfiguration>
            + AsRef<KitchensinkConfiguration>
            + AsRef<ReviveDevNodeConfiguration>
            + AsRef<EthRpcConfiguration>
            + Send
            + Sync
            + Clone
            + 'static,
    ) -> anyhow::Result<Self> {
        let concurrency_configuration = AsRef::<ConcurrencyConfiguration>::as_ref(&context);
        let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);

        let nodes = concurrency_configuration.number_of_nodes;
        let genesis = genesis_configuration.genesis()?;

        let mut handles = Vec::with_capacity(nodes);
        for _ in 0..nodes {
            let context = context.clone();
            let genesis = genesis.clone();
            handles.push(thread::spawn(move || spawn_node::<T>(context, genesis)));
        }

        let mut nodes = Vec::with_capacity(nodes);
        for handle in handles {
            nodes.push(
                handle
                    .join()
                    .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))
                    .context("Failed to join node spawn thread")?
                    .map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))
                    .context("Node failed to spawn")?,
            );
        }

        Ok(Self {
            nodes,
            next: Default::default(),
        })
    }

    /// Get a handle to the next node.
    pub fn round_robbin(&self) -> &T {
        let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len();
        self.nodes.get(current).unwrap()
    }
}

fn spawn_node<T: Node + Send>(
    _: impl AsRef<WorkingDirectoryConfiguration>
        + AsRef<ConcurrencyConfiguration>
        + AsRef<GenesisConfiguration>
        + AsRef<WalletConfiguration>
        + AsRef<GethConfiguration>
        + AsRef<KitchensinkConfiguration>
        + AsRef<ReviveDevNodeConfiguration>
        + AsRef<EthRpcConfiguration>
        + Clone
        + 'static,
    _: Genesis,
) -> anyhow::Result<T> {
    todo!("Remove");
}
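
The deleted pool's one interesting mechanism is its lock-free round-robin hand-out: an atomic counter is incremented on every call and wrapped onto the node list. The same logic in isolation, detached from the removed `Node` trait and configuration plumbing, looks roughly like this:

    use std::sync::atomic::{AtomicUsize, Ordering};

    struct Pool<T> {
        next: AtomicUsize,
        items: Vec<T>,
    }

    impl<T> Pool<T> {
        fn new(items: Vec<T>) -> Self {
            assert!(!items.is_empty(), "a pool needs at least one item");
            Self { next: AtomicUsize::new(0), items }
        }

        // `fetch_add` hands every caller a unique ticket; the modulo wraps the
        // ticket back onto the available items, so callers cycle through them.
        fn round_robin(&self) -> &T {
            let current = self.next.fetch_add(1, Ordering::SeqCst) % self.items.len();
            &self.items[current]
        }
    }

    fn main() {
        let pool = Pool::new(vec!["a", "b", "c"]);
        let picks: Vec<_> = (0..4).map(|_| *pool.round_robin()).collect();
        assert_eq!(picks, vec!["a", "b", "c", "a"]);
    }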

@@ -432,6 +432,14 @@ impl SubstrateNode {
}

impl EthereumNode for SubstrateNode {
    fn id(&self) -> usize {
        self.id as _
    }

    fn connection_string(&self) -> &str {
        &self.rpc_url
    }

    fn execute_transaction(
        &self,
        transaction: alloy::rpc::types::TransactionRequest,
@@ -520,13 +528,17 @@ impl EthereumNode for SubstrateNode {

    fn resolver(
        &self,
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Box<dyn ResolverApi + '_>>> + '_>> {
    ) -> Pin<Box<dyn Future<Output = anyhow::Result<Arc<dyn ResolverApi + '_>>> + '_>> {
        Box::pin(async move {
            let id = self.id;
            let provider = self.provider().await?;
            Ok(Box::new(SubstrateNodeResolver { id, provider }) as Box<dyn ResolverApi>)
            Ok(Arc::new(SubstrateNodeResolver { id, provider }) as Arc<dyn ResolverApi>)
        })
    }

    fn evm_version(&self) -> EVMVersion {
        EVMVersion::Cancun
    }
}

pub struct SubstrateNodeResolver<F: TxFiller<ReviveNetwork>, P: Provider<ReviveNetwork>> {
@@ -803,14 +815,6 @@ impl ResolverApi for SubstrateNode {
}

impl Node for SubstrateNode {
    fn id(&self) -> usize {
        self.id as _
    }

    fn connection_string(&self) -> String {
        self.rpc_url.clone()
    }

    fn shutdown(&mut self) -> anyhow::Result<()> {
        // Terminate the processes in a graceful manner to allow for the output to be flushed.
        if let Some(mut child) = self.process_proxy.take() {
@@ -854,17 +858,6 @@ impl Node for SubstrateNode {
            .stdout;
        Ok(String::from_utf8_lossy(&output).into())
    }

    fn matches_target(targets: Option<&[String]>) -> bool {
        match targets {
            None => true,
            Some(targets) => targets.iter().any(|str| str.as_str() == "pvm"),
        }
    }

    fn evm_version() -> EVMVersion {
        EVMVersion::Cancun
    }
}

impl Drop for SubstrateNode {

@@ -11,8 +11,9 @@ use std::{
use alloy_primitives::Address;
use anyhow::{Context as _, Result};
use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode};
use revive_dt_config::{Context, TestingPlatform};
use revive_dt_config::Context;
use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance};
use semver::Version;
use serde::Serialize;
@@ -84,11 +85,8 @@ impl ReportAggregator {
            RunnerEvent::TestIgnored(event) => {
                self.handle_test_ignored_event(*event);
            }
            RunnerEvent::LeaderNodeAssigned(event) => {
                self.handle_leader_node_assigned_event(*event);
            }
            RunnerEvent::FollowerNodeAssigned(event) => {
                self.handle_follower_node_assigned_event(*event);
            RunnerEvent::NodeAssigned(event) => {
                self.handle_node_assigned_event(*event);
            }
            RunnerEvent::PreLinkContractsCompilationSucceeded(event) => {
                self.handle_pre_link_contracts_compilation_succeeded_event(*event)
@@ -257,28 +255,15 @@ impl ReportAggregator {
        let _ = self.listener_tx.send(event);
    }

    fn handle_leader_node_assigned_event(&mut self, event: LeaderNodeAssignedEvent) {
    fn handle_node_assigned_event(&mut self, event: NodeAssignedEvent) {
        let execution_information = self.execution_information(&ExecutionSpecifier {
            test_specifier: event.test_specifier,
            node_id: event.id,
            node_designation: NodeDesignation::Leader,
            platform_identifier: event.platform_identifier,
        });
        execution_information.node = Some(TestCaseNodeInformation {
            id: event.id,
            platform: event.platform,
            connection_string: event.connection_string,
        });
    }

    fn handle_follower_node_assigned_event(&mut self, event: FollowerNodeAssignedEvent) {
        let execution_information = self.execution_information(&ExecutionSpecifier {
            test_specifier: event.test_specifier,
            node_id: event.id,
            node_designation: NodeDesignation::Follower,
        });
        execution_information.node = Some(TestCaseNodeInformation {
            id: event.id,
            platform: event.platform,
            platform_identifier: event.platform_identifier,
            connection_string: event.connection_string,
        });
    }
@@ -413,14 +398,11 @@ impl ReportAggregator {
        specifier: &ExecutionSpecifier,
    ) -> &mut ExecutionInformation {
        let test_case_report = self.test_case_report(&specifier.test_specifier);
        match specifier.node_designation {
            NodeDesignation::Leader => test_case_report
                .leader_execution_information
                .get_or_insert_default(),
            NodeDesignation::Follower => test_case_report
                .follower_execution_information
                .get_or_insert_default(),
        }
        test_case_report
            .platform_execution
            .entry(specifier.platform_identifier)
            .or_default()
            .get_or_insert_default()
    }
}
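
Instead of special-casing a leader and a follower slot, the aggregator now keys execution information by platform, so any number of platforms share one code path. A condensed sketch of the `entry(..).or_default().get_or_insert_default()` access pattern used above, with stand-in types (the real `ExecutionInformation` and `PlatformIdentifier` live in the crates; `Option::get_or_insert_default` needs a reasonably recent stable Rust):

    use std::collections::BTreeMap;

    #[derive(Debug, Default, PartialEq)]
    struct ExecutionInformation {
        steps: usize,
    }

    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    enum Platform {
        Geth,
        Kitchensink,
    }

    #[derive(Default)]
    struct TestCaseReport {
        platform_execution: BTreeMap<Platform, Option<ExecutionInformation>>,
    }

    impl TestCaseReport {
        // First access for a platform creates both the map entry and the inner value.
        fn execution_information(&mut self, platform: Platform) -> &mut ExecutionInformation {
            self.platform_execution
                .entry(platform)
                .or_default()
                .get_or_insert_default()
        }
    }

    fn main() {
        let mut report = TestCaseReport::default();
        report.execution_information(Platform::Geth).steps += 1;
        report.execution_information(Platform::Geth).steps += 1;
        report.execution_information(Platform::Kitchensink).steps += 1;
        assert_eq!(
            report.platform_execution[&Platform::Geth],
            Some(ExecutionInformation { steps: 2 })
        );
    }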

@@ -455,12 +437,8 @@ pub struct TestCaseReport {
    /// Information on the status of the test case and whether it succeeded, failed, or was ignored.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<TestCaseStatus>,
    /// Information related to the execution on the leader.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub leader_execution_information: Option<ExecutionInformation>,
    /// Information related to the execution on the follower.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub follower_execution_information: Option<ExecutionInformation>,
    /// Information related to the execution on one of the platforms.
    pub platform_execution: BTreeMap<PlatformIdentifier, Option<ExecutionInformation>>,
}

/// Information related to the status of the test. Could be that the test succeeded, failed, or that
@@ -494,7 +472,7 @@ pub struct TestCaseNodeInformation {
    /// The ID of the node that this case is being executed on.
    pub id: usize,
    /// The platform of the node.
    pub platform: TestingPlatform,
    pub platform_identifier: PlatformIdentifier,
    /// The connection string of the node.
    pub connection_string: String,
}

@@ -2,7 +2,7 @@

use std::{path::PathBuf, sync::Arc};

use revive_dt_common::define_wrapper_type;
use revive_dt_common::{define_wrapper_type, types::PlatformIdentifier};
use revive_dt_compiler::Mode;
use revive_dt_format::{case::CaseIdx, input::StepIdx};
use serde::{Deserialize, Serialize};
@@ -27,13 +27,7 @@ pub struct TestSpecifier {
pub struct ExecutionSpecifier {
    pub test_specifier: Arc<TestSpecifier>,
    pub node_id: usize,
    pub node_designation: NodeDesignation,
}

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum NodeDesignation {
    Leader,
    Follower,
    pub platform_identifier: PlatformIdentifier,
}

#[derive(Clone, Debug, PartialEq, Eq, Hash)]

@@ -6,8 +6,8 @@ use std::{collections::BTreeMap, path::PathBuf, sync::Arc};
use alloy_primitives::Address;
use anyhow::Context as _;
use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_compiler::{CompilerInput, CompilerOutput};
use revive_dt_config::TestingPlatform;
use revive_dt_format::metadata::Metadata;
use revive_dt_format::{corpus::Corpus, metadata::ContractInstance};
use semver::Version;
@@ -412,14 +412,14 @@ macro_rules! define_event {
            pub fn execution_specific_reporter(
                &self,
                node_id: impl Into<usize>,
                node_designation: impl Into<$crate::common::NodeDesignation>
                platform_identifier: impl Into<PlatformIdentifier>
            ) -> [< $ident ExecutionSpecificReporter >] {
                [< $ident ExecutionSpecificReporter >] {
                    reporter: self.reporter.clone(),
                    execution_specifier: Arc::new($crate::common::ExecutionSpecifier {
                        test_specifier: self.test_specifier.clone(),
                        node_id: node_id.into(),
                        node_designation: node_designation.into(),
                        platform_identifier: platform_identifier.into(),
                    })
                }
            }
@@ -521,24 +521,13 @@ define_event! {
        reason: String,
    },
    /// An event emitted when the test case is assigned a leader node.
    LeaderNodeAssigned {
    NodeAssigned {
        /// A specifier for the test that the assignment is for.
        test_specifier: Arc<TestSpecifier>,
        /// The ID of the node that this case is being executed on.
        id: usize,
        /// The platform of the node.
        platform: TestingPlatform,
        /// The connection string of the node.
        connection_string: String,
    },
    /// An event emitted when the test case is assigned a follower node.
    FollowerNodeAssigned {
        /// A specifier for the test that the assignment is for.
        test_specifier: Arc<TestSpecifier>,
        /// The ID of the node that this case is being executed on.
        id: usize,
        /// The platform of the node.
        platform: TestingPlatform,
        /// The identifier of the platform used.
        platform_identifier: PlatformIdentifier,
        /// The connection string of the node.
        connection_string: String,
    },
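
The two near-identical `LeaderNodeAssigned`/`FollowerNodeAssigned` variants collapse into a single `NodeAssigned` event because the distinguishing fact now travels in the payload as a platform identifier. A minimal stand-in illustration of that shape (not the crate's actual event machinery, which is generated by `define_event!`):

    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    enum Platform {
        Geth,
        Kitchensink,
    }

    // One variant parameterised by the platform replaces two structurally identical ones.
    #[derive(Debug)]
    enum RunnerEvent {
        NodeAssigned {
            id: usize,
            platform: Platform,
            connection_string: String,
        },
    }

    fn main() {
        let event = RunnerEvent::NodeAssigned {
            id: 0,
            platform: Platform::Geth,
            connection_string: "http://127.0.0.1:8545".into(),
        };
        // A single handler replaces the former leader/follower pair of handlers.
        match event {
            RunnerEvent::NodeAssigned { id, platform, .. } => {
                println!("node {id} assigned on platform {platform:?}");
            }
        }
    }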