fix: migrate vendor rustfmt.toml to stable-only features

- Update vendor/pezkuwi-zombienet-sdk/rustfmt.toml to stable-only
- Reformat 74 vendor files with stable rustfmt
- Remove nightly-only features causing CI failures
This commit is contained in:
2025-12-23 10:00:48 +03:00
parent ae7321e239
commit 44cbe4a280
74 changed files with 19895 additions and 21681 deletions
+3 -1
View File
@@ -302,7 +302,9 @@ impl<T: Config> ExtrinsicEvents<T> {
///
/// This works in the same way that [`events::Events::find()`] does, with the
/// exception that it filters out events not related to the submitted extrinsic.
pub fn find<'a, Ev: events::StaticEvent + 'a>(&'a self) -> impl Iterator<Item = Result<Ev, EventsError>> + 'a {
pub fn find<'a, Ev: events::StaticEvent + 'a>(
&'a self,
) -> impl Iterator<Item = Result<Ev, EventsError>> + 'a {
self.iter().filter_map(|ev| ev.and_then(|ev| ev.as_event::<Ev>()).transpose())
}
@@ -1,383 +1,339 @@
use std::{
error::Error,
fmt::Display,
net::IpAddr,
path::{Path, PathBuf},
str::FromStr,
error::Error,
fmt::Display,
net::IpAddr,
path::{Path, PathBuf},
str::FromStr,
};
use multiaddr::Multiaddr;
use serde::{Deserialize, Serialize};
use crate::{
shared::{
errors::{ConfigError, FieldError},
helpers::{merge_errors, merge_errors_vecs},
types::Duration,
},
utils::{default_as_true, default_node_spawn_timeout, default_timeout},
shared::{
errors::{ConfigError, FieldError},
helpers::{merge_errors, merge_errors_vecs},
types::Duration,
},
utils::{default_as_true, default_node_spawn_timeout, default_timeout},
};
/// Global settings applied to an entire network.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct GlobalSettings {
/// Global bootnodes to use (we will then add more)
#[serde(skip_serializing_if = "std::vec::Vec::is_empty", default)]
bootnodes_addresses: Vec<Multiaddr>,
// TODO: parse both case in zombienet node version to avoid renamed ?
/// Global spawn timeout
#[serde(rename = "timeout", default = "default_timeout")]
network_spawn_timeout: Duration,
// TODO: not used yet
/// Node spawn timeout
#[serde(default = "default_node_spawn_timeout")]
node_spawn_timeout: Duration,
// TODO: not used yet
/// Local ip to use for construct the direct links
local_ip: Option<IpAddr>,
/// Directory to use as base dir
/// Used to reuse the same files (database) from a previous run,
/// also note that we will override the content of some of those files.
base_dir: Option<PathBuf>,
/// Number of concurrent spawning process to launch, None means try to spawn all at the same time.
spawn_concurrency: Option<usize>,
/// If enabled, will launch a task to monitor nodes' liveness and tear down the network if there are any.
#[serde(default = "default_as_true")]
tear_down_on_failure: bool,
/// Global bootnodes to use (we will then add more)
#[serde(skip_serializing_if = "std::vec::Vec::is_empty", default)]
bootnodes_addresses: Vec<Multiaddr>,
// TODO: parse both case in zombienet node version to avoid renamed ?
/// Global spawn timeout
#[serde(rename = "timeout", default = "default_timeout")]
network_spawn_timeout: Duration,
// TODO: not used yet
/// Node spawn timeout
#[serde(default = "default_node_spawn_timeout")]
node_spawn_timeout: Duration,
// TODO: not used yet
/// Local ip to use for construct the direct links
local_ip: Option<IpAddr>,
/// Directory to use as base dir
/// Used to reuse the same files (database) from a previous run,
/// also note that we will override the content of some of those files.
base_dir: Option<PathBuf>,
/// Number of concurrent spawning process to launch, None means try to spawn all at the same time.
spawn_concurrency: Option<usize>,
/// If enabled, will launch a task to monitor nodes' liveness and tear down the network if there are any.
#[serde(default = "default_as_true")]
tear_down_on_failure: bool,
}
impl GlobalSettings {
/// External bootnode address.
pub fn bootnodes_addresses(&self) -> Vec<&Multiaddr> {
self.bootnodes_addresses.iter().collect()
}
/// External bootnode address.
pub fn bootnodes_addresses(&self) -> Vec<&Multiaddr> {
self.bootnodes_addresses.iter().collect()
}
/// Global spawn timeout in seconds.
pub fn network_spawn_timeout(&self) -> Duration {
self.network_spawn_timeout
}
/// Global spawn timeout in seconds.
pub fn network_spawn_timeout(&self) -> Duration {
self.network_spawn_timeout
}
/// Individual node spawn timeout in seconds.
pub fn node_spawn_timeout(&self) -> Duration {
self.node_spawn_timeout
}
/// Individual node spawn timeout in seconds.
pub fn node_spawn_timeout(&self) -> Duration {
self.node_spawn_timeout
}
/// Local IP used to expose local services (including RPC, metrics and monitoring).
pub fn local_ip(&self) -> Option<&IpAddr> {
self.local_ip.as_ref()
}
/// Local IP used to expose local services (including RPC, metrics and monitoring).
pub fn local_ip(&self) -> Option<&IpAddr> {
self.local_ip.as_ref()
}
/// Base directory to use (instead a random tmp one)
/// All the artifacts will be created in this directory.
pub fn base_dir(&self) -> Option<&Path> {
self.base_dir.as_deref()
}
/// Base directory to use (instead a random tmp one)
/// All the artifacts will be created in this directory.
pub fn base_dir(&self) -> Option<&Path> {
self.base_dir.as_deref()
}
/// Number of concurrent spawning process to launch
pub fn spawn_concurrency(&self) -> Option<usize> {
self.spawn_concurrency
}
/// Number of concurrent spawning process to launch
pub fn spawn_concurrency(&self) -> Option<usize> {
self.spawn_concurrency
}
/// A flag to tear down the network if there are any unresponsive nodes detected.
pub fn tear_down_on_failure(&self) -> bool {
self.tear_down_on_failure
}
/// A flag to tear down the network if there are any unresponsive nodes detected.
pub fn tear_down_on_failure(&self) -> bool {
self.tear_down_on_failure
}
}
impl Default for GlobalSettings {
fn default() -> Self {
Self {
bootnodes_addresses: Default::default(),
network_spawn_timeout: default_timeout(),
node_spawn_timeout: default_node_spawn_timeout(),
local_ip: Default::default(),
base_dir: Default::default(),
spawn_concurrency: Default::default(),
tear_down_on_failure: true,
}
}
fn default() -> Self {
Self {
bootnodes_addresses: Default::default(),
network_spawn_timeout: default_timeout(),
node_spawn_timeout: default_node_spawn_timeout(),
local_ip: Default::default(),
base_dir: Default::default(),
spawn_concurrency: Default::default(),
tear_down_on_failure: true,
}
}
}
/// A global settings builder, used to build [`GlobalSettings`] declaratively with fields validation.
#[derive(Default)]
pub struct GlobalSettingsBuilder {
config: GlobalSettings,
errors: Vec<anyhow::Error>,
config: GlobalSettings,
errors: Vec<anyhow::Error>,
}
impl GlobalSettingsBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn new() -> Self {
Self::default()
}
// Transition to the next state of the builder.
fn transition(config: GlobalSettings, errors: Vec<anyhow::Error>) -> Self {
Self { config, errors }
}
// Transition to the next state of the builder.
fn transition(config: GlobalSettings, errors: Vec<anyhow::Error>) -> Self {
Self { config, errors }
}
/// Set the external bootnode address.
///
/// Note: Bootnode address replacements are NOT supported here.
/// Only arguments (`args`) support dynamic replacements. Bootnode addresses must be a valid address.
pub fn with_raw_bootnodes_addresses<T>(self, bootnodes_addresses: Vec<T>) -> Self
where
T: TryInto<Multiaddr> + Display + Copy,
T::Error: Error + Send + Sync + 'static,
{
let mut addrs = vec![];
let mut errors = vec![];
/// Set the external bootnode address.
///
/// Note: Bootnode address replacements are NOT supported here.
/// Only arguments (`args`) support dynamic replacements. Bootnode addresses must be a valid address.
pub fn with_raw_bootnodes_addresses<T>(self, bootnodes_addresses: Vec<T>) -> Self
where
T: TryInto<Multiaddr> + Display + Copy,
T::Error: Error + Send + Sync + 'static,
{
let mut addrs = vec![];
let mut errors = vec![];
for (index, addr) in bootnodes_addresses.into_iter().enumerate() {
match addr.try_into() {
Ok(addr) => addrs.push(addr),
Err(error) => errors.push(
FieldError::BootnodesAddress(index, addr.to_string(), error.into()).into(),
),
}
}
for (index, addr) in bootnodes_addresses.into_iter().enumerate() {
match addr.try_into() {
Ok(addr) => addrs.push(addr),
Err(error) => errors.push(
FieldError::BootnodesAddress(index, addr.to_string(), error.into()).into(),
),
}
}
Self::transition(
GlobalSettings {
bootnodes_addresses: addrs,
..self.config
},
merge_errors_vecs(self.errors, errors),
)
}
Self::transition(
GlobalSettings { bootnodes_addresses: addrs, ..self.config },
merge_errors_vecs(self.errors, errors),
)
}
/// Set global spawn timeout in seconds.
pub fn with_network_spawn_timeout(self, timeout: Duration) -> Self {
Self::transition(
GlobalSettings {
network_spawn_timeout: timeout,
..self.config
},
self.errors,
)
}
/// Set global spawn timeout in seconds.
pub fn with_network_spawn_timeout(self, timeout: Duration) -> Self {
Self::transition(
GlobalSettings { network_spawn_timeout: timeout, ..self.config },
self.errors,
)
}
/// Set individual node spawn timeout in seconds.
pub fn with_node_spawn_timeout(self, timeout: Duration) -> Self {
Self::transition(
GlobalSettings {
node_spawn_timeout: timeout,
..self.config
},
self.errors,
)
}
/// Set individual node spawn timeout in seconds.
pub fn with_node_spawn_timeout(self, timeout: Duration) -> Self {
Self::transition(GlobalSettings { node_spawn_timeout: timeout, ..self.config }, self.errors)
}
/// Set local IP used to expose local services (including RPC, metrics and monitoring).
pub fn with_local_ip(self, local_ip: &str) -> Self {
match IpAddr::from_str(local_ip) {
Ok(local_ip) => Self::transition(
GlobalSettings {
local_ip: Some(local_ip),
..self.config
},
self.errors,
),
Err(error) => Self::transition(
self.config,
merge_errors(self.errors, FieldError::LocalIp(error.into()).into()),
),
}
}
/// Set local IP used to expose local services (including RPC, metrics and monitoring).
pub fn with_local_ip(self, local_ip: &str) -> Self {
match IpAddr::from_str(local_ip) {
Ok(local_ip) => Self::transition(
GlobalSettings { local_ip: Some(local_ip), ..self.config },
self.errors,
),
Err(error) => Self::transition(
self.config,
merge_errors(self.errors, FieldError::LocalIp(error.into()).into()),
),
}
}
/// Set the directory to use as base (instead of a random tmp one).
pub fn with_base_dir(self, base_dir: impl Into<PathBuf>) -> Self {
Self::transition(
GlobalSettings {
base_dir: Some(base_dir.into()),
..self.config
},
self.errors,
)
}
/// Set the directory to use as base (instead of a random tmp one).
pub fn with_base_dir(self, base_dir: impl Into<PathBuf>) -> Self {
Self::transition(
GlobalSettings { base_dir: Some(base_dir.into()), ..self.config },
self.errors,
)
}
/// Set the spawn concurrency
pub fn with_spawn_concurrency(self, spawn_concurrency: usize) -> Self {
Self::transition(
GlobalSettings {
spawn_concurrency: Some(spawn_concurrency),
..self.config
},
self.errors,
)
}
/// Set the spawn concurrency
pub fn with_spawn_concurrency(self, spawn_concurrency: usize) -> Self {
Self::transition(
GlobalSettings { spawn_concurrency: Some(spawn_concurrency), ..self.config },
self.errors,
)
}
/// Set the `tear_down_on_failure` flag
pub fn with_tear_down_on_failure(self, tear_down_on_failure: bool) -> Self {
Self::transition(
GlobalSettings {
tear_down_on_failure,
..self.config
},
self.errors,
)
}
/// Set the `tear_down_on_failure` flag
pub fn with_tear_down_on_failure(self, tear_down_on_failure: bool) -> Self {
Self::transition(GlobalSettings { tear_down_on_failure, ..self.config }, self.errors)
}
/// Seals the builder and returns a [`GlobalSettings`] if there are no validation errors, else returns errors.
pub fn build(self) -> Result<GlobalSettings, Vec<anyhow::Error>> {
if !self.errors.is_empty() {
return Err(self
.errors
.into_iter()
.map(|error| ConfigError::GlobalSettings(error).into())
.collect::<Vec<_>>());
}
/// Seals the builder and returns a [`GlobalSettings`] if there are no validation errors, else returns errors.
pub fn build(self) -> Result<GlobalSettings, Vec<anyhow::Error>> {
if !self.errors.is_empty() {
return Err(self
.errors
.into_iter()
.map(|error| ConfigError::GlobalSettings(error).into())
.collect::<Vec<_>>());
}
Ok(self.config)
}
Ok(self.config)
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::*;
#[test]
fn global_settings_config_builder_should_succeeds_and_returns_a_global_settings_config() {
let global_settings_config = GlobalSettingsBuilder::new()
.with_raw_bootnodes_addresses(vec![
"/ip4/10.41.122.55/tcp/45421",
"/ip4/51.144.222.10/tcp/2333",
])
.with_network_spawn_timeout(600)
.with_node_spawn_timeout(120)
.with_local_ip("10.0.0.1")
.with_base_dir("/home/nonroot/mynetwork")
.with_spawn_concurrency(5)
.with_tear_down_on_failure(true)
.build()
.unwrap();
#[test]
fn global_settings_config_builder_should_succeeds_and_returns_a_global_settings_config() {
let global_settings_config = GlobalSettingsBuilder::new()
.with_raw_bootnodes_addresses(vec![
"/ip4/10.41.122.55/tcp/45421",
"/ip4/51.144.222.10/tcp/2333",
])
.with_network_spawn_timeout(600)
.with_node_spawn_timeout(120)
.with_local_ip("10.0.0.1")
.with_base_dir("/home/nonroot/mynetwork")
.with_spawn_concurrency(5)
.with_tear_down_on_failure(true)
.build()
.unwrap();
let bootnodes_addresses: Vec<Multiaddr> = vec![
"/ip4/10.41.122.55/tcp/45421".try_into().unwrap(),
"/ip4/51.144.222.10/tcp/2333".try_into().unwrap(),
];
assert_eq!(
global_settings_config.bootnodes_addresses(),
bootnodes_addresses.iter().collect::<Vec<_>>()
);
assert_eq!(global_settings_config.network_spawn_timeout(), 600);
assert_eq!(global_settings_config.node_spawn_timeout(), 120);
assert_eq!(
global_settings_config
.local_ip()
.unwrap()
.to_string()
.as_str(),
"10.0.0.1"
);
assert_eq!(
global_settings_config.base_dir().unwrap(),
Path::new("/home/nonroot/mynetwork")
);
assert_eq!(global_settings_config.spawn_concurrency().unwrap(), 5);
assert!(global_settings_config.tear_down_on_failure());
}
let bootnodes_addresses: Vec<Multiaddr> = vec![
"/ip4/10.41.122.55/tcp/45421".try_into().unwrap(),
"/ip4/51.144.222.10/tcp/2333".try_into().unwrap(),
];
assert_eq!(
global_settings_config.bootnodes_addresses(),
bootnodes_addresses.iter().collect::<Vec<_>>()
);
assert_eq!(global_settings_config.network_spawn_timeout(), 600);
assert_eq!(global_settings_config.node_spawn_timeout(), 120);
assert_eq!(global_settings_config.local_ip().unwrap().to_string().as_str(), "10.0.0.1");
assert_eq!(
global_settings_config.base_dir().unwrap(),
Path::new("/home/nonroot/mynetwork")
);
assert_eq!(global_settings_config.spawn_concurrency().unwrap(), 5);
assert!(global_settings_config.tear_down_on_failure());
}
#[test]
fn global_settings_config_builder_should_succeeds_when_node_spawn_timeout_is_missing() {
let global_settings_config = GlobalSettingsBuilder::new()
.with_raw_bootnodes_addresses(vec![
"/ip4/10.41.122.55/tcp/45421",
"/ip4/51.144.222.10/tcp/2333",
])
.with_network_spawn_timeout(600)
.with_local_ip("10.0.0.1")
.build()
.unwrap();
#[test]
fn global_settings_config_builder_should_succeeds_when_node_spawn_timeout_is_missing() {
let global_settings_config = GlobalSettingsBuilder::new()
.with_raw_bootnodes_addresses(vec![
"/ip4/10.41.122.55/tcp/45421",
"/ip4/51.144.222.10/tcp/2333",
])
.with_network_spawn_timeout(600)
.with_local_ip("10.0.0.1")
.build()
.unwrap();
let bootnodes_addresses: Vec<Multiaddr> = vec![
"/ip4/10.41.122.55/tcp/45421".try_into().unwrap(),
"/ip4/51.144.222.10/tcp/2333".try_into().unwrap(),
];
assert_eq!(
global_settings_config.bootnodes_addresses(),
bootnodes_addresses.iter().collect::<Vec<_>>()
);
assert_eq!(global_settings_config.network_spawn_timeout(), 600);
assert_eq!(global_settings_config.node_spawn_timeout(), 600);
assert_eq!(
global_settings_config
.local_ip()
.unwrap()
.to_string()
.as_str(),
"10.0.0.1"
);
}
let bootnodes_addresses: Vec<Multiaddr> = vec![
"/ip4/10.41.122.55/tcp/45421".try_into().unwrap(),
"/ip4/51.144.222.10/tcp/2333".try_into().unwrap(),
];
assert_eq!(
global_settings_config.bootnodes_addresses(),
bootnodes_addresses.iter().collect::<Vec<_>>()
);
assert_eq!(global_settings_config.network_spawn_timeout(), 600);
assert_eq!(global_settings_config.node_spawn_timeout(), 600);
assert_eq!(global_settings_config.local_ip().unwrap().to_string().as_str(), "10.0.0.1");
}
#[test]
fn global_settings_builder_should_fails_and_returns_an_error_if_one_bootnode_address_is_invalid(
) {
let errors = GlobalSettingsBuilder::new()
.with_raw_bootnodes_addresses(vec!["/ip4//tcp/45421"])
.build()
.unwrap_err();
#[test]
fn global_settings_builder_should_fails_and_returns_an_error_if_one_bootnode_address_is_invalid(
) {
let errors = GlobalSettingsBuilder::new()
.with_raw_bootnodes_addresses(vec!["/ip4//tcp/45421"])
.build()
.unwrap_err();
assert_eq!(errors.len(), 1);
assert_eq!(
assert_eq!(errors.len(), 1);
assert_eq!(
errors.first().unwrap().to_string(),
"global_settings.bootnodes_addresses[0]: '/ip4//tcp/45421' failed to parse: invalid IPv4 address syntax"
);
}
}
#[test]
fn global_settings_builder_should_fails_and_returns_multiple_errors_if_multiple_bootnodes_addresses_are_invalid(
) {
let errors = GlobalSettingsBuilder::new()
.with_raw_bootnodes_addresses(vec!["/ip4//tcp/45421", "//10.42.153.10/tcp/43111"])
.build()
.unwrap_err();
#[test]
fn global_settings_builder_should_fails_and_returns_multiple_errors_if_multiple_bootnodes_addresses_are_invalid(
) {
let errors = GlobalSettingsBuilder::new()
.with_raw_bootnodes_addresses(vec!["/ip4//tcp/45421", "//10.42.153.10/tcp/43111"])
.build()
.unwrap_err();
assert_eq!(errors.len(), 2);
assert_eq!(
assert_eq!(errors.len(), 2);
assert_eq!(
errors.first().unwrap().to_string(),
"global_settings.bootnodes_addresses[0]: '/ip4//tcp/45421' failed to parse: invalid IPv4 address syntax"
);
assert_eq!(
assert_eq!(
errors.get(1).unwrap().to_string(),
"global_settings.bootnodes_addresses[1]: '//10.42.153.10/tcp/43111' unknown protocol string: "
);
}
}
#[test]
fn global_settings_builder_should_fails_and_returns_an_error_if_local_ip_is_invalid() {
let errors = GlobalSettingsBuilder::new()
.with_local_ip("invalid")
.build()
.unwrap_err();
#[test]
fn global_settings_builder_should_fails_and_returns_an_error_if_local_ip_is_invalid() {
let errors = GlobalSettingsBuilder::new().with_local_ip("invalid").build().unwrap_err();
assert_eq!(errors.len(), 1);
assert_eq!(
errors.first().unwrap().to_string(),
"global_settings.local_ip: invalid IP address syntax"
);
}
assert_eq!(errors.len(), 1);
assert_eq!(
errors.first().unwrap().to_string(),
"global_settings.local_ip: invalid IP address syntax"
);
}
#[test]
fn global_settings_builder_should_fails_and_returns_multiple_errors_if_multiple_fields_are_invalid(
) {
let errors = GlobalSettingsBuilder::new()
.with_raw_bootnodes_addresses(vec!["/ip4//tcp/45421", "//10.42.153.10/tcp/43111"])
.with_local_ip("invalid")
.build()
.unwrap_err();
#[test]
fn global_settings_builder_should_fails_and_returns_multiple_errors_if_multiple_fields_are_invalid(
) {
let errors = GlobalSettingsBuilder::new()
.with_raw_bootnodes_addresses(vec!["/ip4//tcp/45421", "//10.42.153.10/tcp/43111"])
.with_local_ip("invalid")
.build()
.unwrap_err();
assert_eq!(errors.len(), 3);
assert_eq!(
assert_eq!(errors.len(), 3);
assert_eq!(
errors.first().unwrap().to_string(),
"global_settings.bootnodes_addresses[0]: '/ip4//tcp/45421' failed to parse: invalid IPv4 address syntax"
);
assert_eq!(
assert_eq!(
errors.get(1).unwrap().to_string(),
"global_settings.bootnodes_addresses[1]: '//10.42.153.10/tcp/43111' unknown protocol string: "
);
assert_eq!(
errors.get(2).unwrap().to_string(),
"global_settings.local_ip: invalid IP address syntax"
);
}
assert_eq!(
errors.get(2).unwrap().to_string(),
"global_settings.local_ip: invalid IP address syntax"
);
}
}
@@ -7,131 +7,116 @@ use crate::shared::{macros::states, types::ParaId};
/// HRMP channel configuration, with fine-grained configuration options.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct HrmpChannelConfig {
sender: ParaId,
recipient: ParaId,
max_capacity: u32,
max_message_size: u32,
sender: ParaId,
recipient: ParaId,
max_capacity: u32,
max_message_size: u32,
}
impl HrmpChannelConfig {
/// The sending parachain ID.
pub fn sender(&self) -> ParaId {
self.sender
}
/// The sending parachain ID.
pub fn sender(&self) -> ParaId {
self.sender
}
/// The receiving parachain ID.
pub fn recipient(&self) -> ParaId {
self.recipient
}
/// The receiving parachain ID.
pub fn recipient(&self) -> ParaId {
self.recipient
}
/// The maximum capacity of messages in the channel.
pub fn max_capacity(&self) -> u32 {
self.max_capacity
}
/// The maximum capacity of messages in the channel.
pub fn max_capacity(&self) -> u32 {
self.max_capacity
}
/// The maximum size of a message in the channel.
pub fn max_message_size(&self) -> u32 {
self.max_message_size
}
/// The maximum size of a message in the channel.
pub fn max_message_size(&self) -> u32 {
self.max_message_size
}
}
states! {
Initial,
WithSender,
WithRecipient
Initial,
WithSender,
WithRecipient
}
/// HRMP channel configuration builder, used to build an [`HrmpChannelConfig`] declaratively with fields validation.
pub struct HrmpChannelConfigBuilder<State> {
config: HrmpChannelConfig,
_state: PhantomData<State>,
config: HrmpChannelConfig,
_state: PhantomData<State>,
}
impl Default for HrmpChannelConfigBuilder<Initial> {
fn default() -> Self {
Self {
config: HrmpChannelConfig {
sender: 0,
recipient: 0,
max_capacity: 8,
max_message_size: 512,
},
_state: PhantomData,
}
}
fn default() -> Self {
Self {
config: HrmpChannelConfig {
sender: 0,
recipient: 0,
max_capacity: 8,
max_message_size: 512,
},
_state: PhantomData,
}
}
}
impl<A> HrmpChannelConfigBuilder<A> {
fn transition<B>(&self, config: HrmpChannelConfig) -> HrmpChannelConfigBuilder<B> {
HrmpChannelConfigBuilder {
config,
_state: PhantomData,
}
}
fn transition<B>(&self, config: HrmpChannelConfig) -> HrmpChannelConfigBuilder<B> {
HrmpChannelConfigBuilder { config, _state: PhantomData }
}
}
impl HrmpChannelConfigBuilder<Initial> {
pub fn new() -> Self {
Self::default()
}
pub fn new() -> Self {
Self::default()
}
/// Set the sending parachain ID.
pub fn with_sender(self, sender: ParaId) -> HrmpChannelConfigBuilder<WithSender> {
self.transition(HrmpChannelConfig {
sender,
..self.config
})
}
/// Set the sending parachain ID.
pub fn with_sender(self, sender: ParaId) -> HrmpChannelConfigBuilder<WithSender> {
self.transition(HrmpChannelConfig { sender, ..self.config })
}
}
impl HrmpChannelConfigBuilder<WithSender> {
/// Set the receiving parachain ID.
pub fn with_recipient(self, recipient: ParaId) -> HrmpChannelConfigBuilder<WithRecipient> {
self.transition(HrmpChannelConfig {
recipient,
..self.config
})
}
/// Set the receiving parachain ID.
pub fn with_recipient(self, recipient: ParaId) -> HrmpChannelConfigBuilder<WithRecipient> {
self.transition(HrmpChannelConfig { recipient, ..self.config })
}
}
impl HrmpChannelConfigBuilder<WithRecipient> {
/// Set the max capacity of messages in the channel.
pub fn with_max_capacity(self, max_capacity: u32) -> Self {
self.transition(HrmpChannelConfig {
max_capacity,
..self.config
})
}
/// Set the max capacity of messages in the channel.
pub fn with_max_capacity(self, max_capacity: u32) -> Self {
self.transition(HrmpChannelConfig { max_capacity, ..self.config })
}
/// Set the maximum size of a message in the channel.
pub fn with_max_message_size(self, max_message_size: u32) -> Self {
self.transition(HrmpChannelConfig {
max_message_size,
..self.config
})
}
/// Set the maximum size of a message in the channel.
pub fn with_max_message_size(self, max_message_size: u32) -> Self {
self.transition(HrmpChannelConfig { max_message_size, ..self.config })
}
pub fn build(self) -> HrmpChannelConfig {
self.config
}
pub fn build(self) -> HrmpChannelConfig {
self.config
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::*;
#[test]
fn hrmp_channel_config_builder_should_build_a_new_hrmp_channel_config_correctly() {
let hrmp_channel_config = HrmpChannelConfigBuilder::new()
.with_sender(1000)
.with_recipient(2000)
.with_max_capacity(50)
.with_max_message_size(100)
.build();
#[test]
fn hrmp_channel_config_builder_should_build_a_new_hrmp_channel_config_correctly() {
let hrmp_channel_config = HrmpChannelConfigBuilder::new()
.with_sender(1000)
.with_recipient(2000)
.with_max_capacity(50)
.with_max_message_size(100)
.build();
assert_eq!(hrmp_channel_config.sender(), 1000);
assert_eq!(hrmp_channel_config.recipient(), 2000);
assert_eq!(hrmp_channel_config.max_capacity(), 50);
assert_eq!(hrmp_channel_config.max_message_size(), 100);
}
assert_eq!(hrmp_channel_config.sender(), 1000);
assert_eq!(hrmp_channel_config.recipient(), 2000);
assert_eq!(hrmp_channel_config.max_capacity(), 50);
assert_eq!(hrmp_channel_config.max_message_size(), 100);
}
}
@@ -93,7 +93,7 @@ pub use relaychain::{RelaychainConfig, RelaychainConfigBuilder};
// re-export shared
pub use shared::{node::NodeConfig, types};
pub use teyrchain::{
states as para_states, RegistrationStrategy, TeyrchainConfig, TeyrchainConfigBuilder,
states as para_states, RegistrationStrategy, TeyrchainConfig, TeyrchainConfigBuilder,
};
// Backward compatibility aliases for external crates that use Polkadot SDK terminology
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
@@ -3,114 +3,114 @@ use super::types::{ParaId, Port};
/// An error at the configuration level.
#[derive(thiserror::Error, Debug)]
pub enum ConfigError {
#[error("relaychain.{0}")]
Relaychain(anyhow::Error),
#[error("relaychain.{0}")]
Relaychain(anyhow::Error),
#[error("teyrchain[{0}].{1}")]
Teyrchain(ParaId, anyhow::Error),
#[error("teyrchain[{0}].{1}")]
Teyrchain(ParaId, anyhow::Error),
#[error("global_settings.{0}")]
GlobalSettings(anyhow::Error),
#[error("global_settings.{0}")]
GlobalSettings(anyhow::Error),
#[error("nodes['{0}'].{1}")]
Node(String, anyhow::Error),
#[error("nodes['{0}'].{1}")]
Node(String, anyhow::Error),
#[error("collators['{0}'].{1}")]
Collator(String, anyhow::Error),
#[error("collators['{0}'].{1}")]
Collator(String, anyhow::Error),
}
/// An error at the field level.
#[derive(thiserror::Error, Debug)]
pub enum FieldError {
#[error("name: {0}")]
Name(anyhow::Error),
#[error("name: {0}")]
Name(anyhow::Error),
#[error("chain: {0}")]
Chain(anyhow::Error),
#[error("chain: {0}")]
Chain(anyhow::Error),
#[error("image: {0}")]
Image(anyhow::Error),
#[error("image: {0}")]
Image(anyhow::Error),
#[error("default_image: {0}")]
DefaultImage(anyhow::Error),
#[error("default_image: {0}")]
DefaultImage(anyhow::Error),
#[error("command: {0}")]
Command(anyhow::Error),
#[error("command: {0}")]
Command(anyhow::Error),
#[error("default_command: {0}")]
DefaultCommand(anyhow::Error),
#[error("default_command: {0}")]
DefaultCommand(anyhow::Error),
#[error("bootnodes_addresses[{0}]: '{1}' {2}")]
BootnodesAddress(usize, String, anyhow::Error),
#[error("bootnodes_addresses[{0}]: '{1}' {2}")]
BootnodesAddress(usize, String, anyhow::Error),
#[error("genesis_wasm_generator: {0}")]
GenesisWasmGenerator(anyhow::Error),
#[error("genesis_wasm_generator: {0}")]
GenesisWasmGenerator(anyhow::Error),
#[error("genesis_state_generator: {0}")]
GenesisStateGenerator(anyhow::Error),
#[error("genesis_state_generator: {0}")]
GenesisStateGenerator(anyhow::Error),
#[error("local_ip: {0}")]
LocalIp(anyhow::Error),
#[error("local_ip: {0}")]
LocalIp(anyhow::Error),
#[error("default_resources.{0}")]
DefaultResources(anyhow::Error),
#[error("default_resources.{0}")]
DefaultResources(anyhow::Error),
#[error("resources.{0}")]
Resources(anyhow::Error),
#[error("resources.{0}")]
Resources(anyhow::Error),
#[error("request_memory: {0}")]
RequestMemory(anyhow::Error),
#[error("request_memory: {0}")]
RequestMemory(anyhow::Error),
#[error("request_cpu: {0}")]
RequestCpu(anyhow::Error),
#[error("request_cpu: {0}")]
RequestCpu(anyhow::Error),
#[error("limit_memory: {0}")]
LimitMemory(anyhow::Error),
#[error("limit_memory: {0}")]
LimitMemory(anyhow::Error),
#[error("limit_cpu: {0}")]
LimitCpu(anyhow::Error),
#[error("limit_cpu: {0}")]
LimitCpu(anyhow::Error),
#[error("ws_port: {0}")]
WsPort(anyhow::Error),
#[error("ws_port: {0}")]
WsPort(anyhow::Error),
#[error("rpc_port: {0}")]
RpcPort(anyhow::Error),
#[error("rpc_port: {0}")]
RpcPort(anyhow::Error),
#[error("prometheus_port: {0}")]
PrometheusPort(anyhow::Error),
#[error("prometheus_port: {0}")]
PrometheusPort(anyhow::Error),
#[error("p2p_port: {0}")]
P2pPort(anyhow::Error),
#[error("p2p_port: {0}")]
P2pPort(anyhow::Error),
#[error("session_key: {0}")]
SessionKey(anyhow::Error),
#[error("session_key: {0}")]
SessionKey(anyhow::Error),
#[error("registration_strategy: {0}")]
RegistrationStrategy(anyhow::Error),
#[error("registration_strategy: {0}")]
RegistrationStrategy(anyhow::Error),
}
/// A conversion error for shared types across fields.
#[derive(thiserror::Error, Debug, Clone)]
pub enum ConversionError {
#[error("'{0}' shouldn't contains whitespace")]
ContainsWhitespaces(String),
#[error("'{0}' shouldn't contains whitespace")]
ContainsWhitespaces(String),
#[error("'{}' doesn't match regex '{}'", .value, .regex)]
DoesntMatchRegex { value: String, regex: String },
#[error("'{}' doesn't match regex '{}'", .value, .regex)]
DoesntMatchRegex { value: String, regex: String },
#[error("can't be empty")]
CantBeEmpty,
#[error("can't be empty")]
CantBeEmpty,
#[error("deserialize error")]
DeserializeError(String),
#[error("deserialize error")]
DeserializeError(String),
}
/// A validation error for shared types across fields.
#[derive(thiserror::Error, Debug, Clone)]
pub enum ValidationError {
#[error("'{0}' is already used across config")]
PortAlreadyUsed(Port),
#[error("'{0}' is already used across config")]
PortAlreadyUsed(Port),
#[error("can't be empty")]
CantBeEmpty(),
#[error("can't be empty")]
CantBeEmpty(),
}
@@ -4,28 +4,28 @@ use support::constants::{BORROWABLE, THIS_IS_A_BUG};
use tracing::warn;
use super::{
errors::ValidationError,
types::{ParaId, Port, ValidationContext},
errors::ValidationError,
types::{ParaId, Port, ValidationContext},
};
pub fn merge_errors(errors: Vec<anyhow::Error>, new_error: anyhow::Error) -> Vec<anyhow::Error> {
let mut errors = errors;
errors.push(new_error);
let mut errors = errors;
errors.push(new_error);
errors
errors
}
pub fn merge_errors_vecs(
errors: Vec<anyhow::Error>,
new_errors: Vec<anyhow::Error>,
errors: Vec<anyhow::Error>,
new_errors: Vec<anyhow::Error>,
) -> Vec<anyhow::Error> {
let mut errors = errors;
let mut errors = errors;
for new_error in new_errors.into_iter() {
errors.push(new_error);
}
for new_error in new_errors.into_iter() {
errors.push(new_error);
}
errors
errors
}
/// Generates a unique name from a base name and the names already present in a
@@ -34,14 +34,13 @@ pub fn merge_errors_vecs(
/// Uses [`generate_unique_node_name_from_names()`] internally to ensure uniqueness.
/// Logs a warning if the generated name differs from the original due to duplicates.
pub fn generate_unique_node_name(
node_name: impl Into<String>,
validation_context: Rc<RefCell<ValidationContext>>,
node_name: impl Into<String>,
validation_context: Rc<RefCell<ValidationContext>>,
) -> String {
let mut context = validation_context
.try_borrow_mut()
.expect(&format!("{BORROWABLE}, {THIS_IS_A_BUG}"));
let mut context =
validation_context.try_borrow_mut().expect(&format!("{BORROWABLE}, {THIS_IS_A_BUG}"));
generate_unique_node_name_from_names(node_name, &mut context.used_nodes_names)
generate_unique_node_name_from_names(node_name, &mut context.used_nodes_names)
}
/// Returns `node_name` if it is not already in `names`.
@@ -49,70 +48,68 @@ pub fn generate_unique_node_name(
/// Otherwise, appends an incrementing `-{counter}` suffix until a unique name is found,
/// then returns it. Logs a warning when a duplicate is detected.
pub fn generate_unique_node_name_from_names(
node_name: impl Into<String>,
names: &mut HashSet<String>,
node_name: impl Into<String>,
names: &mut HashSet<String>,
) -> String {
let node_name = node_name.into();
let node_name = node_name.into();
if names.insert(node_name.clone()) {
return node_name;
}
if names.insert(node_name.clone()) {
return node_name;
}
let mut counter = 1;
let mut candidate = node_name.clone();
while names.contains(&candidate) {
candidate = format!("{node_name}-{counter}");
counter += 1;
}
let mut counter = 1;
let mut candidate = node_name.clone();
while names.contains(&candidate) {
candidate = format!("{node_name}-{counter}");
counter += 1;
}
warn!(
original = %node_name,
adjusted = %candidate,
"Duplicate node name detected."
);
warn!(
original = %node_name,
adjusted = %candidate,
"Duplicate node name detected."
);
names.insert(candidate.clone());
candidate
names.insert(candidate.clone());
candidate
}
pub fn ensure_value_is_not_empty(value: &str) -> Result<(), anyhow::Error> {
if value.is_empty() {
Err(ValidationError::CantBeEmpty().into())
} else {
Ok(())
}
if value.is_empty() {
Err(ValidationError::CantBeEmpty().into())
} else {
Ok(())
}
}
pub fn ensure_port_unique(
port: Port,
validation_context: Rc<RefCell<ValidationContext>>,
port: Port,
validation_context: Rc<RefCell<ValidationContext>>,
) -> Result<(), anyhow::Error> {
let mut context = validation_context
.try_borrow_mut()
.expect(&format!("{BORROWABLE}, {THIS_IS_A_BUG}"));
let mut context =
validation_context.try_borrow_mut().expect(&format!("{BORROWABLE}, {THIS_IS_A_BUG}"));
if !context.used_ports.contains(&port) {
context.used_ports.push(port);
return Ok(());
}
if !context.used_ports.contains(&port) {
context.used_ports.push(port);
return Ok(());
}
Err(ValidationError::PortAlreadyUsed(port).into())
Err(ValidationError::PortAlreadyUsed(port).into())
}
pub fn generate_unique_para_id(
para_id: ParaId,
validation_context: Rc<RefCell<ValidationContext>>,
para_id: ParaId,
validation_context: Rc<RefCell<ValidationContext>>,
) -> String {
let mut context = validation_context
.try_borrow_mut()
.expect(&format!("{BORROWABLE}, {THIS_IS_A_BUG}"));
let mut context =
validation_context.try_borrow_mut().expect(&format!("{BORROWABLE}, {THIS_IS_A_BUG}"));
if let Some(suffix) = context.used_para_ids.get_mut(&para_id) {
*suffix += 1;
format!("{para_id}-{suffix}")
} else {
// insert 0, since will be used next time.
context.used_para_ids.insert(para_id, 0);
para_id.to_string()
}
if let Some(suffix) = context.used_para_ids.get_mut(&para_id) {
*suffix += 1;
format!("{para_id}-{suffix}")
} else {
// insert 0, since will be used next time.
context.used_para_ids.insert(para_id, 0);
para_id.to_string()
}
}
File diff suppressed because it is too large Load Diff
@@ -3,15 +3,15 @@ use std::error::Error;
use lazy_static::lazy_static;
use regex::Regex;
use serde::{
de::{self},
ser::SerializeStruct,
Deserialize, Serialize,
de::{self},
ser::SerializeStruct,
Deserialize, Serialize,
};
use support::constants::{SHOULD_COMPILE, THIS_IS_A_BUG};
use super::{
errors::{ConversionError, FieldError},
helpers::merge_errors,
errors::{ConversionError, FieldError},
helpers::merge_errors,
};
/// A resource quantity used to define limits (k8s/podman only).
@@ -37,453 +37,434 @@ use super::{
pub struct ResourceQuantity(String);
impl ResourceQuantity {
pub fn as_str(&self) -> &str {
&self.0
}
pub fn as_str(&self) -> &str {
&self.0
}
}
impl TryFrom<&str> for ResourceQuantity {
type Error = ConversionError;
type Error = ConversionError;
fn try_from(value: &str) -> Result<Self, Self::Error> {
lazy_static! {
static ref RE: Regex = Regex::new(r"^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$")
.expect(&format!("{SHOULD_COMPILE}, {THIS_IS_A_BUG}"));
}
fn try_from(value: &str) -> Result<Self, Self::Error> {
lazy_static! {
static ref RE: Regex = Regex::new(r"^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$")
.expect(&format!("{SHOULD_COMPILE}, {THIS_IS_A_BUG}"));
}
if !RE.is_match(value) {
return Err(ConversionError::DoesntMatchRegex {
value: value.to_string(),
regex: r"^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$".to_string(),
});
}
if !RE.is_match(value) {
return Err(ConversionError::DoesntMatchRegex {
value: value.to_string(),
regex: r"^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$".to_string(),
});
}
Ok(Self(value.to_string()))
}
Ok(Self(value.to_string()))
}
}
impl From<u64> for ResourceQuantity {
fn from(value: u64) -> Self {
Self(value.to_string())
}
fn from(value: u64) -> Self {
Self(value.to_string())
}
}
/// Resources limits used in the context of podman/k8s.
#[derive(Debug, Default, Clone, PartialEq)]
pub struct Resources {
request_memory: Option<ResourceQuantity>,
request_cpu: Option<ResourceQuantity>,
limit_memory: Option<ResourceQuantity>,
limit_cpu: Option<ResourceQuantity>,
request_memory: Option<ResourceQuantity>,
request_cpu: Option<ResourceQuantity>,
limit_memory: Option<ResourceQuantity>,
limit_cpu: Option<ResourceQuantity>,
}
#[derive(Serialize, Deserialize)]
struct ResourcesField {
memory: Option<ResourceQuantity>,
cpu: Option<ResourceQuantity>,
memory: Option<ResourceQuantity>,
cpu: Option<ResourceQuantity>,
}
impl Serialize for Resources {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let mut state = serializer.serialize_struct("Resources", 2)?;
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let mut state = serializer.serialize_struct("Resources", 2)?;
if self.request_memory.is_some() || self.request_memory.is_some() {
state.serialize_field(
"requests",
&ResourcesField {
memory: self.request_memory.clone(),
cpu: self.request_cpu.clone(),
},
)?;
} else {
state.skip_field("requests")?;
}
if self.request_memory.is_some() || self.request_memory.is_some() {
state.serialize_field(
"requests",
&ResourcesField {
memory: self.request_memory.clone(),
cpu: self.request_cpu.clone(),
},
)?;
} else {
state.skip_field("requests")?;
}
if self.limit_memory.is_some() || self.limit_memory.is_some() {
state.serialize_field(
"limits",
&ResourcesField {
memory: self.limit_memory.clone(),
cpu: self.limit_cpu.clone(),
},
)?;
} else {
state.skip_field("limits")?;
}
if self.limit_memory.is_some() || self.limit_memory.is_some() {
state.serialize_field(
"limits",
&ResourcesField { memory: self.limit_memory.clone(), cpu: self.limit_cpu.clone() },
)?;
} else {
state.skip_field("limits")?;
}
state.end()
}
state.end()
}
}
struct ResourcesVisitor;
impl<'de> de::Visitor<'de> for ResourcesVisitor {
type Value = Resources;
type Value = Resources;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a resources object")
}
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a resources object")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: de::MapAccess<'de>,
{
let mut resources: Resources = Resources::default();
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: de::MapAccess<'de>,
{
let mut resources: Resources = Resources::default();
while let Some((key, value)) = map.next_entry::<String, ResourcesField>()? {
match key.as_str() {
"requests" => {
resources.request_memory = value.memory;
resources.request_cpu = value.cpu;
},
"limits" => {
resources.limit_memory = value.memory;
resources.limit_cpu = value.cpu;
},
_ => {
return Err(de::Error::unknown_field(
&key,
&["requests", "limits", "cpu", "memory"],
))
},
}
}
Ok(resources)
}
while let Some((key, value)) = map.next_entry::<String, ResourcesField>()? {
match key.as_str() {
"requests" => {
resources.request_memory = value.memory;
resources.request_cpu = value.cpu;
},
"limits" => {
resources.limit_memory = value.memory;
resources.limit_cpu = value.cpu;
},
_ => {
return Err(de::Error::unknown_field(
&key,
&["requests", "limits", "cpu", "memory"],
))
},
}
}
Ok(resources)
}
}
impl<'de> Deserialize<'de> for Resources {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
deserializer.deserialize_any(ResourcesVisitor)
}
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
deserializer.deserialize_any(ResourcesVisitor)
}
}
impl Resources {
/// Memory limit applied to requests.
pub fn request_memory(&self) -> Option<&ResourceQuantity> {
self.request_memory.as_ref()
}
/// Memory limit applied to requests.
pub fn request_memory(&self) -> Option<&ResourceQuantity> {
self.request_memory.as_ref()
}
/// CPU limit applied to requests.
pub fn request_cpu(&self) -> Option<&ResourceQuantity> {
self.request_cpu.as_ref()
}
/// CPU limit applied to requests.
pub fn request_cpu(&self) -> Option<&ResourceQuantity> {
self.request_cpu.as_ref()
}
/// Overall memory limit applied.
pub fn limit_memory(&self) -> Option<&ResourceQuantity> {
self.limit_memory.as_ref()
}
/// Overall memory limit applied.
pub fn limit_memory(&self) -> Option<&ResourceQuantity> {
self.limit_memory.as_ref()
}
/// Overall CPU limit applied.
pub fn limit_cpu(&self) -> Option<&ResourceQuantity> {
self.limit_cpu.as_ref()
}
/// Overall CPU limit applied.
pub fn limit_cpu(&self) -> Option<&ResourceQuantity> {
self.limit_cpu.as_ref()
}
}
/// A resources builder, used to build a [`Resources`] declaratively with fields validation.
#[derive(Debug, Default)]
pub struct ResourcesBuilder {
config: Resources,
errors: Vec<anyhow::Error>,
config: Resources,
errors: Vec<anyhow::Error>,
}
impl ResourcesBuilder {
pub fn new() -> ResourcesBuilder {
Self::default()
}
pub fn new() -> ResourcesBuilder {
Self::default()
}
fn transition(config: Resources, errors: Vec<anyhow::Error>) -> Self {
Self { config, errors }
}
fn transition(config: Resources, errors: Vec<anyhow::Error>) -> Self {
Self { config, errors }
}
/// Set the requested memory for a pod. This is the minimum memory allocated for a pod.
pub fn with_request_memory<T>(self, quantity: T) -> Self
where
T: TryInto<ResourceQuantity>,
T::Error: Error + Send + Sync + 'static,
{
match quantity.try_into() {
Ok(quantity) => Self::transition(
Resources {
request_memory: Some(quantity),
..self.config
},
self.errors,
),
Err(error) => Self::transition(
self.config,
merge_errors(self.errors, FieldError::RequestMemory(error.into()).into()),
),
}
}
/// Set the requested memory for a pod. This is the minimum memory allocated for a pod.
pub fn with_request_memory<T>(self, quantity: T) -> Self
where
T: TryInto<ResourceQuantity>,
T::Error: Error + Send + Sync + 'static,
{
match quantity.try_into() {
Ok(quantity) => Self::transition(
Resources { request_memory: Some(quantity), ..self.config },
self.errors,
),
Err(error) => Self::transition(
self.config,
merge_errors(self.errors, FieldError::RequestMemory(error.into()).into()),
),
}
}
/// Set the requested CPU limit for a pod. This is the minimum CPU allocated for a pod.
pub fn with_request_cpu<T>(self, quantity: T) -> Self
where
T: TryInto<ResourceQuantity>,
T::Error: Error + Send + Sync + 'static,
{
match quantity.try_into() {
Ok(quantity) => Self::transition(
Resources {
request_cpu: Some(quantity),
..self.config
},
self.errors,
),
Err(error) => Self::transition(
self.config,
merge_errors(self.errors, FieldError::RequestCpu(error.into()).into()),
),
}
}
/// Set the requested CPU limit for a pod. This is the minimum CPU allocated for a pod.
pub fn with_request_cpu<T>(self, quantity: T) -> Self
where
T: TryInto<ResourceQuantity>,
T::Error: Error + Send + Sync + 'static,
{
match quantity.try_into() {
Ok(quantity) => Self::transition(
Resources { request_cpu: Some(quantity), ..self.config },
self.errors,
),
Err(error) => Self::transition(
self.config,
merge_errors(self.errors, FieldError::RequestCpu(error.into()).into()),
),
}
}
/// Set the overall memory limit for a pod. This is the maximum memory threshold for a pod.
pub fn with_limit_memory<T>(self, quantity: T) -> Self
where
T: TryInto<ResourceQuantity>,
T::Error: Error + Send + Sync + 'static,
{
match quantity.try_into() {
Ok(quantity) => Self::transition(
Resources {
limit_memory: Some(quantity),
..self.config
},
self.errors,
),
Err(error) => Self::transition(
self.config,
merge_errors(self.errors, FieldError::LimitMemory(error.into()).into()),
),
}
}
/// Set the overall memory limit for a pod. This is the maximum memory threshold for a pod.
pub fn with_limit_memory<T>(self, quantity: T) -> Self
where
T: TryInto<ResourceQuantity>,
T::Error: Error + Send + Sync + 'static,
{
match quantity.try_into() {
Ok(quantity) => Self::transition(
Resources { limit_memory: Some(quantity), ..self.config },
self.errors,
),
Err(error) => Self::transition(
self.config,
merge_errors(self.errors, FieldError::LimitMemory(error.into()).into()),
),
}
}
/// Set the overall CPU limit for a pod. This is the maximum CPU threshold for a pod.
pub fn with_limit_cpu<T>(self, quantity: T) -> Self
where
T: TryInto<ResourceQuantity>,
T::Error: Error + Send + Sync + 'static,
{
match quantity.try_into() {
Ok(quantity) => Self::transition(
Resources {
limit_cpu: Some(quantity),
..self.config
},
self.errors,
),
Err(error) => Self::transition(
self.config,
merge_errors(self.errors, FieldError::LimitCpu(error.into()).into()),
),
}
}
/// Set the overall CPU limit for a pod. This is the maximum CPU threshold for a pod.
pub fn with_limit_cpu<T>(self, quantity: T) -> Self
where
T: TryInto<ResourceQuantity>,
T::Error: Error + Send + Sync + 'static,
{
match quantity.try_into() {
Ok(quantity) => Self::transition(
Resources { limit_cpu: Some(quantity), ..self.config },
self.errors,
),
Err(error) => Self::transition(
self.config,
merge_errors(self.errors, FieldError::LimitCpu(error.into()).into()),
),
}
}
/// Seals the builder and returns a [`Resources`] if there are no validation errors, else returns errors.
pub fn build(self) -> Result<Resources, Vec<anyhow::Error>> {
if !self.errors.is_empty() {
return Err(self.errors);
}
/// Seals the builder and returns a [`Resources`] if there are no validation errors, else returns errors.
pub fn build(self) -> Result<Resources, Vec<anyhow::Error>> {
if !self.errors.is_empty() {
return Err(self.errors);
}
Ok(self.config)
}
Ok(self.config)
}
}
#[cfg(test)]
#[allow(non_snake_case)]
mod tests {
use super::*;
use crate::NetworkConfig;
use super::*;
use crate::NetworkConfig;
macro_rules! impl_resources_quantity_unit_test {
($val:literal) => {{
let resources = ResourcesBuilder::new()
.with_request_memory($val)
.build()
.unwrap();
macro_rules! impl_resources_quantity_unit_test {
($val:literal) => {{
let resources = ResourcesBuilder::new().with_request_memory($val).build().unwrap();
assert_eq!(resources.request_memory().unwrap().as_str(), $val);
assert_eq!(resources.request_cpu(), None);
assert_eq!(resources.limit_cpu(), None);
assert_eq!(resources.limit_memory(), None);
}};
}
assert_eq!(resources.request_memory().unwrap().as_str(), $val);
assert_eq!(resources.request_cpu(), None);
assert_eq!(resources.limit_cpu(), None);
assert_eq!(resources.limit_memory(), None);
}};
}
#[test]
fn converting_a_string_a_resource_quantity_without_unit_should_succeeds() {
impl_resources_quantity_unit_test!("1000");
}
#[test]
fn converting_a_string_a_resource_quantity_without_unit_should_succeeds() {
impl_resources_quantity_unit_test!("1000");
}
#[test]
fn converting_a_str_with_m_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("100m");
}
#[test]
fn converting_a_str_with_m_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("100m");
}
#[test]
fn converting_a_str_with_K_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("50K");
}
#[test]
fn converting_a_str_with_K_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("50K");
}
#[test]
fn converting_a_str_with_M_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("100M");
}
#[test]
fn converting_a_str_with_M_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("100M");
}
#[test]
fn converting_a_str_with_G_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("1G");
}
#[test]
fn converting_a_str_with_G_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("1G");
}
#[test]
fn converting_a_str_with_T_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("0.01T");
}
#[test]
fn converting_a_str_with_T_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("0.01T");
}
#[test]
fn converting_a_str_with_P_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("0.00001P");
}
#[test]
fn converting_a_str_with_P_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("0.00001P");
}
#[test]
fn converting_a_str_with_E_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("0.000000001E");
}
#[test]
fn converting_a_str_with_E_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("0.000000001E");
}
#[test]
fn converting_a_str_with_Ki_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("50Ki");
}
#[test]
fn converting_a_str_with_Ki_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("50Ki");
}
#[test]
fn converting_a_str_with_Mi_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("100Mi");
}
#[test]
fn converting_a_str_with_Mi_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("100Mi");
}
#[test]
fn converting_a_str_with_Gi_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("1Gi");
}
#[test]
fn converting_a_str_with_Gi_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("1Gi");
}
#[test]
fn converting_a_str_with_Ti_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("0.01Ti");
}
#[test]
fn converting_a_str_with_Ti_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("0.01Ti");
}
#[test]
fn converting_a_str_with_Pi_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("0.00001Pi");
}
#[test]
fn converting_a_str_with_Pi_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("0.00001Pi");
}
#[test]
fn converting_a_str_with_Ei_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("0.000000001Ei");
}
#[test]
fn converting_a_str_with_Ei_unit_into_a_resource_quantity_should_succeeds() {
impl_resources_quantity_unit_test!("0.000000001Ei");
}
#[test]
fn resources_config_builder_should_succeeds_and_returns_a_resources_config() {
let resources = ResourcesBuilder::new()
.with_request_memory("200M")
.with_request_cpu("1G")
.with_limit_cpu("500M")
.with_limit_memory("2G")
.build()
.unwrap();
#[test]
fn resources_config_builder_should_succeeds_and_returns_a_resources_config() {
let resources = ResourcesBuilder::new()
.with_request_memory("200M")
.with_request_cpu("1G")
.with_limit_cpu("500M")
.with_limit_memory("2G")
.build()
.unwrap();
assert_eq!(resources.request_memory().unwrap().as_str(), "200M");
assert_eq!(resources.request_cpu().unwrap().as_str(), "1G");
assert_eq!(resources.limit_cpu().unwrap().as_str(), "500M");
assert_eq!(resources.limit_memory().unwrap().as_str(), "2G");
}
assert_eq!(resources.request_memory().unwrap().as_str(), "200M");
assert_eq!(resources.request_cpu().unwrap().as_str(), "1G");
assert_eq!(resources.limit_cpu().unwrap().as_str(), "500M");
assert_eq!(resources.limit_memory().unwrap().as_str(), "2G");
}
#[test]
fn resources_config_toml_import_should_succeeds_and_returns_a_resources_config() {
let load_from_toml =
NetworkConfig::load_from_toml("./testing/snapshots/0001-big-network.toml").unwrap();
#[test]
fn resources_config_toml_import_should_succeeds_and_returns_a_resources_config() {
let load_from_toml =
NetworkConfig::load_from_toml("./testing/snapshots/0001-big-network.toml").unwrap();
let resources = load_from_toml.relaychain().default_resources().unwrap();
assert_eq!(resources.request_memory().unwrap().as_str(), "500M");
assert_eq!(resources.request_cpu().unwrap().as_str(), "100000");
assert_eq!(resources.limit_cpu().unwrap().as_str(), "10Gi");
assert_eq!(resources.limit_memory().unwrap().as_str(), "4000M");
}
let resources = load_from_toml.relaychain().default_resources().unwrap();
assert_eq!(resources.request_memory().unwrap().as_str(), "500M");
assert_eq!(resources.request_cpu().unwrap().as_str(), "100000");
assert_eq!(resources.limit_cpu().unwrap().as_str(), "10Gi");
assert_eq!(resources.limit_memory().unwrap().as_str(), "4000M");
}
#[test]
fn resources_config_builder_should_fails_and_returns_an_error_if_couldnt_parse_request_memory()
{
let resources_builder = ResourcesBuilder::new().with_request_memory("invalid");
#[test]
fn resources_config_builder_should_fails_and_returns_an_error_if_couldnt_parse_request_memory()
{
let resources_builder = ResourcesBuilder::new().with_request_memory("invalid");
let errors = resources_builder.build().err().unwrap();
let errors = resources_builder.build().err().unwrap();
assert_eq!(errors.len(), 1);
assert_eq!(
errors.first().unwrap().to_string(),
r"request_memory: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'"
);
}
assert_eq!(errors.len(), 1);
assert_eq!(
errors.first().unwrap().to_string(),
r"request_memory: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'"
);
}
#[test]
fn resources_config_builder_should_fails_and_returns_an_error_if_couldnt_parse_request_cpu() {
let resources_builder = ResourcesBuilder::new().with_request_cpu("invalid");
#[test]
fn resources_config_builder_should_fails_and_returns_an_error_if_couldnt_parse_request_cpu() {
let resources_builder = ResourcesBuilder::new().with_request_cpu("invalid");
let errors = resources_builder.build().err().unwrap();
let errors = resources_builder.build().err().unwrap();
assert_eq!(errors.len(), 1);
assert_eq!(
errors.first().unwrap().to_string(),
r"request_cpu: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'"
);
}
assert_eq!(errors.len(), 1);
assert_eq!(
errors.first().unwrap().to_string(),
r"request_cpu: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'"
);
}
#[test]
fn resources_config_builder_should_fails_and_returns_an_error_if_couldnt_parse_limit_memory() {
let resources_builder = ResourcesBuilder::new().with_limit_memory("invalid");
#[test]
fn resources_config_builder_should_fails_and_returns_an_error_if_couldnt_parse_limit_memory() {
let resources_builder = ResourcesBuilder::new().with_limit_memory("invalid");
let errors = resources_builder.build().err().unwrap();
let errors = resources_builder.build().err().unwrap();
assert_eq!(errors.len(), 1);
assert_eq!(
errors.first().unwrap().to_string(),
r"limit_memory: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'"
);
}
assert_eq!(errors.len(), 1);
assert_eq!(
errors.first().unwrap().to_string(),
r"limit_memory: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'"
);
}
#[test]
fn resources_config_builder_should_fails_and_returns_an_error_if_couldnt_parse_limit_cpu() {
let resources_builder = ResourcesBuilder::new().with_limit_cpu("invalid");
#[test]
fn resources_config_builder_should_fails_and_returns_an_error_if_couldnt_parse_limit_cpu() {
let resources_builder = ResourcesBuilder::new().with_limit_cpu("invalid");
let errors = resources_builder.build().err().unwrap();
let errors = resources_builder.build().err().unwrap();
assert_eq!(errors.len(), 1);
assert_eq!(
errors.first().unwrap().to_string(),
r"limit_cpu: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'"
);
}
assert_eq!(errors.len(), 1);
assert_eq!(
errors.first().unwrap().to_string(),
r"limit_cpu: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'"
);
}
#[test]
fn resources_config_builder_should_fails_and_returns_multiple_error_if_couldnt_parse_multiple_fields(
) {
let resources_builder = ResourcesBuilder::new()
.with_limit_cpu("invalid")
.with_request_memory("invalid");
#[test]
fn resources_config_builder_should_fails_and_returns_multiple_error_if_couldnt_parse_multiple_fields(
) {
let resources_builder =
ResourcesBuilder::new().with_limit_cpu("invalid").with_request_memory("invalid");
let errors = resources_builder.build().err().unwrap();
let errors = resources_builder.build().err().unwrap();
assert_eq!(errors.len(), 2);
assert_eq!(
errors.first().unwrap().to_string(),
r"limit_cpu: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'"
);
assert_eq!(
errors.get(1).unwrap().to_string(),
r"request_memory: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'"
);
}
assert_eq!(errors.len(), 2);
assert_eq!(
errors.first().unwrap().to_string(),
r"limit_cpu: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'"
);
assert_eq!(
errors.get(1).unwrap().to_string(),
r"request_memory: 'invalid' doesn't match regex '^\d+(.\d+)?(m|K|M|G|T|P|E|Ki|Mi|Gi|Ti|Pi|Ei)?$'"
);
}
}
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
@@ -5,61 +5,61 @@ use support::constants::ZOMBIE_NODE_SPAWN_TIMEOUT_SECONDS;
use crate::types::{Chain, Command, Duration};
pub(crate) fn is_true(value: &bool) -> bool {
*value
*value
}
pub(crate) fn is_false(value: &bool) -> bool {
!(*value)
!(*value)
}
pub(crate) fn default_as_true() -> bool {
true
true
}
pub(crate) fn default_as_false() -> bool {
false
false
}
pub(crate) fn default_initial_balance() -> crate::types::U128 {
2_000_000_000_000.into()
2_000_000_000_000.into()
}
/// Default timeout for spawning a node (10mins)
pub(crate) fn default_node_spawn_timeout() -> Duration {
env::var(ZOMBIE_NODE_SPAWN_TIMEOUT_SECONDS)
.ok()
.and_then(|s| s.parse::<u32>().ok())
.unwrap_or(600)
env::var(ZOMBIE_NODE_SPAWN_TIMEOUT_SECONDS)
.ok()
.and_then(|s| s.parse::<u32>().ok())
.unwrap_or(600)
}
/// Default timeout for spawning the whole network (1hr)
pub(crate) fn default_timeout() -> Duration {
3600
3600
}
pub(crate) fn default_command_polkadot() -> Option<Command> {
TryInto::<Command>::try_into("polkadot").ok()
TryInto::<Command>::try_into("polkadot").ok()
}
pub(crate) fn default_relaychain_chain() -> Chain {
TryInto::<Chain>::try_into("rococo-local").expect("'rococo-local' should be a valid chain")
TryInto::<Chain>::try_into("rococo-local").expect("'rococo-local' should be a valid chain")
}
#[cfg(test)]
mod tests {
use super::*;
use super::*;
#[test]
fn default_node_spawn_timeout_works_before_and_after_env_is_set() {
// The default should be 600 seconds if the env var is not set
assert_eq!(default_node_spawn_timeout(), 600);
#[test]
fn default_node_spawn_timeout_works_before_and_after_env_is_set() {
// The default should be 600 seconds if the env var is not set
assert_eq!(default_node_spawn_timeout(), 600);
// If env var is set to a valid number, it should return that number
env::set_var(ZOMBIE_NODE_SPAWN_TIMEOUT_SECONDS, "123");
assert_eq!(default_node_spawn_timeout(), 123);
// If env var is set to a valid number, it should return that number
env::set_var(ZOMBIE_NODE_SPAWN_TIMEOUT_SECONDS, "123");
assert_eq!(default_node_spawn_timeout(), 123);
// If env var is set to a NOT valid number, it should return 600
env::set_var(ZOMBIE_NODE_SPAWN_TIMEOUT_SECONDS, "NOT_A_NUMBER");
assert_eq!(default_node_spawn_timeout(), 600);
}
// If env var is set to a NOT valid number, it should return 600
env::set_var(ZOMBIE_NODE_SPAWN_TIMEOUT_SECONDS, "NOT_A_NUMBER");
assert_eq!(default_node_spawn_timeout(), 600);
}
}
@@ -7,25 +7,25 @@ use crate::generators;
#[derive(Debug, thiserror::Error)]
pub enum OrchestratorError {
// TODO: improve invalid config reporting
#[error("Invalid network configuration: {0}")]
InvalidConfig(String),
#[error("Invalid network config to use provider {0}: {1}")]
InvalidConfigForProvider(String, String),
#[error("Invalid configuration for node: {0}, field: {1}")]
InvalidNodeConfig(String, String),
#[error("Invariant not fulfilled {0}")]
InvariantError(&'static str),
#[error("Global network spawn timeout: {0} secs")]
GlobalTimeOut(u32),
#[error("Generator error: {0}")]
GeneratorError(#[from] generators::errors::GeneratorError),
#[error("Provider error")]
ProviderError(#[from] ProviderError),
#[error("FileSystem error")]
FileSystemError(#[from] FileSystemError),
#[error("Serialization error")]
SerializationError(#[from] serde_json::Error),
#[error(transparent)]
SpawnerError(#[from] anyhow::Error),
// TODO: improve invalid config reporting
#[error("Invalid network configuration: {0}")]
InvalidConfig(String),
#[error("Invalid network config to use provider {0}: {1}")]
InvalidConfigForProvider(String, String),
#[error("Invalid configuration for node: {0}, field: {1}")]
InvalidNodeConfig(String, String),
#[error("Invariant not fulfilled {0}")]
InvariantError(&'static str),
#[error("Global network spawn timeout: {0} secs")]
GlobalTimeOut(u32),
#[error("Generator error: {0}")]
GeneratorError(#[from] generators::errors::GeneratorError),
#[error("Provider error")]
ProviderError(#[from] ProviderError),
#[error("FileSystem error")]
FileSystemError(#[from] FileSystemError),
#[error("Serialization error")]
SerializationError(#[from] serde_json::Error),
#[error(transparent)]
SpawnerError(#[from] anyhow::Error),
}
@@ -13,8 +13,8 @@ mod port;
pub use bootnode_addr::generate as generate_node_bootnode_addr;
pub use command::{
generate_for_cumulus_node as generate_node_command_cumulus,
generate_for_node as generate_node_command, GenCmdOptions,
generate_for_cumulus_node as generate_node_command_cumulus,
generate_for_node as generate_node_command, GenCmdOptions,
};
pub use identity::generate as generate_node_identity;
pub use key::generate as generate_node_keys;
@@ -8,21 +8,21 @@ use configuration::types::Arg;
/// - `-:insecure-validator` -> removes `--insecure-validator` (normalized)
/// - `-:--prometheus-port` -> removes `--prometheus-port`
pub fn parse_removal_args(args: &[Arg]) -> Vec<String> {
args.iter()
.filter_map(|arg| match arg {
Arg::Flag(flag) if flag.starts_with("-:") => {
let mut flag_to_exclude = flag[2..].to_string();
args.iter()
.filter_map(|arg| match arg {
Arg::Flag(flag) if flag.starts_with("-:") => {
let mut flag_to_exclude = flag[2..].to_string();
// Normalize flag format - ensure it starts with --
if !flag_to_exclude.starts_with("--") {
flag_to_exclude = format!("--{flag_to_exclude}");
}
// Normalize flag format - ensure it starts with --
if !flag_to_exclude.starts_with("--") {
flag_to_exclude = format!("--{flag_to_exclude}");
}
Some(flag_to_exclude)
},
_ => None,
})
.collect()
Some(flag_to_exclude)
},
_ => None,
})
.collect()
}
/// Apply arg removals to a vector of string arguments.
@@ -35,104 +35,104 @@ pub fn parse_removal_args(args: &[Arg]) -> Vec<String> {
/// # Returns
/// Filtered vector with specified args removed
pub fn apply_arg_removals(args: Vec<String>, removals: &[String]) -> Vec<String> {
if removals.is_empty() {
return args;
}
if removals.is_empty() {
return args;
}
let mut res = Vec::new();
let mut skip_next = false;
let mut res = Vec::new();
let mut skip_next = false;
for (i, arg) in args.iter().enumerate() {
if skip_next {
skip_next = false;
continue;
}
for (i, arg) in args.iter().enumerate() {
if skip_next {
skip_next = false;
continue;
}
let should_remove = removals
.iter()
.any(|removal| arg == removal || arg.starts_with(&format!("{removal}=")));
let should_remove = removals
.iter()
.any(|removal| arg == removal || arg.starts_with(&format!("{removal}=")));
if should_remove {
// Only skip next if this looks like an option (starts with --) and next arg doesn't start with --
if !arg.contains("=") && i + 1 < args.len() {
let next_arg = &args[i + 1];
if !next_arg.starts_with("-") {
skip_next = true;
}
}
continue;
}
if should_remove {
// Only skip next if this looks like an option (starts with --) and next arg doesn't start with --
if !arg.contains("=") && i + 1 < args.len() {
let next_arg = &args[i + 1];
if !next_arg.starts_with("-") {
skip_next = true;
}
}
continue;
}
res.push(arg.clone());
}
res.push(arg.clone());
}
res
res
}
#[cfg(test)]
mod tests {
use super::*;
use super::*;
#[test]
fn test_parse_removal_args() {
let args = vec![
Arg::Flag("-:--insecure-validator-i-know-what-i-do".to_string()),
Arg::Flag("--validator".to_string()),
Arg::Flag("-:--no-telemetry".to_string()),
];
#[test]
fn test_parse_removal_args() {
let args = vec![
Arg::Flag("-:--insecure-validator-i-know-what-i-do".to_string()),
Arg::Flag("--validator".to_string()),
Arg::Flag("-:--no-telemetry".to_string()),
];
let removals = parse_removal_args(&args);
assert_eq!(removals.len(), 2);
assert!(removals.contains(&"--insecure-validator-i-know-what-i-do".to_string()));
assert!(removals.contains(&"--no-telemetry".to_string()));
}
let removals = parse_removal_args(&args);
assert_eq!(removals.len(), 2);
assert!(removals.contains(&"--insecure-validator-i-know-what-i-do".to_string()));
assert!(removals.contains(&"--no-telemetry".to_string()));
}
#[test]
fn test_apply_arg_removals_flag() {
let args = vec![
"--validator".to_string(),
"--insecure-validator-i-know-what-i-do".to_string(),
"--no-telemetry".to_string(),
];
let removals = vec!["--insecure-validator-i-know-what-i-do".to_string()];
let res = apply_arg_removals(args, &removals);
assert_eq!(res.len(), 2);
assert!(res.contains(&"--validator".to_string()));
assert!(res.contains(&"--no-telemetry".to_string()));
assert!(!res.contains(&"--insecure-validator-i-know-what-i-do".to_string()));
}
#[test]
fn test_apply_arg_removals_flag() {
let args = vec![
"--validator".to_string(),
"--insecure-validator-i-know-what-i-do".to_string(),
"--no-telemetry".to_string(),
];
let removals = vec!["--insecure-validator-i-know-what-i-do".to_string()];
let res = apply_arg_removals(args, &removals);
assert_eq!(res.len(), 2);
assert!(res.contains(&"--validator".to_string()));
assert!(res.contains(&"--no-telemetry".to_string()));
assert!(!res.contains(&"--insecure-validator-i-know-what-i-do".to_string()));
}
#[test]
fn test_apply_arg_removals_option_with_equals() {
let args = vec!["--name=alice".to_string(), "--port=30333".to_string()];
let removals = vec!["--port".to_string()];
let res = apply_arg_removals(args, &removals);
assert_eq!(res.len(), 1);
assert_eq!(res[0], "--name=alice");
}
#[test]
fn test_apply_arg_removals_option_with_equals() {
let args = vec!["--name=alice".to_string(), "--port=30333".to_string()];
let removals = vec!["--port".to_string()];
let res = apply_arg_removals(args, &removals);
assert_eq!(res.len(), 1);
assert_eq!(res[0], "--name=alice");
}
#[test]
fn test_apply_arg_removals_option_with_space() {
let args = vec![
"--name".to_string(),
"alice".to_string(),
"--port".to_string(),
"30333".to_string(),
];
let removals = vec!["--port".to_string()];
#[test]
fn test_apply_arg_removals_option_with_space() {
let args = vec![
"--name".to_string(),
"alice".to_string(),
"--port".to_string(),
"30333".to_string(),
];
let removals = vec!["--port".to_string()];
let res = apply_arg_removals(args, &removals);
assert_eq!(res.len(), 2);
assert_eq!(res[0], "--name");
assert_eq!(res[1], "alice");
}
let res = apply_arg_removals(args, &removals);
assert_eq!(res.len(), 2);
assert_eq!(res[0], "--name");
assert_eq!(res[1], "alice");
}
#[test]
fn test_apply_arg_removals_empty() {
let args = vec!["--validator".to_string()];
let removals = vec![];
#[test]
fn test_apply_arg_removals_empty() {
let args = vec!["--validator".to_string()];
let removals = vec![];
let res = apply_arg_removals(args, &removals);
assert_eq!(res, vec!["--validator".to_string()]);
}
let res = apply_arg_removals(args, &removals);
assert_eq!(res, vec!["--validator".to_string()]);
}
}
@@ -3,109 +3,94 @@ use std::{fmt::Display, net::IpAddr};
use super::errors::GeneratorError;
pub fn generate<T: AsRef<str> + Display>(
peer_id: &str,
ip: &IpAddr,
port: u16,
args: &[T],
p2p_cert: &Option<String>,
peer_id: &str,
ip: &IpAddr,
port: u16,
args: &[T],
p2p_cert: &Option<String>,
) -> Result<String, GeneratorError> {
let addr = if let Some(index) = args.iter().position(|arg| arg.as_ref().eq("--listen-addr")) {
let listen_value = args
.as_ref()
.get(index + 1)
.ok_or(GeneratorError::BootnodeAddrGeneration(
"can not generate bootnode address from args".into(),
))?
.to_string();
let addr = if let Some(index) = args.iter().position(|arg| arg.as_ref().eq("--listen-addr")) {
let listen_value = args
.as_ref()
.get(index + 1)
.ok_or(GeneratorError::BootnodeAddrGeneration(
"can not generate bootnode address from args".into(),
))?
.to_string();
let ip_str = ip.to_string();
let port_str = port.to_string();
let mut parts = listen_value.split('/').collect::<Vec<&str>>();
parts[2] = &ip_str;
parts[4] = port_str.as_str();
parts.join("/")
} else {
format!("/ip4/{ip}/tcp/{port}/ws")
};
let ip_str = ip.to_string();
let port_str = port.to_string();
let mut parts = listen_value.split('/').collect::<Vec<&str>>();
parts[2] = &ip_str;
parts[4] = port_str.as_str();
parts.join("/")
} else {
format!("/ip4/{ip}/tcp/{port}/ws")
};
let mut addr_with_peer = format!("{addr}/p2p/{peer_id}");
if let Some(p2p_cert) = p2p_cert {
addr_with_peer.push_str("/certhash/");
addr_with_peer.push_str(p2p_cert)
}
Ok(addr_with_peer)
let mut addr_with_peer = format!("{addr}/p2p/{peer_id}");
if let Some(p2p_cert) = p2p_cert {
addr_with_peer.push_str("/certhash/");
addr_with_peer.push_str(p2p_cert)
}
Ok(addr_with_peer)
}
#[cfg(test)]
mod tests {
use provider::constants::LOCALHOST;
use provider::constants::LOCALHOST;
use super::*;
#[test]
fn generate_for_alice_without_args() {
let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed
let args: Vec<&str> = vec![];
let bootnode_addr = generate(peer_id, &LOCALHOST, 5678, &args, &None).unwrap();
assert_eq!(
&bootnode_addr,
"/ip4/127.0.0.1/tcp/5678/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"
);
}
use super::*;
#[test]
fn generate_for_alice_without_args() {
let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed
let args: Vec<&str> = vec![];
let bootnode_addr = generate(peer_id, &LOCALHOST, 5678, &args, &None).unwrap();
assert_eq!(
&bootnode_addr,
"/ip4/127.0.0.1/tcp/5678/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"
);
}
#[test]
fn generate_for_alice_with_listen_addr() {
// Should override the ip/port
let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed
let args: Vec<String> = [
"--some",
"other",
"--listen-addr",
"/ip4/192.168.100.1/tcp/30333/ws",
]
.iter()
.map(|x| x.to_string())
.collect();
let bootnode_addr =
generate(peer_id, &LOCALHOST, 5678, args.iter().as_ref(), &None).unwrap();
assert_eq!(
&bootnode_addr,
"/ip4/127.0.0.1/tcp/5678/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"
);
}
#[test]
fn generate_for_alice_with_listen_addr() {
// Should override the ip/port
let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed
let args: Vec<String> =
["--some", "other", "--listen-addr", "/ip4/192.168.100.1/tcp/30333/ws"]
.iter()
.map(|x| x.to_string())
.collect();
let bootnode_addr =
generate(peer_id, &LOCALHOST, 5678, args.iter().as_ref(), &None).unwrap();
assert_eq!(
&bootnode_addr,
"/ip4/127.0.0.1/tcp/5678/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"
);
}
#[test]
fn generate_for_alice_with_listen_addr_without_value_must_fail() {
// Should override the ip/port
let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed
let args: Vec<String> = ["--some", "other", "--listen-addr"]
.iter()
.map(|x| x.to_string())
.collect();
let bootnode_addr = generate(peer_id, &LOCALHOST, 5678, args.iter().as_ref(), &None);
#[test]
fn generate_for_alice_with_listen_addr_without_value_must_fail() {
// Should override the ip/port
let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed
let args: Vec<String> =
["--some", "other", "--listen-addr"].iter().map(|x| x.to_string()).collect();
let bootnode_addr = generate(peer_id, &LOCALHOST, 5678, args.iter().as_ref(), &None);
assert!(bootnode_addr.is_err());
assert!(matches!(
bootnode_addr,
Err(GeneratorError::BootnodeAddrGeneration(_))
));
}
assert!(bootnode_addr.is_err());
assert!(matches!(bootnode_addr, Err(GeneratorError::BootnodeAddrGeneration(_))));
}
#[test]
fn generate_for_alice_withcert() {
let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed
let args: Vec<&str> = vec![];
let bootnode_addr = generate(
peer_id,
&LOCALHOST,
5678,
&args,
&Some(String::from("data")),
)
.unwrap();
assert_eq!(
#[test]
fn generate_for_alice_withcert() {
let peer_id = "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"; // from alice as seed
let args: Vec<&str> = vec![];
let bootnode_addr =
generate(peer_id, &LOCALHOST, 5678, &args, &Some(String::from("data"))).unwrap();
assert_eq!(
&bootnode_addr,
"/ip4/127.0.0.1/tcp/5678/ws/p2p/12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm/certhash/data"
);
}
}
}
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
@@ -3,22 +3,22 @@ use support::fs::FileSystemError;
#[derive(Debug, thiserror::Error)]
pub enum GeneratorError {
#[error("Generating key {0} with input {1}")]
KeyGeneration(String, String),
#[error("Generating port {0}, err {1}")]
PortGeneration(u16, String),
#[error("Chain-spec build error: {0}")]
ChainSpecGeneration(String),
#[error("Provider error: {0}")]
ProviderError(#[from] ProviderError),
#[error("FileSystem error")]
FileSystemError(#[from] FileSystemError),
#[error("Generating identity, err {0}")]
IdentityGeneration(String),
#[error("Generating bootnode address, err {0}")]
BootnodeAddrGeneration(String),
#[error("Error overriding wasm on raw chain-spec, err {0}")]
OverridingWasm(String),
#[error("Error overriding raw chain-spec, err {0}")]
OverridingRawSpec(String),
#[error("Generating key {0} with input {1}")]
KeyGeneration(String, String),
#[error("Generating port {0}, err {1}")]
PortGeneration(u16, String),
#[error("Chain-spec build error: {0}")]
ChainSpecGeneration(String),
#[error("Provider error: {0}")]
ProviderError(#[from] ProviderError),
#[error("FileSystem error")]
FileSystemError(#[from] FileSystemError),
#[error("Generating identity, err {0}")]
IdentityGeneration(String),
#[error("Generating bootnode address, err {0}")]
BootnodeAddrGeneration(String),
#[error("Error overriding wasm on raw chain-spec, err {0}")]
OverridingWasm(String),
#[error("Error overriding raw chain-spec, err {0}")]
OverridingRawSpec(String),
}
@@ -7,35 +7,29 @@ use super::errors::GeneratorError;
// Generate p2p identity for node
// return `node-key` and `peerId`
pub fn generate(node_name: &str) -> Result<(String, String), GeneratorError> {
let key = hex::encode(sha2::Sha256::digest(node_name));
let key = hex::encode(sha2::Sha256::digest(node_name));
let bytes = <[u8; 32]>::from_hex(key.clone()).map_err(|_| {
GeneratorError::IdentityGeneration("can not transform hex to [u8;32]".into())
})?;
let sk = ed25519::SecretKey::try_from_bytes(bytes)
.map_err(|_| GeneratorError::IdentityGeneration("can not create sk from bytes".into()))?;
let local_identity: Keypair = ed25519::Keypair::from(sk).into();
let local_public = local_identity.public();
let local_peer_id = local_public.to_peer_id();
let bytes = <[u8; 32]>::from_hex(key.clone()).map_err(|_| {
GeneratorError::IdentityGeneration("can not transform hex to [u8;32]".into())
})?;
let sk = ed25519::SecretKey::try_from_bytes(bytes)
.map_err(|_| GeneratorError::IdentityGeneration("can not create sk from bytes".into()))?;
let local_identity: Keypair = ed25519::Keypair::from(sk).into();
let local_public = local_identity.public();
let local_peer_id = local_public.to_peer_id();
Ok((key, local_peer_id.to_base58()))
Ok((key, local_peer_id.to_base58()))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn generate_for_alice() {
let s = "alice";
let (key, peer_id) = generate(s).unwrap();
assert_eq!(
&key,
"2bd806c97f0e00af1a1fc3328fa763a9269723c8db8fac4f93af71db186d6e90"
);
assert_eq!(
&peer_id,
"12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm"
);
}
use super::*;
#[test]
fn generate_for_alice() {
let s = "alice";
let (key, peer_id) = generate(s).unwrap();
assert_eq!(&key, "2bd806c97f0e00af1a1fc3328fa763a9269723c8db8fac4f93af71db186d6e90");
assert_eq!(&peer_id, "12D3KooWQCkBm1BYtkHpocxCwMgR8yjitEeHGx8spzcDLGt2gkBm");
}
}
@@ -1,151 +1,141 @@
use pezsp_core::{crypto::SecretStringError, ecdsa, ed25519, keccak_256, sr25519, Pair, H160, H256};
use pezsp_core::{
crypto::SecretStringError, ecdsa, ed25519, keccak_256, sr25519, Pair, H160, H256,
};
use super::errors::GeneratorError;
use crate::shared::types::{Accounts, NodeAccount};
const KEYS: [&str; 5] = ["sr", "sr_stash", "ed", "ec", "eth"];
pub fn generate_pair<T: Pair>(seed: &str) -> Result<T::Pair, SecretStringError> {
let pair = T::Pair::from_string(seed, None)?;
Ok(pair)
let pair = T::Pair::from_string(seed, None)?;
Ok(pair)
}
pub fn generate(seed: &str) -> Result<Accounts, GeneratorError> {
let mut accounts: Accounts = Default::default();
for k in KEYS {
let (address, public_key) = match k {
"sr" => {
let pair = generate_pair::<sr25519::Pair>(seed)
.map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?;
(pair.public().to_string(), hex::encode(pair.public()))
},
"sr_stash" => {
let pair = generate_pair::<sr25519::Pair>(&format!("{seed}//stash"))
.map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?;
(pair.public().to_string(), hex::encode(pair.public()))
},
"ed" => {
let pair = generate_pair::<ed25519::Pair>(seed)
.map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?;
(pair.public().to_string(), hex::encode(pair.public()))
},
"ec" => {
let pair = generate_pair::<ecdsa::Pair>(seed)
.map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?;
(pair.public().to_string(), hex::encode(pair.public()))
},
"eth" => {
let pair = generate_pair::<ecdsa::Pair>(seed)
.map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?;
let mut accounts: Accounts = Default::default();
for k in KEYS {
let (address, public_key) = match k {
"sr" => {
let pair = generate_pair::<sr25519::Pair>(seed)
.map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?;
(pair.public().to_string(), hex::encode(pair.public()))
},
"sr_stash" => {
let pair = generate_pair::<sr25519::Pair>(&format!("{seed}//stash"))
.map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?;
(pair.public().to_string(), hex::encode(pair.public()))
},
"ed" => {
let pair = generate_pair::<ed25519::Pair>(seed)
.map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?;
(pair.public().to_string(), hex::encode(pair.public()))
},
"ec" => {
let pair = generate_pair::<ecdsa::Pair>(seed)
.map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?;
(pair.public().to_string(), hex::encode(pair.public()))
},
"eth" => {
let pair = generate_pair::<ecdsa::Pair>(seed)
.map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?;
let decompressed = libsecp256k1::PublicKey::parse_compressed(&pair.public().0)
.map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?
.serialize();
let mut m = [0u8; 64];
m.copy_from_slice(&decompressed[1..65]);
let account = H160::from(H256::from(keccak_256(&m)));
let decompressed = libsecp256k1::PublicKey::parse_compressed(&pair.public().0)
.map_err(|_| GeneratorError::KeyGeneration(k.into(), seed.into()))?
.serialize();
let mut m = [0u8; 64];
m.copy_from_slice(&decompressed[1..65]);
let account = H160::from(H256::from(keccak_256(&m)));
(hex::encode(account), hex::encode(account))
},
_ => unreachable!(),
};
accounts.insert(k.into(), NodeAccount::new(address, public_key));
}
Ok(accounts)
(hex::encode(account), hex::encode(account))
},
_ => unreachable!(),
};
accounts.insert(k.into(), NodeAccount::new(address, public_key));
}
Ok(accounts)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn generate_for_alice() {
use pezsp_core::crypto::Ss58Codec;
let s = "Alice";
let seed = format!("//{s}");
use super::*;
#[test]
fn generate_for_alice() {
use pezsp_core::crypto::Ss58Codec;
let s = "Alice";
let seed = format!("//{s}");
let pair = generate_pair::<sr25519::Pair>(&seed).unwrap();
assert_eq!(
"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",
pair.public().to_ss58check()
);
let pair = generate_pair::<sr25519::Pair>(&seed).unwrap();
assert_eq!(
"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",
pair.public().to_ss58check()
);
let pair = generate_pair::<ecdsa::Pair>(&seed).unwrap();
assert_eq!(
"0x020a1091341fe5664bfa1782d5e04779689068c916b04cb365ec3153755684d9a1",
format!("0x{}", hex::encode(pair.public()))
);
let pair = generate_pair::<ecdsa::Pair>(&seed).unwrap();
assert_eq!(
"0x020a1091341fe5664bfa1782d5e04779689068c916b04cb365ec3153755684d9a1",
format!("0x{}", hex::encode(pair.public()))
);
let pair = generate_pair::<ed25519::Pair>(&seed).unwrap();
assert_eq!(
"5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu",
pair.public().to_ss58check()
);
}
let pair = generate_pair::<ed25519::Pair>(&seed).unwrap();
assert_eq!(
"5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu",
pair.public().to_ss58check()
);
}
#[test]
fn generate_for_zombie() {
use pezsp_core::crypto::Ss58Codec;
let s = "Zombie";
let seed = format!("//{s}");
#[test]
fn generate_for_zombie() {
use pezsp_core::crypto::Ss58Codec;
let s = "Zombie";
let seed = format!("//{s}");
let pair = generate_pair::<sr25519::Pair>(&seed).unwrap();
assert_eq!(
"5FTcLfwFc7ctvqp3RhbEig6UuHLHcHVRujuUm8r21wy4dAR8",
pair.public().to_ss58check()
);
}
let pair = generate_pair::<sr25519::Pair>(&seed).unwrap();
assert_eq!(
"5FTcLfwFc7ctvqp3RhbEig6UuHLHcHVRujuUm8r21wy4dAR8",
pair.public().to_ss58check()
);
}
#[test]
fn generate_pair_invalid_should_fail() {
let s = "Alice";
let seed = s.to_string();
#[test]
fn generate_pair_invalid_should_fail() {
let s = "Alice";
let seed = s.to_string();
let pair = generate_pair::<sr25519::Pair>(&seed);
assert!(pair.is_err());
}
let pair = generate_pair::<sr25519::Pair>(&seed);
assert!(pair.is_err());
}
#[test]
fn generate_invalid_should_fail() {
let s = "Alice";
let seed = s.to_string();
#[test]
fn generate_invalid_should_fail() {
let s = "Alice";
let seed = s.to_string();
let pair = generate(&seed);
assert!(pair.is_err());
assert!(matches!(pair, Err(GeneratorError::KeyGeneration(_, _))));
}
let pair = generate(&seed);
assert!(pair.is_err());
assert!(matches!(pair, Err(GeneratorError::KeyGeneration(_, _))));
}
#[test]
fn generate_work() {
let s = "Alice";
let seed = format!("//{s}");
#[test]
fn generate_work() {
let s = "Alice";
let seed = format!("//{s}");
let pair = generate(&seed).unwrap();
let sr = pair.get("sr").unwrap();
let sr_stash = pair.get("sr_stash").unwrap();
let ed = pair.get("ed").unwrap();
let ec = pair.get("ec").unwrap();
let eth = pair.get("eth").unwrap();
let pair = generate(&seed).unwrap();
let sr = pair.get("sr").unwrap();
let sr_stash = pair.get("sr_stash").unwrap();
let ed = pair.get("ed").unwrap();
let ec = pair.get("ec").unwrap();
let eth = pair.get("eth").unwrap();
assert_eq!(
sr.address,
"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY"
);
assert_eq!(
sr_stash.address,
"5GNJqTPyNqANBkUVMN1LPPrxXnFouWXoe2wNSmmEoLctxiZY"
);
assert_eq!(
ed.address,
"5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu"
);
assert_eq!(
format!("0x{}", ec.public_key),
"0x020a1091341fe5664bfa1782d5e04779689068c916b04cb365ec3153755684d9a1"
);
assert_eq!(sr.address, "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY");
assert_eq!(sr_stash.address, "5GNJqTPyNqANBkUVMN1LPPrxXnFouWXoe2wNSmmEoLctxiZY");
assert_eq!(ed.address, "5FA9nQDVg267DEd8m1ZypXLBnvN7SFxYwV7ndqSYGiN9TTpu");
assert_eq!(
format!("0x{}", ec.public_key),
"0x020a1091341fe5664bfa1782d5e04779689068c916b04cb365ec3153755684d9a1"
);
assert_eq!(
format!("0x{}", eth.public_key),
"0xe04cc55ebee1cbce552f250e85c57b70b2e2625b"
)
}
assert_eq!(format!("0x{}", eth.public_key), "0xe04cc55ebee1cbce552f250e85c57b70b2e2625b")
}
}
@@ -1,6 +1,6 @@
use std::{
path::{Path, PathBuf},
vec,
path::{Path, PathBuf},
vec,
};
use hex::encode;
@@ -8,9 +8,9 @@ use support::{constants::THIS_IS_A_BUG, fs::FileSystem};
use super::errors::GeneratorError;
use crate::{
generators::keystore_key_types::{parse_keystore_key_types, KeystoreKeyType},
shared::types::NodeAccounts,
ScopedFilesystem,
generators::keystore_key_types::{parse_keystore_key_types, KeystoreKeyType},
shared::types::NodeAccounts,
ScopedFilesystem,
};
/// Generates keystore files for a node.
@@ -25,266 +25,230 @@ use crate::{
/// If `keystore_key_types` is empty, all default key types will be generated.
/// Otherwise, only the specified key types will be generated.
pub async fn generate<'a, T>(
acc: &NodeAccounts,
node_files_path: impl AsRef<Path>,
scoped_fs: &ScopedFilesystem<'a, T>,
asset_hub_polkadot: bool,
keystore_key_types: Vec<&str>,
acc: &NodeAccounts,
node_files_path: impl AsRef<Path>,
scoped_fs: &ScopedFilesystem<'a, T>,
asset_hub_polkadot: bool,
keystore_key_types: Vec<&str>,
) -> Result<Vec<PathBuf>, GeneratorError>
where
T: FileSystem,
T: FileSystem,
{
// Create local keystore
scoped_fs.create_dir_all(node_files_path.as_ref()).await?;
let mut filenames = vec![];
// Create local keystore
scoped_fs.create_dir_all(node_files_path.as_ref()).await?;
let mut filenames = vec![];
// Parse the key type specifications
let key_types = parse_keystore_key_types(&keystore_key_types, asset_hub_polkadot);
// Parse the key type specifications
let key_types = parse_keystore_key_types(&keystore_key_types, asset_hub_polkadot);
let futures: Vec<_> = key_types
.iter()
.map(|key_type| {
let filename = generate_keystore_filename(key_type, acc);
let file_path = PathBuf::from(format!(
"{}/{}",
node_files_path.as_ref().to_string_lossy(),
filename
));
let content = format!("\"{}\"", acc.seed);
(filename, scoped_fs.write(file_path, content))
})
.collect();
let futures: Vec<_> = key_types
.iter()
.map(|key_type| {
let filename = generate_keystore_filename(key_type, acc);
let file_path = PathBuf::from(format!(
"{}/{}",
node_files_path.as_ref().to_string_lossy(),
filename
));
let content = format!("\"{}\"", acc.seed);
(filename, scoped_fs.write(file_path, content))
})
.collect();
for (filename, future) in futures {
future.await?;
filenames.push(PathBuf::from(filename));
}
for (filename, future) in futures {
future.await?;
filenames.push(PathBuf::from(filename));
}
Ok(filenames)
Ok(filenames)
}
/// Generates the keystore filename for a given key type.
///
/// The filename format is: `{hex_encoded_key_type}{public_key}`
fn generate_keystore_filename(key_type: &KeystoreKeyType, acc: &NodeAccounts) -> String {
let account_key = key_type.scheme.account_key();
let pk = acc
.accounts
.get(account_key)
.expect(&format!(
"Key '{}' should be set for node {THIS_IS_A_BUG}",
account_key
))
.public_key
.as_str();
let account_key = key_type.scheme.account_key();
let pk = acc
.accounts
.get(account_key)
.expect(&format!("Key '{}' should be set for node {THIS_IS_A_BUG}", account_key))
.public_key
.as_str();
format!("{}{}", encode(&key_type.key_type), pk)
format!("{}{}", encode(&key_type.key_type), pk)
}
#[cfg(test)]
mod tests {
use std::{collections::HashMap, ffi::OsString, str::FromStr};
use std::{collections::HashMap, ffi::OsString, str::FromStr};
use support::fs::in_memory::{InMemoryFile, InMemoryFileSystem};
use support::fs::in_memory::{InMemoryFile, InMemoryFileSystem};
use super::*;
use crate::shared::types::{NodeAccount, NodeAccounts};
use super::*;
use crate::shared::types::{NodeAccount, NodeAccounts};
fn create_test_accounts() -> NodeAccounts {
let mut accounts = HashMap::new();
accounts.insert(
"sr".to_string(),
NodeAccount::new("sr_address", "sr_public_key"),
);
accounts.insert(
"ed".to_string(),
NodeAccount::new("ed_address", "ed_public_key"),
);
accounts.insert(
"ec".to_string(),
NodeAccount::new("ec_address", "ec_public_key"),
);
NodeAccounts {
seed: "//Alice".to_string(),
accounts,
}
}
fn create_test_accounts() -> NodeAccounts {
let mut accounts = HashMap::new();
accounts.insert("sr".to_string(), NodeAccount::new("sr_address", "sr_public_key"));
accounts.insert("ed".to_string(), NodeAccount::new("ed_address", "ed_public_key"));
accounts.insert("ec".to_string(), NodeAccount::new("ec_address", "ec_public_key"));
NodeAccounts { seed: "//Alice".to_string(), accounts }
}
fn create_test_fs() -> InMemoryFileSystem {
InMemoryFileSystem::new(HashMap::from([(
OsString::from_str("/").unwrap(),
InMemoryFile::dir(),
)]))
}
fn create_test_fs() -> InMemoryFileSystem {
InMemoryFileSystem::new(HashMap::from([(
OsString::from_str("/").unwrap(),
InMemoryFile::dir(),
)]))
}
#[tokio::test]
async fn generate_creates_default_keystore_files_when_no_key_types_specified() {
let accounts = create_test_accounts();
let fs = create_test_fs();
let base_dir = "/tmp/test";
#[tokio::test]
async fn generate_creates_default_keystore_files_when_no_key_types_specified() {
let accounts = create_test_accounts();
let fs = create_test_fs();
let base_dir = "/tmp/test";
let scoped_fs = ScopedFilesystem { fs: &fs, base_dir };
let key_types: Vec<&str> = vec![];
let scoped_fs = ScopedFilesystem { fs: &fs, base_dir };
let key_types: Vec<&str> = vec![];
let res = generate(&accounts, "node1", &scoped_fs, false, key_types).await;
assert!(res.is_ok());
let res = generate(&accounts, "node1", &scoped_fs, false, key_types).await;
assert!(res.is_ok());
let filenames = res.unwrap();
let filenames = res.unwrap();
assert!(filenames.len() > 10);
assert!(filenames.len() > 10);
let filename_strs: Vec<String> = filenames
.iter()
.map(|p| p.to_string_lossy().to_string())
.collect();
let filename_strs: Vec<String> =
filenames.iter().map(|p| p.to_string_lossy().to_string()).collect();
// Check that aura key is generated (hex of "aura" is 61757261)
assert!(filename_strs.iter().any(|f| f.starts_with("61757261")));
// Check that babe key is generated (hex of "babe" is 62616265)
assert!(filename_strs.iter().any(|f| f.starts_with("62616265")));
// Check that gran key is generated (hex of "gran" is 6772616e)
assert!(filename_strs.iter().any(|f| f.starts_with("6772616e")));
}
// Check that aura key is generated (hex of "aura" is 61757261)
assert!(filename_strs.iter().any(|f| f.starts_with("61757261")));
// Check that babe key is generated (hex of "babe" is 62616265)
assert!(filename_strs.iter().any(|f| f.starts_with("62616265")));
// Check that gran key is generated (hex of "gran" is 6772616e)
assert!(filename_strs.iter().any(|f| f.starts_with("6772616e")));
}
#[tokio::test]
async fn generate_creates_only_specified_keystore_files() {
let accounts = create_test_accounts();
let fs = create_test_fs();
let base_dir = "/tmp/test";
#[tokio::test]
async fn generate_creates_only_specified_keystore_files() {
let accounts = create_test_accounts();
let fs = create_test_fs();
let base_dir = "/tmp/test";
let scoped_fs = ScopedFilesystem { fs: &fs, base_dir };
let key_types = vec!["audi", "gran"];
let scoped_fs = ScopedFilesystem { fs: &fs, base_dir };
let key_types = vec!["audi", "gran"];
let res = generate(&accounts, "node1", &scoped_fs, false, key_types).await;
let res = generate(&accounts, "node1", &scoped_fs, false, key_types).await;
assert!(res.is_ok());
assert!(res.is_ok());
let filenames = res.unwrap();
assert_eq!(filenames.len(), 2);
let filenames = res.unwrap();
assert_eq!(filenames.len(), 2);
let filename_strs: Vec<String> = filenames
.iter()
.map(|p| p.to_string_lossy().to_string())
.collect();
let filename_strs: Vec<String> =
filenames.iter().map(|p| p.to_string_lossy().to_string()).collect();
// audi uses sr scheme by default
assert!(filename_strs
.iter()
.any(|f| f.starts_with("61756469") && f.contains("sr_public_key")));
// gran uses ed scheme by default
assert!(filename_strs
.iter()
.any(|f| f.starts_with("6772616e") && f.contains("ed_public_key")));
}
// audi uses sr scheme by default
assert!(filename_strs
.iter()
.any(|f| f.starts_with("61756469") && f.contains("sr_public_key")));
// gran uses ed scheme by default
assert!(filename_strs
.iter()
.any(|f| f.starts_with("6772616e") && f.contains("ed_public_key")));
}
#[tokio::test]
async fn generate_produces_correct_keystore_files() {
struct TestCase {
name: &'static str,
key_types: Vec<&'static str>,
asset_hub_polkadot: bool,
expected_prefix: &'static str,
expected_public_key: &'static str,
}
#[tokio::test]
async fn generate_produces_correct_keystore_files() {
struct TestCase {
name: &'static str,
key_types: Vec<&'static str>,
asset_hub_polkadot: bool,
expected_prefix: &'static str,
expected_public_key: &'static str,
}
let test_cases = vec![
TestCase {
name: "explicit scheme override (gran_sr)",
key_types: vec!["gran_sr"],
asset_hub_polkadot: false,
expected_prefix: "6772616e", // "gran" in hex
expected_public_key: "sr_public_key",
},
TestCase {
name: "aura with asset_hub_polkadot uses ed",
key_types: vec!["aura"],
asset_hub_polkadot: true,
expected_prefix: "61757261", // "aura" in hex
expected_public_key: "ed_public_key",
},
TestCase {
name: "aura without asset_hub_polkadot uses sr",
key_types: vec!["aura"],
asset_hub_polkadot: false,
expected_prefix: "61757261", // "aura" in hex
expected_public_key: "sr_public_key",
},
TestCase {
name: "custom key type with explicit ec scheme",
key_types: vec!["cust_ec"],
asset_hub_polkadot: false,
expected_prefix: "63757374", // "cust" in hex
expected_public_key: "ec_public_key",
},
];
let test_cases = vec![
TestCase {
name: "explicit scheme override (gran_sr)",
key_types: vec!["gran_sr"],
asset_hub_polkadot: false,
expected_prefix: "6772616e", // "gran" in hex
expected_public_key: "sr_public_key",
},
TestCase {
name: "aura with asset_hub_polkadot uses ed",
key_types: vec!["aura"],
asset_hub_polkadot: true,
expected_prefix: "61757261", // "aura" in hex
expected_public_key: "ed_public_key",
},
TestCase {
name: "aura without asset_hub_polkadot uses sr",
key_types: vec!["aura"],
asset_hub_polkadot: false,
expected_prefix: "61757261", // "aura" in hex
expected_public_key: "sr_public_key",
},
TestCase {
name: "custom key type with explicit ec scheme",
key_types: vec!["cust_ec"],
asset_hub_polkadot: false,
expected_prefix: "63757374", // "cust" in hex
expected_public_key: "ec_public_key",
},
];
for tc in test_cases {
let accounts = create_test_accounts();
let fs = create_test_fs();
let scoped_fs = ScopedFilesystem {
fs: &fs,
base_dir: "/tmp/test",
};
for tc in test_cases {
let accounts = create_test_accounts();
let fs = create_test_fs();
let scoped_fs = ScopedFilesystem { fs: &fs, base_dir: "/tmp/test" };
let key_types: Vec<&str> = tc.key_types.clone();
let res = generate(
&accounts,
"node1",
&scoped_fs,
tc.asset_hub_polkadot,
key_types,
)
.await;
let key_types: Vec<&str> = tc.key_types.clone();
let res =
generate(&accounts, "node1", &scoped_fs, tc.asset_hub_polkadot, key_types).await;
assert!(
res.is_ok(),
"[{}] Expected Ok but got: {:?}",
tc.name,
res.err()
);
let filenames = res.unwrap();
assert!(res.is_ok(), "[{}] Expected Ok but got: {:?}", tc.name, res.err());
let filenames = res.unwrap();
assert_eq!(filenames.len(), 1, "[{}] Expected 1 file", tc.name);
assert_eq!(filenames.len(), 1, "[{}] Expected 1 file", tc.name);
let filename = filenames[0].to_string_lossy().to_string();
assert!(
filename.starts_with(tc.expected_prefix),
"[{}] Expected prefix '{}', got '{}'",
tc.name,
tc.expected_prefix,
filename
);
assert!(
filename.contains(tc.expected_public_key),
"[{}] Expected public key '{}' in '{}'",
tc.name,
tc.expected_public_key,
filename
);
}
}
let filename = filenames[0].to_string_lossy().to_string();
assert!(
filename.starts_with(tc.expected_prefix),
"[{}] Expected prefix '{}', got '{}'",
tc.name,
tc.expected_prefix,
filename
);
assert!(
filename.contains(tc.expected_public_key),
"[{}] Expected public key '{}' in '{}'",
tc.name,
tc.expected_public_key,
filename
);
}
}
#[tokio::test]
async fn generate_ignores_invalid_key_specs_and_uses_defaults() {
let accounts = create_test_accounts();
let fs = create_test_fs();
let scoped_fs = ScopedFilesystem {
fs: &fs,
base_dir: "/tmp/test",
};
#[tokio::test]
async fn generate_ignores_invalid_key_specs_and_uses_defaults() {
let accounts = create_test_accounts();
let fs = create_test_fs();
let scoped_fs = ScopedFilesystem { fs: &fs, base_dir: "/tmp/test" };
let key_types = vec![
"invalid", // Too long
"xxx", // Too short
"audi_xx", // Invalid sceme
];
let key_types = vec![
"invalid", // Too long
"xxx", // Too short
"audi_xx", // Invalid sceme
];
let res = generate(&accounts, "node1", &scoped_fs, false, key_types).await;
let res = generate(&accounts, "node1", &scoped_fs, false, key_types).await;
assert!(res.is_ok());
let filenames = res.unwrap();
assert!(res.is_ok());
let filenames = res.unwrap();
// Should fall back to defaults since all specs are invalid
assert!(filenames.len() > 10);
}
// Should fall back to defaults since all specs are invalid
assert!(filenames.len() > 10);
}
}
@@ -5,94 +5,91 @@ use serde::{Deserialize, Serialize};
/// Supported cryptographic schemes for keystore keys.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum KeyScheme {
/// Sr25519 scheme
Sr,
/// Ed25519 scheme
Ed,
/// ECDSA scheme
Ec,
/// Sr25519 scheme
Sr,
/// Ed25519 scheme
Ed,
/// ECDSA scheme
Ec,
}
impl KeyScheme {
/// Returns the account key suffix used in `NodeAccounts` for this scheme.
pub fn account_key(&self) -> &'static str {
match self {
KeyScheme::Sr => "sr",
KeyScheme::Ed => "ed",
KeyScheme::Ec => "ec",
}
}
/// Returns the account key suffix used in `NodeAccounts` for this scheme.
pub fn account_key(&self) -> &'static str {
match self {
KeyScheme::Sr => "sr",
KeyScheme::Ed => "ed",
KeyScheme::Ec => "ec",
}
}
}
impl std::fmt::Display for KeyScheme {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
KeyScheme::Sr => write!(f, "sr"),
KeyScheme::Ed => write!(f, "ed"),
KeyScheme::Ec => write!(f, "ec"),
}
}
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
KeyScheme::Sr => write!(f, "sr"),
KeyScheme::Ed => write!(f, "ed"),
KeyScheme::Ec => write!(f, "ec"),
}
}
}
impl TryFrom<&str> for KeyScheme {
type Error = String;
type Error = String;
fn try_from(value: &str) -> Result<Self, Self::Error> {
match value.to_lowercase().as_str() {
"sr" => Ok(KeyScheme::Sr),
"ed" => Ok(KeyScheme::Ed),
"ec" => Ok(KeyScheme::Ec),
_ => Err(format!("Unsupported key scheme: {}", value)),
}
}
fn try_from(value: &str) -> Result<Self, Self::Error> {
match value.to_lowercase().as_str() {
"sr" => Ok(KeyScheme::Sr),
"ed" => Ok(KeyScheme::Ed),
"ec" => Ok(KeyScheme::Ec),
_ => Err(format!("Unsupported key scheme: {}", value)),
}
}
}
/// A parsed keystore key type.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct KeystoreKeyType {
/// The 4-character key type identifier (e.g., "aura", "babe", "gran").
pub key_type: String,
/// The cryptographic scheme to use for this key type.
pub scheme: KeyScheme,
/// The 4-character key type identifier (e.g., "aura", "babe", "gran").
pub key_type: String,
/// The cryptographic scheme to use for this key type.
pub scheme: KeyScheme,
}
impl KeystoreKeyType {
pub fn new(key_type: impl Into<String>, scheme: KeyScheme) -> Self {
Self {
key_type: key_type.into(),
scheme,
}
}
pub fn new(key_type: impl Into<String>, scheme: KeyScheme) -> Self {
Self { key_type: key_type.into(), scheme }
}
}
/// Returns the default predefined key schemes for known key types.
/// Special handling for `aura` when `is_asset_hub_polkadot` is true.
fn get_predefined_schemes(is_asset_hub_polkadot: bool) -> HashMap<&'static str, KeyScheme> {
let mut schemes = HashMap::new();
let mut schemes = HashMap::new();
// aura has special handling for asset-hub-polkadot
if is_asset_hub_polkadot {
schemes.insert("aura", KeyScheme::Ed);
} else {
schemes.insert("aura", KeyScheme::Sr);
}
// aura has special handling for asset-hub-polkadot
if is_asset_hub_polkadot {
schemes.insert("aura", KeyScheme::Ed);
} else {
schemes.insert("aura", KeyScheme::Sr);
}
schemes.insert("babe", KeyScheme::Sr);
schemes.insert("imon", KeyScheme::Sr);
schemes.insert("gran", KeyScheme::Ed);
schemes.insert("audi", KeyScheme::Sr);
schemes.insert("asgn", KeyScheme::Sr);
schemes.insert("para", KeyScheme::Sr);
schemes.insert("beef", KeyScheme::Ec);
schemes.insert("nmbs", KeyScheme::Sr); // Nimbus
schemes.insert("rand", KeyScheme::Sr); // Randomness (Moonbeam)
schemes.insert("rate", KeyScheme::Ed); // Equilibrium rate module
schemes.insert("acco", KeyScheme::Sr);
schemes.insert("bcsv", KeyScheme::Sr); // BlockchainSrvc (StorageHub)
schemes.insert("ftsv", KeyScheme::Ed); // FileTransferSrvc (StorageHub)
schemes.insert("mixn", KeyScheme::Sr); // Mixnet
schemes.insert("babe", KeyScheme::Sr);
schemes.insert("imon", KeyScheme::Sr);
schemes.insert("gran", KeyScheme::Ed);
schemes.insert("audi", KeyScheme::Sr);
schemes.insert("asgn", KeyScheme::Sr);
schemes.insert("para", KeyScheme::Sr);
schemes.insert("beef", KeyScheme::Ec);
schemes.insert("nmbs", KeyScheme::Sr); // Nimbus
schemes.insert("rand", KeyScheme::Sr); // Randomness (Moonbeam)
schemes.insert("rate", KeyScheme::Ed); // Equilibrium rate module
schemes.insert("acco", KeyScheme::Sr);
schemes.insert("bcsv", KeyScheme::Sr); // BlockchainSrvc (StorageHub)
schemes.insert("ftsv", KeyScheme::Ed); // FileTransferSrvc (StorageHub)
schemes.insert("mixn", KeyScheme::Sr); // Mixnet
schemes
schemes
}
/// Parses a single keystore key type specification string.
@@ -103,26 +100,26 @@ fn get_predefined_schemes(is_asset_hub_polkadot: bool) -> HashMap<&'static str,
///
/// Returns `None` if the spec is invalid or doesn't match the expected format.
fn parse_key_spec(spec: &str, predefined: &HashMap<&str, KeyScheme>) -> Option<KeystoreKeyType> {
let spec = spec.trim();
let spec = spec.trim();
// Try parsing as long form first: key_type_scheme (e.g., "audi_sr")
if let Some((key_type, scheme_str)) = spec.split_once('_') {
if key_type.len() != 4 {
return None;
}
// Try parsing as long form first: key_type_scheme (e.g., "audi_sr")
if let Some((key_type, scheme_str)) = spec.split_once('_') {
if key_type.len() != 4 {
return None;
}
let scheme = KeyScheme::try_from(scheme_str).ok()?;
return Some(KeystoreKeyType::new(key_type, scheme));
}
let scheme = KeyScheme::try_from(scheme_str).ok()?;
return Some(KeystoreKeyType::new(key_type, scheme));
}
// Try parsing as short form: key_type only (e.g., "audi")
if spec.len() == 4 {
// Look up predefined scheme; default to Sr if not found
let scheme = predefined.get(spec).copied().unwrap_or(KeyScheme::Sr);
return Some(KeystoreKeyType::new(spec, scheme));
}
// Try parsing as short form: key_type only (e.g., "audi")
if spec.len() == 4 {
// Look up predefined scheme; default to Sr if not found
let scheme = predefined.get(spec).copied().unwrap_or(KeyScheme::Sr);
return Some(KeystoreKeyType::new(spec, scheme));
}
None
None
}
/// Parses a list of keystore key type specifications.
@@ -132,151 +129,149 @@ fn parse_key_spec(spec: &str, predefined: &HashMap<&str, KeyScheme>) -> Option<K
///
/// If the resulting list is empty, returns the default keystore key types.
pub fn parse_keystore_key_types<T: AsRef<str>>(
specs: &[T],
is_asset_hub_polkadot: bool,
specs: &[T],
is_asset_hub_polkadot: bool,
) -> Vec<KeystoreKeyType> {
let predefined_schemes = get_predefined_schemes(is_asset_hub_polkadot);
let predefined_schemes = get_predefined_schemes(is_asset_hub_polkadot);
let parsed: Vec<KeystoreKeyType> = specs
.iter()
.filter_map(|spec| parse_key_spec(spec.as_ref(), &predefined_schemes))
.collect();
let parsed: Vec<KeystoreKeyType> = specs
.iter()
.filter_map(|spec| parse_key_spec(spec.as_ref(), &predefined_schemes))
.collect();
if parsed.is_empty() {
get_default_keystore_key_types(is_asset_hub_polkadot)
} else {
parsed
}
if parsed.is_empty() {
get_default_keystore_key_types(is_asset_hub_polkadot)
} else {
parsed
}
}
/// Returns the default keystore key types when none are specified.
pub fn get_default_keystore_key_types(is_asset_hub_polkadot: bool) -> Vec<KeystoreKeyType> {
let predefined_schemes = get_predefined_schemes(is_asset_hub_polkadot);
let default_keys = [
"aura", "babe", "imon", "gran", "audi", "asgn", "para", "beef", "nmbs", "rand", "rate",
"mixn", "bcsv", "ftsv",
];
let predefined_schemes = get_predefined_schemes(is_asset_hub_polkadot);
let default_keys = [
"aura", "babe", "imon", "gran", "audi", "asgn", "para", "beef", "nmbs", "rand", "rate",
"mixn", "bcsv", "ftsv",
];
default_keys
.iter()
.filter_map(|key_type| {
predefined_schemes
.get(*key_type)
.map(|scheme| KeystoreKeyType::new(*key_type, *scheme))
})
.collect()
default_keys
.iter()
.filter_map(|key_type| {
predefined_schemes.get(*key_type).map(|scheme| KeystoreKeyType::new(*key_type, *scheme))
})
.collect()
}
#[cfg(test)]
mod tests {
use super::*;
use super::*;
#[test]
fn parse_keystore_key_types_ignores_invalid_specs() {
let specs = vec![
"audi".to_string(),
"invalid".to_string(), // Too long - ignored
"xxx".to_string(), // Too short - ignored
"xxxx".to_string(), // Unknown key - defaults to sr
"audi_xx".to_string(), // Invalid scheme - ignored
"gran".to_string(),
];
#[test]
fn parse_keystore_key_types_ignores_invalid_specs() {
let specs = vec![
"audi".to_string(),
"invalid".to_string(), // Too long - ignored
"xxx".to_string(), // Too short - ignored
"xxxx".to_string(), // Unknown key - defaults to sr
"audi_xx".to_string(), // Invalid scheme - ignored
"gran".to_string(),
];
let result = parse_keystore_key_types(&specs, false);
assert_eq!(result.len(), 3);
assert_eq!(result[1], KeystoreKeyType::new("xxxx", KeyScheme::Sr)); // Unknown defaults to sr
assert_eq!(result[2], KeystoreKeyType::new("gran", KeyScheme::Ed));
}
let result = parse_keystore_key_types(&specs, false);
assert_eq!(result.len(), 3);
assert_eq!(result[1], KeystoreKeyType::new("xxxx", KeyScheme::Sr)); // Unknown defaults to sr
assert_eq!(result[2], KeystoreKeyType::new("gran", KeyScheme::Ed));
}
#[test]
fn parse_keystore_key_types_returns_specified_keys() {
let specs = vec!["audi".to_string(), "gran".to_string()];
let res = parse_keystore_key_types(&specs, false);
#[test]
fn parse_keystore_key_types_returns_specified_keys() {
let specs = vec!["audi".to_string(), "gran".to_string()];
let res = parse_keystore_key_types(&specs, false);
assert_eq!(res.len(), 2);
assert_eq!(res[0], KeystoreKeyType::new("audi", KeyScheme::Sr));
assert_eq!(res[1], KeystoreKeyType::new("gran", KeyScheme::Ed));
}
assert_eq!(res.len(), 2);
assert_eq!(res[0], KeystoreKeyType::new("audi", KeyScheme::Sr));
assert_eq!(res[1], KeystoreKeyType::new("gran", KeyScheme::Ed));
}
#[test]
fn parse_keystore_key_types_mixed_short_and_long_forms() {
let specs = vec![
"audi".to_string(),
"gran_sr".to_string(), // Override gran's default ed to sr
"gran".to_string(),
"beef".to_string(),
];
let res = parse_keystore_key_types(&specs, false);
#[test]
fn parse_keystore_key_types_mixed_short_and_long_forms() {
let specs = vec![
"audi".to_string(),
"gran_sr".to_string(), // Override gran's default ed to sr
"gran".to_string(),
"beef".to_string(),
];
let res = parse_keystore_key_types(&specs, false);
assert_eq!(res.len(), 4);
assert_eq!(res[0], KeystoreKeyType::new("audi", KeyScheme::Sr));
assert_eq!(res[1], KeystoreKeyType::new("gran", KeyScheme::Sr)); // Overridden
assert_eq!(res[2], KeystoreKeyType::new("gran", KeyScheme::Ed));
assert_eq!(res[3], KeystoreKeyType::new("beef", KeyScheme::Ec));
}
assert_eq!(res.len(), 4);
assert_eq!(res[0], KeystoreKeyType::new("audi", KeyScheme::Sr));
assert_eq!(res[1], KeystoreKeyType::new("gran", KeyScheme::Sr)); // Overridden
assert_eq!(res[2], KeystoreKeyType::new("gran", KeyScheme::Ed));
assert_eq!(res[3], KeystoreKeyType::new("beef", KeyScheme::Ec));
}
#[test]
fn parse_keystore_key_types_returns_defaults_when_empty() {
let specs: Vec<String> = vec![];
let res = parse_keystore_key_types(&specs, false);
#[test]
fn parse_keystore_key_types_returns_defaults_when_empty() {
let specs: Vec<String> = vec![];
let res = parse_keystore_key_types(&specs, false);
// Should return all default keys
assert!(!res.is_empty());
assert!(res.iter().any(|k| k.key_type == "aura"));
assert!(res.iter().any(|k| k.key_type == "babe"));
assert!(res.iter().any(|k| k.key_type == "gran"));
}
// Should return all default keys
assert!(!res.is_empty());
assert!(res.iter().any(|k| k.key_type == "aura"));
assert!(res.iter().any(|k| k.key_type == "babe"));
assert!(res.iter().any(|k| k.key_type == "gran"));
}
#[test]
fn parse_keystore_key_types_allows_custom_key_with_explicit_scheme() {
let specs = vec![
"cust_sr".to_string(), // Custom key with explicit scheme
"audi".to_string(),
];
let result = parse_keystore_key_types(&specs, false);
#[test]
fn parse_keystore_key_types_allows_custom_key_with_explicit_scheme() {
let specs = vec![
"cust_sr".to_string(), // Custom key with explicit scheme
"audi".to_string(),
];
let result = parse_keystore_key_types(&specs, false);
assert_eq!(result.len(), 2);
assert_eq!(result[0], KeystoreKeyType::new("cust", KeyScheme::Sr));
assert_eq!(result[1], KeystoreKeyType::new("audi", KeyScheme::Sr));
}
assert_eq!(result.len(), 2);
assert_eq!(result[0], KeystoreKeyType::new("cust", KeyScheme::Sr));
assert_eq!(result[1], KeystoreKeyType::new("audi", KeyScheme::Sr));
}
#[test]
fn full_workflow_asset_hub_polkadot() {
// For asset-hub-polkadot, aura should default to ed
let specs = vec!["aura".to_string(), "babe".to_string()];
#[test]
fn full_workflow_asset_hub_polkadot() {
// For asset-hub-polkadot, aura should default to ed
let specs = vec!["aura".to_string(), "babe".to_string()];
let res = parse_keystore_key_types(&specs, true);
let res = parse_keystore_key_types(&specs, true);
assert_eq!(res.len(), 2);
assert_eq!(res[0].key_type, "aura");
assert_eq!(res[0].scheme, KeyScheme::Ed); // ed for asset-hub-polkadot
assert_eq!(res.len(), 2);
assert_eq!(res[0].key_type, "aura");
assert_eq!(res[0].scheme, KeyScheme::Ed); // ed for asset-hub-polkadot
assert_eq!(res[1].key_type, "babe");
assert_eq!(res[1].scheme, KeyScheme::Sr);
}
assert_eq!(res[1].key_type, "babe");
assert_eq!(res[1].scheme, KeyScheme::Sr);
}
#[test]
fn full_workflow_custom_key_types() {
let specs = vec![
"aura".to_string(), // Use default scheme
"gran_sr".to_string(), // Override gran to use sr instead of ed
"cust_ec".to_string(), // Custom key type with ecdsa
];
#[test]
fn full_workflow_custom_key_types() {
let specs = vec![
"aura".to_string(), // Use default scheme
"gran_sr".to_string(), // Override gran to use sr instead of ed
"cust_ec".to_string(), // Custom key type with ecdsa
];
let res = parse_keystore_key_types(&specs, false);
let res = parse_keystore_key_types(&specs, false);
assert_eq!(res.len(), 3);
assert_eq!(res.len(), 3);
// aura uses default sr
assert_eq!(res[0].key_type, "aura");
assert_eq!(res[0].scheme, KeyScheme::Sr);
// aura uses default sr
assert_eq!(res[0].key_type, "aura");
assert_eq!(res[0].scheme, KeyScheme::Sr);
// gran overridden to sr
assert_eq!(res[1].key_type, "gran");
assert_eq!(res[1].scheme, KeyScheme::Sr);
// gran overridden to sr
assert_eq!(res[1].key_type, "gran");
assert_eq!(res[1].scheme, KeyScheme::Sr);
// custom key with ec
assert_eq!(res[2].key_type, "cust");
assert_eq!(res[2].scheme, KeyScheme::Ec);
}
// custom key with ec
assert_eq!(res[2].key_type, "cust");
assert_eq!(res[2].scheme, KeyScheme::Ec);
}
}
@@ -2,9 +2,9 @@ use std::path::{Path, PathBuf};
use configuration::types::CommandWithCustomArgs;
use provider::{
constants::NODE_CONFIG_DIR,
types::{GenerateFileCommand, GenerateFilesOptions, TransferedFile},
DynNamespace,
constants::NODE_CONFIG_DIR,
types::{GenerateFileCommand, GenerateFilesOptions, TransferedFile},
DynNamespace,
};
use serde::{Deserialize, Serialize};
use support::fs::FileSystem;
@@ -15,151 +15,137 @@ use crate::ScopedFilesystem;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) enum ParaArtifactType {
Wasm,
State,
Wasm,
State,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) enum ParaArtifactBuildOption {
Path(String),
Command(String),
CommandWithCustomArgs(CommandWithCustomArgs),
Path(String),
Command(String),
CommandWithCustomArgs(CommandWithCustomArgs),
}
/// Parachain artifact (could be either the genesis state or genesis wasm)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ParaArtifact {
artifact_type: ParaArtifactType,
build_option: ParaArtifactBuildOption,
artifact_path: Option<PathBuf>,
// image to use for building the para artifact
image: Option<String>,
artifact_type: ParaArtifactType,
build_option: ParaArtifactBuildOption,
artifact_path: Option<PathBuf>,
// image to use for building the para artifact
image: Option<String>,
}
impl ParaArtifact {
pub(crate) fn new(
artifact_type: ParaArtifactType,
build_option: ParaArtifactBuildOption,
) -> Self {
Self {
artifact_type,
build_option,
artifact_path: None,
image: None,
}
}
pub(crate) fn new(
artifact_type: ParaArtifactType,
build_option: ParaArtifactBuildOption,
) -> Self {
Self { artifact_type, build_option, artifact_path: None, image: None }
}
pub(crate) fn image(mut self, image: Option<String>) -> Self {
self.image = image;
self
}
pub(crate) fn image(mut self, image: Option<String>) -> Self {
self.image = image;
self
}
pub(crate) fn artifact_path(&self) -> Option<&PathBuf> {
self.artifact_path.as_ref()
}
pub(crate) fn artifact_path(&self) -> Option<&PathBuf> {
self.artifact_path.as_ref()
}
pub(crate) async fn build<'a, T>(
&mut self,
chain_spec_path: Option<impl AsRef<Path>>,
artifact_path: impl AsRef<Path>,
ns: &DynNamespace,
scoped_fs: &ScopedFilesystem<'a, T>,
maybe_output_path: Option<PathBuf>,
) -> Result<(), GeneratorError>
where
T: FileSystem,
{
let (cmd, custom_args) = match &self.build_option {
ParaArtifactBuildOption::Path(path) => {
let t = TransferedFile::new(PathBuf::from(path), artifact_path.as_ref().into());
scoped_fs.copy_files(vec![&t]).await?;
self.artifact_path = Some(artifact_path.as_ref().into());
return Ok(()); // work done!
},
ParaArtifactBuildOption::Command(cmd) => (cmd, &vec![]),
ParaArtifactBuildOption::CommandWithCustomArgs(cmd_with_custom_args) => {
(
&cmd_with_custom_args.cmd().as_str().to_string(),
cmd_with_custom_args.args(),
)
// (cmd.cmd_as_str().to_string(), cmd.1)
},
};
pub(crate) async fn build<'a, T>(
&mut self,
chain_spec_path: Option<impl AsRef<Path>>,
artifact_path: impl AsRef<Path>,
ns: &DynNamespace,
scoped_fs: &ScopedFilesystem<'a, T>,
maybe_output_path: Option<PathBuf>,
) -> Result<(), GeneratorError>
where
T: FileSystem,
{
let (cmd, custom_args) = match &self.build_option {
ParaArtifactBuildOption::Path(path) => {
let t = TransferedFile::new(PathBuf::from(path), artifact_path.as_ref().into());
scoped_fs.copy_files(vec![&t]).await?;
self.artifact_path = Some(artifact_path.as_ref().into());
return Ok(()); // work done!
},
ParaArtifactBuildOption::Command(cmd) => (cmd, &vec![]),
ParaArtifactBuildOption::CommandWithCustomArgs(cmd_with_custom_args) => {
(&cmd_with_custom_args.cmd().as_str().to_string(), cmd_with_custom_args.args())
// (cmd.cmd_as_str().to_string(), cmd.1)
},
};
let generate_subcmd = match self.artifact_type {
ParaArtifactType::Wasm => "export-genesis-wasm",
ParaArtifactType::State => "export-genesis-state",
};
let generate_subcmd = match self.artifact_type {
ParaArtifactType::Wasm => "export-genesis-wasm",
ParaArtifactType::State => "export-genesis-state",
};
// TODO: replace uuid with para_id-random
let temp_name = format!("temp-{}-{}", generate_subcmd, Uuid::new_v4());
let mut args: Vec<String> = vec![generate_subcmd.into()];
// TODO: replace uuid with para_id-random
let temp_name = format!("temp-{}-{}", generate_subcmd, Uuid::new_v4());
let mut args: Vec<String> = vec![generate_subcmd.into()];
let files_to_inject = if let Some(chain_spec_path) = chain_spec_path {
// TODO: we should get the full path from the scoped filesystem
let chain_spec_path_local = format!(
"{}/{}",
ns.base_dir().to_string_lossy(),
chain_spec_path.as_ref().to_string_lossy()
);
// Remote path to be injected
let chain_spec_path_in_pod = format!(
"{}/{}",
NODE_CONFIG_DIR,
chain_spec_path.as_ref().to_string_lossy()
);
// Path in the context of the node, this can be different in the context of the providers (e.g native)
let chain_spec_path_in_args = if ns.capabilities().prefix_with_full_path {
// In native
format!(
"{}/{}{}",
ns.base_dir().to_string_lossy(),
&temp_name,
&chain_spec_path_in_pod
)
} else {
chain_spec_path_in_pod.clone()
};
let files_to_inject = if let Some(chain_spec_path) = chain_spec_path {
// TODO: we should get the full path from the scoped filesystem
let chain_spec_path_local = format!(
"{}/{}",
ns.base_dir().to_string_lossy(),
chain_spec_path.as_ref().to_string_lossy()
);
// Remote path to be injected
let chain_spec_path_in_pod =
format!("{}/{}", NODE_CONFIG_DIR, chain_spec_path.as_ref().to_string_lossy());
// Path in the context of the node, this can be different in the context of the providers (e.g native)
let chain_spec_path_in_args = if ns.capabilities().prefix_with_full_path {
// In native
format!(
"{}/{}{}",
ns.base_dir().to_string_lossy(),
&temp_name,
&chain_spec_path_in_pod
)
} else {
chain_spec_path_in_pod.clone()
};
args.push("--chain".into());
args.push(chain_spec_path_in_args);
args.push("--chain".into());
args.push(chain_spec_path_in_args);
for custom_arg in custom_args {
match custom_arg {
configuration::types::Arg::Flag(flag) => {
args.push(flag.into());
},
configuration::types::Arg::Option(flag, flag_value) => {
args.push(flag.into());
args.push(flag_value.into());
},
configuration::types::Arg::Array(flag, values) => {
args.push(flag.into());
values.iter().for_each(|v| args.push(v.into()));
},
}
}
for custom_arg in custom_args {
match custom_arg {
configuration::types::Arg::Flag(flag) => {
args.push(flag.into());
},
configuration::types::Arg::Option(flag, flag_value) => {
args.push(flag.into());
args.push(flag_value.into());
},
configuration::types::Arg::Array(flag, values) => {
args.push(flag.into());
values.iter().for_each(|v| args.push(v.into()));
},
}
}
vec![TransferedFile::new(
chain_spec_path_local,
chain_spec_path_in_pod,
)]
} else {
vec![]
};
vec![TransferedFile::new(chain_spec_path_local, chain_spec_path_in_pod)]
} else {
vec![]
};
let artifact_path_ref = artifact_path.as_ref();
let generate_command = GenerateFileCommand::new(cmd.as_str(), artifact_path_ref).args(args);
let options = GenerateFilesOptions::with_files(
vec![generate_command],
self.image.clone(),
&files_to_inject,
maybe_output_path,
)
.temp_name(temp_name);
ns.generate_files(options).await?;
self.artifact_path = Some(artifact_path_ref.into());
let artifact_path_ref = artifact_path.as_ref();
let generate_command = GenerateFileCommand::new(cmd.as_str(), artifact_path_ref).args(args);
let options = GenerateFilesOptions::with_files(
vec![generate_command],
self.image.clone(),
&files_to_inject,
maybe_output_path,
)
.temp_name(temp_name);
ns.generate_files(options).await?;
self.artifact_path = Some(artifact_path_ref.into());
Ok(())
}
Ok(())
}
}
@@ -8,41 +8,39 @@ use crate::shared::types::ParkedPort;
// TODO: (team), we want to continue support ws_port? No
enum PortTypes {
Rpc,
P2P,
Prometheus,
Rpc,
P2P,
Prometheus,
}
pub fn generate(port: Option<Port>) -> Result<ParkedPort, GeneratorError> {
let port = port.unwrap_or(0);
let listener = TcpListener::bind(format!("0.0.0.0:{port}"))
.map_err(|_e| GeneratorError::PortGeneration(port, "Can't bind".into()))?;
let port = listener
.local_addr()
.expect(&format!(
"We should always get the local_addr from the listener {THIS_IS_A_BUG}"
))
.port();
Ok(ParkedPort::new(port, listener))
let port = port.unwrap_or(0);
let listener = TcpListener::bind(format!("0.0.0.0:{port}"))
.map_err(|_e| GeneratorError::PortGeneration(port, "Can't bind".into()))?;
let port = listener
.local_addr()
.expect(&format!("We should always get the local_addr from the listener {THIS_IS_A_BUG}"))
.port();
Ok(ParkedPort::new(port, listener))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn generate_random() {
let port = generate(None).unwrap();
let listener = port.1.write().unwrap();
use super::*;
#[test]
fn generate_random() {
let port = generate(None).unwrap();
let listener = port.1.write().unwrap();
assert!(listener.is_some());
}
assert!(listener.is_some());
}
#[test]
fn generate_fixed_port() {
let port = generate(Some(33056)).unwrap();
let listener = port.1.write().unwrap();
#[test]
fn generate_fixed_port() {
let port = generate(Some(33056)).unwrap();
let listener = port.1.write().unwrap();
assert!(listener.is_some());
assert_eq!(port.0, 33056);
}
assert!(listener.is_some());
assert_eq!(port.0, 33056);
}
}
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
@@ -9,33 +9,33 @@ use crate::{shared::types::RuntimeUpgradeOptions, tx_helper};
#[async_trait]
pub trait ChainUpgrade {
/// Perform a runtime upgrade (with sudo)
///
/// This call 'System.set_code_without_checks' wrapped in
/// 'Sudo.sudo_unchecked_weight'
async fn runtime_upgrade(&self, options: RuntimeUpgradeOptions) -> Result<(), anyhow::Error>;
/// Perform a runtime upgrade (with sudo)
///
/// This call 'System.set_code_without_checks' wrapped in
/// 'Sudo.sudo_unchecked_weight'
async fn runtime_upgrade(&self, options: RuntimeUpgradeOptions) -> Result<(), anyhow::Error>;
/// Perform a runtime upgrade (with sudo), inner call with the node pass as arg.
///
/// This call 'System.set_code_without_checks' wrapped in
/// 'Sudo.sudo_unchecked_weight'
async fn perform_runtime_upgrade(
&self,
node: &NetworkNode,
options: RuntimeUpgradeOptions,
) -> Result<(), anyhow::Error> {
let sudo = if let Some(possible_seed) = options.seed {
Keypair::from_secret_key(possible_seed)
.map_err(|_| anyhow!("seed should return a Keypair"))?
} else {
let uri = SecretUri::from_str("//Alice")?;
Keypair::from_uri(&uri).map_err(|_| anyhow!("'//Alice' should return a Keypair"))?
};
/// Perform a runtime upgrade (with sudo), inner call with the node pass as arg.
///
/// This call 'System.set_code_without_checks' wrapped in
/// 'Sudo.sudo_unchecked_weight'
async fn perform_runtime_upgrade(
&self,
node: &NetworkNode,
options: RuntimeUpgradeOptions,
) -> Result<(), anyhow::Error> {
let sudo = if let Some(possible_seed) = options.seed {
Keypair::from_secret_key(possible_seed)
.map_err(|_| anyhow!("seed should return a Keypair"))?
} else {
let uri = SecretUri::from_str("//Alice")?;
Keypair::from_uri(&uri).map_err(|_| anyhow!("'//Alice' should return a Keypair"))?
};
let wasm_data = options.wasm.get_asset().await?;
let wasm_data = options.wasm.get_asset().await?;
tx_helper::runtime_upgrade::upgrade(node, &wasm_data, &sudo).await?;
tx_helper::runtime_upgrade::upgrade(node, &wasm_data, &sudo).await?;
Ok(())
}
Ok(())
}
}
File diff suppressed because it is too large Load Diff
@@ -6,70 +6,61 @@ use serde::{Deserialize, Serialize};
use super::node::NetworkNode;
use crate::{
network::chain_upgrade::ChainUpgrade, shared::types::RuntimeUpgradeOptions,
utils::default_as_empty_vec,
network::chain_upgrade::ChainUpgrade, shared::types::RuntimeUpgradeOptions,
utils::default_as_empty_vec,
};
#[derive(Debug, Serialize, Deserialize)]
pub struct Relaychain {
pub(crate) chain: String,
pub(crate) chain_id: String,
pub(crate) chain_spec_path: PathBuf,
#[serde(default, deserialize_with = "default_as_empty_vec")]
pub(crate) nodes: Vec<NetworkNode>,
pub(crate) chain: String,
pub(crate) chain_id: String,
pub(crate) chain_spec_path: PathBuf,
#[serde(default, deserialize_with = "default_as_empty_vec")]
pub(crate) nodes: Vec<NetworkNode>,
}
#[derive(Debug, Deserialize)]
pub(crate) struct RawRelaychain {
#[serde(flatten)]
pub(crate) inner: Relaychain,
pub(crate) nodes: serde_json::Value,
#[serde(flatten)]
pub(crate) inner: Relaychain,
pub(crate) nodes: serde_json::Value,
}
#[async_trait]
impl ChainUpgrade for Relaychain {
async fn runtime_upgrade(&self, options: RuntimeUpgradeOptions) -> Result<(), anyhow::Error> {
// check if the node is valid first
let node = if let Some(node_name) = &options.node_name {
if let Some(node) = self
.nodes()
.into_iter()
.find(|node| node.name() == node_name)
{
node
} else {
return Err(anyhow!("Node: {node_name} is not part of the set of nodes"));
}
} else {
// take the first node
if let Some(node) = self.nodes().first() {
node
} else {
return Err(anyhow!("chain doesn't have any node!"));
}
};
async fn runtime_upgrade(&self, options: RuntimeUpgradeOptions) -> Result<(), anyhow::Error> {
// check if the node is valid first
let node = if let Some(node_name) = &options.node_name {
if let Some(node) = self.nodes().into_iter().find(|node| node.name() == node_name) {
node
} else {
return Err(anyhow!("Node: {node_name} is not part of the set of nodes"));
}
} else {
// take the first node
if let Some(node) = self.nodes().first() {
node
} else {
return Err(anyhow!("chain doesn't have any node!"));
}
};
self.perform_runtime_upgrade(node, options).await
}
self.perform_runtime_upgrade(node, options).await
}
}
impl Relaychain {
pub(crate) fn new(chain: String, chain_id: String, chain_spec_path: PathBuf) -> Self {
Self {
chain,
chain_id,
chain_spec_path,
nodes: Default::default(),
}
}
pub(crate) fn new(chain: String, chain_id: String, chain_spec_path: PathBuf) -> Self {
Self { chain, chain_id, chain_spec_path, nodes: Default::default() }
}
// Public API
pub fn nodes(&self) -> Vec<&NetworkNode> {
self.nodes.iter().collect()
}
// Public API
pub fn nodes(&self) -> Vec<&NetworkNode> {
self.nodes.iter().collect()
}
/// Get chain name
pub fn chain(&self) -> &str {
&self.chain
}
/// Get chain name
pub fn chain(&self) -> &str {
&self.chain
}
}
@@ -1,6 +1,6 @@
use std::{
path::{Path, PathBuf},
str::FromStr,
path::{Path, PathBuf},
str::FromStr,
};
use anyhow::anyhow;
@@ -14,317 +14,282 @@ use tracing::info;
use super::{chain_upgrade::ChainUpgrade, node::NetworkNode};
use crate::{
network_spec::teyrchain::TeyrchainSpec,
shared::types::{RegisterParachainOptions, RuntimeUpgradeOptions},
tx_helper::client::get_client_from_url,
utils::default_as_empty_vec,
ScopedFilesystem,
network_spec::teyrchain::TeyrchainSpec,
shared::types::{RegisterParachainOptions, RuntimeUpgradeOptions},
tx_helper::client::get_client_from_url,
utils::default_as_empty_vec,
ScopedFilesystem,
};
#[derive(Debug, Serialize, Deserialize)]
pub struct Parachain {
pub(crate) chain: Option<String>,
pub(crate) para_id: u32,
// unique_id is internally used to allow multiple parachains with the same id
// See `ParachainConfig` for more details
pub(crate) unique_id: String,
pub(crate) chain_id: Option<String>,
pub(crate) chain_spec_path: Option<PathBuf>,
#[serde(default, deserialize_with = "default_as_empty_vec")]
pub(crate) collators: Vec<NetworkNode>,
pub(crate) files_to_inject: Vec<TransferedFile>,
pub(crate) bootnodes_addresses: Vec<multiaddr::Multiaddr>,
pub(crate) chain: Option<String>,
pub(crate) para_id: u32,
// unique_id is internally used to allow multiple parachains with the same id
// See `ParachainConfig` for more details
pub(crate) unique_id: String,
pub(crate) chain_id: Option<String>,
pub(crate) chain_spec_path: Option<PathBuf>,
#[serde(default, deserialize_with = "default_as_empty_vec")]
pub(crate) collators: Vec<NetworkNode>,
pub(crate) files_to_inject: Vec<TransferedFile>,
pub(crate) bootnodes_addresses: Vec<multiaddr::Multiaddr>,
}
#[derive(Debug, Deserialize)]
pub(crate) struct RawParachain {
#[serde(flatten)]
pub(crate) inner: Parachain,
pub(crate) collators: serde_json::Value,
#[serde(flatten)]
pub(crate) inner: Parachain,
pub(crate) collators: serde_json::Value,
}
#[async_trait]
impl ChainUpgrade for Parachain {
async fn runtime_upgrade(&self, options: RuntimeUpgradeOptions) -> Result<(), anyhow::Error> {
// check if the node is valid first
let node = if let Some(node_name) = &options.node_name {
if let Some(node) = self
.collators()
.into_iter()
.find(|node| node.name() == node_name)
{
node
} else {
return Err(anyhow!("Node: {node_name} is not part of the set of nodes"));
}
} else {
// take the first node
if let Some(node) = self.collators().first() {
node
} else {
return Err(anyhow!("chain doesn't have any node!"));
}
};
async fn runtime_upgrade(&self, options: RuntimeUpgradeOptions) -> Result<(), anyhow::Error> {
// check if the node is valid first
let node = if let Some(node_name) = &options.node_name {
if let Some(node) = self.collators().into_iter().find(|node| node.name() == node_name) {
node
} else {
return Err(anyhow!("Node: {node_name} is not part of the set of nodes"));
}
} else {
// take the first node
if let Some(node) = self.collators().first() {
node
} else {
return Err(anyhow!("chain doesn't have any node!"));
}
};
self.perform_runtime_upgrade(node, options).await
}
self.perform_runtime_upgrade(node, options).await
}
}
impl Parachain {
pub(crate) fn new(para_id: u32, unique_id: impl Into<String>) -> Self {
Self {
chain: None,
para_id,
unique_id: unique_id.into(),
chain_id: None,
chain_spec_path: None,
collators: Default::default(),
files_to_inject: Default::default(),
bootnodes_addresses: vec![],
}
}
pub(crate) fn new(para_id: u32, unique_id: impl Into<String>) -> Self {
Self {
chain: None,
para_id,
unique_id: unique_id.into(),
chain_id: None,
chain_spec_path: None,
collators: Default::default(),
files_to_inject: Default::default(),
bootnodes_addresses: vec![],
}
}
pub(crate) fn with_chain_spec(
para_id: u32,
unique_id: impl Into<String>,
chain_id: impl Into<String>,
chain_spec_path: impl AsRef<Path>,
) -> Self {
Self {
para_id,
unique_id: unique_id.into(),
chain: None,
chain_id: Some(chain_id.into()),
chain_spec_path: Some(chain_spec_path.as_ref().into()),
collators: Default::default(),
files_to_inject: Default::default(),
bootnodes_addresses: vec![],
}
}
pub(crate) fn with_chain_spec(
para_id: u32,
unique_id: impl Into<String>,
chain_id: impl Into<String>,
chain_spec_path: impl AsRef<Path>,
) -> Self {
Self {
para_id,
unique_id: unique_id.into(),
chain: None,
chain_id: Some(chain_id.into()),
chain_spec_path: Some(chain_spec_path.as_ref().into()),
collators: Default::default(),
files_to_inject: Default::default(),
bootnodes_addresses: vec![],
}
}
pub(crate) async fn from_spec(
para: &TeyrchainSpec,
files_to_inject: &[TransferedFile],
scoped_fs: &ScopedFilesystem<'_, impl FileSystem>,
) -> Result<Self, anyhow::Error> {
let mut para_files_to_inject = files_to_inject.to_owned();
pub(crate) async fn from_spec(
para: &TeyrchainSpec,
files_to_inject: &[TransferedFile],
scoped_fs: &ScopedFilesystem<'_, impl FileSystem>,
) -> Result<Self, anyhow::Error> {
let mut para_files_to_inject = files_to_inject.to_owned();
// parachain id is used for the keystore
let mut parachain = if let Some(chain_spec) = para.chain_spec.as_ref() {
let id = chain_spec.read_chain_id(scoped_fs).await?;
// parachain id is used for the keystore
let mut parachain = if let Some(chain_spec) = para.chain_spec.as_ref() {
let id = chain_spec.read_chain_id(scoped_fs).await?;
// add the spec to global files to inject
let spec_name = chain_spec.chain_spec_name();
let base = PathBuf::from_str(scoped_fs.base_dir)?;
para_files_to_inject.push(TransferedFile::new(
base.join(format!("{spec_name}.json")),
PathBuf::from(format!("/cfg/{}.json", para.id)),
));
// add the spec to global files to inject
let spec_name = chain_spec.chain_spec_name();
let base = PathBuf::from_str(scoped_fs.base_dir)?;
para_files_to_inject.push(TransferedFile::new(
base.join(format!("{spec_name}.json")),
PathBuf::from(format!("/cfg/{}.json", para.id)),
));
let raw_path = chain_spec
.raw_path()
.ok_or(anyhow::anyhow!("chain-spec path should be set by now.",))?;
let mut running_para =
Parachain::with_chain_spec(para.id, &para.unique_id, id, raw_path);
if let Some(chain_name) = chain_spec.chain_name() {
running_para.chain = Some(chain_name.to_string());
}
let raw_path = chain_spec
.raw_path()
.ok_or(anyhow::anyhow!("chain-spec path should be set by now.",))?;
let mut running_para =
Parachain::with_chain_spec(para.id, &para.unique_id, id, raw_path);
if let Some(chain_name) = chain_spec.chain_name() {
running_para.chain = Some(chain_name.to_string());
}
running_para
} else {
Parachain::new(para.id, &para.unique_id)
};
running_para
} else {
Parachain::new(para.id, &para.unique_id)
};
parachain.bootnodes_addresses = para.bootnodes_addresses().into_iter().cloned().collect();
parachain.files_to_inject = para_files_to_inject;
parachain.bootnodes_addresses = para.bootnodes_addresses().into_iter().cloned().collect();
parachain.files_to_inject = para_files_to_inject;
Ok(parachain)
}
Ok(parachain)
}
pub async fn register(
options: RegisterParachainOptions,
scoped_fs: &ScopedFilesystem<'_, impl FileSystem>,
) -> Result<(), anyhow::Error> {
info!("Registering parachain: {:?}", options);
// get the seed
let sudo: Keypair;
if let Some(possible_seed) = options.seed {
sudo = Keypair::from_secret_key(possible_seed)
.expect(&format!("seed should return a Keypair {THIS_IS_A_BUG}"));
} else {
let uri = SecretUri::from_str("//Alice")?;
sudo = Keypair::from_uri(&uri)?;
}
pub async fn register(
options: RegisterParachainOptions,
scoped_fs: &ScopedFilesystem<'_, impl FileSystem>,
) -> Result<(), anyhow::Error> {
info!("Registering parachain: {:?}", options);
// get the seed
let sudo: Keypair;
if let Some(possible_seed) = options.seed {
sudo = Keypair::from_secret_key(possible_seed)
.expect(&format!("seed should return a Keypair {THIS_IS_A_BUG}"));
} else {
let uri = SecretUri::from_str("//Alice")?;
sudo = Keypair::from_uri(&uri)?;
}
let genesis_state = scoped_fs
.read_to_string(options.state_path)
.await
.expect(&format!(
"State Path should be ok by this point {THIS_IS_A_BUG}"
));
let wasm_data = scoped_fs
.read_to_string(options.wasm_path)
.await
.expect(&format!(
"Wasm Path should be ok by this point {THIS_IS_A_BUG}"
));
let genesis_state = scoped_fs
.read_to_string(options.state_path)
.await
.expect(&format!("State Path should be ok by this point {THIS_IS_A_BUG}"));
let wasm_data = scoped_fs
.read_to_string(options.wasm_path)
.await
.expect(&format!("Wasm Path should be ok by this point {THIS_IS_A_BUG}"));
wait_ws_ready(options.node_ws_url.as_str())
.await
.map_err(|_| {
anyhow::anyhow!(
"Error waiting for ws to be ready, at {}",
options.node_ws_url.as_str()
)
})?;
wait_ws_ready(options.node_ws_url.as_str()).await.map_err(|_| {
anyhow::anyhow!("Error waiting for ws to be ready, at {}", options.node_ws_url.as_str())
})?;
let api: OnlineClient<BizinikiwConfig> = get_client_from_url(&options.node_ws_url).await?;
let api: OnlineClient<BizinikiwConfig> = get_client_from_url(&options.node_ws_url).await?;
let schedule_para = pezkuwi_subxt::dynamic::tx(
"ParasSudoWrapper",
"sudo_schedule_para_initialize",
vec![
Value::primitive(options.id.into()),
Value::named_composite([
(
"genesis_head",
Value::from_bytes(hex::decode(&genesis_state[2..])?),
),
(
"validation_code",
Value::from_bytes(hex::decode(&wasm_data[2..])?),
),
("para_kind", Value::bool(options.onboard_as_para)),
]),
],
);
let schedule_para = pezkuwi_subxt::dynamic::tx(
"ParasSudoWrapper",
"sudo_schedule_para_initialize",
vec![
Value::primitive(options.id.into()),
Value::named_composite([
("genesis_head", Value::from_bytes(hex::decode(&genesis_state[2..])?)),
("validation_code", Value::from_bytes(hex::decode(&wasm_data[2..])?)),
("para_kind", Value::bool(options.onboard_as_para)),
]),
],
);
let sudo_call =
pezkuwi_subxt::dynamic::tx("Sudo", "sudo", vec![schedule_para.into_value()]);
let sudo_call =
pezkuwi_subxt::dynamic::tx("Sudo", "sudo", vec![schedule_para.into_value()]);
// TODO: uncomment below and fix the sign and submit (and follow afterwards until
// finalized block) to register the parachain
let mut tx = api
.tx()
.sign_and_submit_then_watch_default(&sudo_call, &sudo)
.await?;
// TODO: uncomment below and fix the sign and submit (and follow afterwards until
// finalized block) to register the parachain
let mut tx = api.tx().sign_and_submit_then_watch_default(&sudo_call, &sudo).await?;
// Below we use the low level API to replicate the `wait_for_in_block` behaviour
// which was removed in subxt 0.33.0. See https://github.com/paritytech/subxt/pull/1237.
while let Some(status) = tx.next().await {
match status? {
TxStatus::InBestBlock(tx_in_block) | TxStatus::InFinalizedBlock(tx_in_block) => {
let _result = tx_in_block.wait_for_success().await?;
info!("In block: {:#?}", tx_in_block.block_hash());
},
TxStatus::Error { message }
| TxStatus::Invalid { message }
| TxStatus::Dropped { message } => {
return Err(anyhow::format_err!("Error submitting tx: {message}"));
},
_ => continue,
}
}
// Below we use the low level API to replicate the `wait_for_in_block` behaviour
// which was removed in subxt 0.33.0. See https://github.com/paritytech/subxt/pull/1237.
while let Some(status) = tx.next().await {
match status? {
TxStatus::InBestBlock(tx_in_block) | TxStatus::InFinalizedBlock(tx_in_block) => {
let _result = tx_in_block.wait_for_success().await?;
info!("In block: {:#?}", tx_in_block.block_hash());
},
TxStatus::Error { message }
| TxStatus::Invalid { message }
| TxStatus::Dropped { message } => {
return Err(anyhow::format_err!("Error submitting tx: {message}"));
},
_ => continue,
}
}
Ok(())
}
Ok(())
}
pub fn para_id(&self) -> u32 {
self.para_id
}
pub fn para_id(&self) -> u32 {
self.para_id
}
pub fn unique_id(&self) -> &str {
self.unique_id.as_str()
}
pub fn unique_id(&self) -> &str {
self.unique_id.as_str()
}
pub fn chain_id(&self) -> Option<&str> {
self.chain_id.as_deref()
}
pub fn chain_id(&self) -> Option<&str> {
self.chain_id.as_deref()
}
pub fn collators(&self) -> Vec<&NetworkNode> {
self.collators.iter().collect()
}
pub fn collators(&self) -> Vec<&NetworkNode> {
self.collators.iter().collect()
}
pub fn bootnodes_addresses(&self) -> Vec<&multiaddr::Multiaddr> {
self.bootnodes_addresses.iter().collect()
}
pub fn bootnodes_addresses(&self) -> Vec<&multiaddr::Multiaddr> {
self.bootnodes_addresses.iter().collect()
}
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use std::collections::HashMap;
use super::*;
use super::*;
#[test]
fn create_with_is_works() {
let para = Parachain::new(100, "100");
// only para_id and unique_id should be set
assert_eq!(para.para_id, 100);
assert_eq!(para.unique_id, "100");
assert_eq!(para.chain_id, None);
assert_eq!(para.chain, None);
assert_eq!(para.chain_spec_path, None);
}
#[test]
fn create_with_is_works() {
let para = Parachain::new(100, "100");
// only para_id and unique_id should be set
assert_eq!(para.para_id, 100);
assert_eq!(para.unique_id, "100");
assert_eq!(para.chain_id, None);
assert_eq!(para.chain, None);
assert_eq!(para.chain_spec_path, None);
}
#[test]
fn create_with_chain_spec_works() {
let para = Parachain::with_chain_spec(100, "100", "rococo-local", "/tmp/rococo-local.json");
assert_eq!(para.para_id, 100);
assert_eq!(para.unique_id, "100");
assert_eq!(para.chain_id, Some("rococo-local".to_string()));
assert_eq!(para.chain, None);
assert_eq!(
para.chain_spec_path,
Some(PathBuf::from("/tmp/rococo-local.json"))
);
}
#[test]
fn create_with_chain_spec_works() {
let para = Parachain::with_chain_spec(100, "100", "rococo-local", "/tmp/rococo-local.json");
assert_eq!(para.para_id, 100);
assert_eq!(para.unique_id, "100");
assert_eq!(para.chain_id, Some("rococo-local".to_string()));
assert_eq!(para.chain, None);
assert_eq!(para.chain_spec_path, Some(PathBuf::from("/tmp/rococo-local.json")));
}
#[tokio::test]
async fn create_with_para_spec_works() {
use configuration::ParachainConfigBuilder;
#[tokio::test]
async fn create_with_para_spec_works() {
use configuration::ParachainConfigBuilder;
use crate::network_spec::teyrchain::TeyrchainSpec;
use crate::network_spec::teyrchain::TeyrchainSpec;
let bootnode_addresses = vec!["/ip4/10.41.122.55/tcp/45421"];
let bootnode_addresses = vec!["/ip4/10.41.122.55/tcp/45421"];
let para_config = ParachainConfigBuilder::new(Default::default())
.with_id(100)
.cumulus_based(false)
.with_default_command("adder-collator")
.with_raw_bootnodes_addresses(bootnode_addresses.clone())
.with_collator(|c| c.with_name("col"))
.build()
.unwrap();
let para_config = ParachainConfigBuilder::new(Default::default())
.with_id(100)
.cumulus_based(false)
.with_default_command("adder-collator")
.with_raw_bootnodes_addresses(bootnode_addresses.clone())
.with_collator(|c| c.with_name("col"))
.build()
.unwrap();
let para_spec =
TeyrchainSpec::from_config(&para_config, "rococo-local".try_into().unwrap()).unwrap();
let fs = support::fs::in_memory::InMemoryFileSystem::new(HashMap::default());
let scoped_fs = ScopedFilesystem {
fs: &fs,
base_dir: "/tmp/some",
};
let para_spec =
TeyrchainSpec::from_config(&para_config, "rococo-local".try_into().unwrap()).unwrap();
let fs = support::fs::in_memory::InMemoryFileSystem::new(HashMap::default());
let scoped_fs = ScopedFilesystem { fs: &fs, base_dir: "/tmp/some" };
let files = vec![TransferedFile::new(
PathBuf::from("/tmp/some"),
PathBuf::from("/tmp/some"),
)];
let para = Parachain::from_spec(&para_spec, &files, &scoped_fs)
.await
.unwrap();
println!("{para:#?}");
assert_eq!(para.para_id, 100);
assert_eq!(para.unique_id, "100");
assert_eq!(para.chain_id, None);
assert_eq!(para.chain, None);
// one file should be added.
assert_eq!(para.files_to_inject.len(), 1);
assert_eq!(
para.bootnodes_addresses()
.iter()
.map(|addr| addr.to_string())
.collect::<Vec<_>>(),
bootnode_addresses
);
}
let files =
vec![TransferedFile::new(PathBuf::from("/tmp/some"), PathBuf::from("/tmp/some"))];
let para = Parachain::from_spec(&para_spec, &files, &scoped_fs).await.unwrap();
println!("{para:#?}");
assert_eq!(para.para_id, 100);
assert_eq!(para.unique_id, "100");
assert_eq!(para.chain_id, None);
assert_eq!(para.chain, None);
// one file should be added.
assert_eq!(para.files_to_inject.len(), 1);
assert_eq!(
para.bootnodes_addresses().iter().map(|addr| addr.to_string()).collect::<Vec<_>>(),
bootnode_addresses
);
}
}
@@ -5,58 +5,56 @@ use reqwest::Url;
#[async_trait]
pub trait MetricsHelper {
async fn metric(&self, metric_name: &str) -> Result<f64, anyhow::Error>;
async fn metric_with_url(
metric: impl AsRef<str> + Send,
endpoint: impl Into<Url> + Send,
) -> Result<f64, anyhow::Error>;
async fn metric(&self, metric_name: &str) -> Result<f64, anyhow::Error>;
async fn metric_with_url(
metric: impl AsRef<str> + Send,
endpoint: impl Into<Url> + Send,
) -> Result<f64, anyhow::Error>;
}
pub struct Metrics {
endpoint: Url,
endpoint: Url,
}
impl Metrics {
fn new(endpoint: impl Into<Url>) -> Self {
Self {
endpoint: endpoint.into(),
}
}
fn new(endpoint: impl Into<Url>) -> Self {
Self { endpoint: endpoint.into() }
}
async fn fetch_metrics(
endpoint: impl AsRef<str>,
) -> Result<HashMap<String, f64>, anyhow::Error> {
let response = reqwest::get(endpoint.as_ref()).await?;
Ok(prom_metrics_parser::parse(&response.text().await?)?)
}
async fn fetch_metrics(
endpoint: impl AsRef<str>,
) -> Result<HashMap<String, f64>, anyhow::Error> {
let response = reqwest::get(endpoint.as_ref()).await?;
Ok(prom_metrics_parser::parse(&response.text().await?)?)
}
fn get_metric(
metrics_map: HashMap<String, f64>,
metric_name: &str,
) -> Result<f64, anyhow::Error> {
let treat_not_found_as_zero = true;
if let Some(val) = metrics_map.get(metric_name) {
Ok(*val)
} else if treat_not_found_as_zero {
Ok(0_f64)
} else {
Err(anyhow::anyhow!("MetricNotFound: {metric_name}"))
}
}
fn get_metric(
metrics_map: HashMap<String, f64>,
metric_name: &str,
) -> Result<f64, anyhow::Error> {
let treat_not_found_as_zero = true;
if let Some(val) = metrics_map.get(metric_name) {
Ok(*val)
} else if treat_not_found_as_zero {
Ok(0_f64)
} else {
Err(anyhow::anyhow!("MetricNotFound: {metric_name}"))
}
}
}
#[async_trait]
impl MetricsHelper for Metrics {
async fn metric(&self, metric_name: &str) -> Result<f64, anyhow::Error> {
let metrics_map = Metrics::fetch_metrics(self.endpoint.as_str()).await?;
Metrics::get_metric(metrics_map, metric_name)
}
async fn metric(&self, metric_name: &str) -> Result<f64, anyhow::Error> {
let metrics_map = Metrics::fetch_metrics(self.endpoint.as_str()).await?;
Metrics::get_metric(metrics_map, metric_name)
}
async fn metric_with_url(
metric_name: impl AsRef<str> + Send,
endpoint: impl Into<Url> + Send,
) -> Result<f64, anyhow::Error> {
let metrics_map = Metrics::fetch_metrics(endpoint.into()).await?;
Metrics::get_metric(metrics_map, metric_name.as_ref())
}
async fn metric_with_url(
metric_name: impl AsRef<str> + Send,
endpoint: impl Into<Url> + Send,
) -> Result<f64, anyhow::Error> {
let metrics_map = Metrics::fetch_metrics(endpoint.into()).await?;
Metrics::get_metric(metrics_map, metric_name.as_ref())
}
}
@@ -6,29 +6,29 @@ use tracing::trace;
use crate::network::node::NetworkNode;
pub(crate) async fn verify_nodes(nodes: &[&NetworkNode]) -> Result<(), anyhow::Error> {
timeout(Duration::from_secs(90), check_nodes(nodes))
.await
.map_err(|_| anyhow::anyhow!("one or more nodes are not ready!"))
timeout(Duration::from_secs(90), check_nodes(nodes))
.await
.map_err(|_| anyhow::anyhow!("one or more nodes are not ready!"))
}
// TODO: we should inject in someway the logic to make the request
// in order to allow us to `mock` and easily test this.
// maybe moved to the provider with a NodeStatus, and some helpers like wait_running, wait_ready, etc... ? to be discussed
async fn check_nodes(nodes: &[&NetworkNode]) {
loop {
let tasks: Vec<_> = nodes
.iter()
.map(|node| {
trace!("🔎 checking node: {} ", node.name);
reqwest::get(node.prometheus_uri.clone())
})
.collect();
loop {
let tasks: Vec<_> = nodes
.iter()
.map(|node| {
trace!("🔎 checking node: {} ", node.name);
reqwest::get(node.prometheus_uri.clone())
})
.collect();
let all_ready = futures::future::try_join_all(tasks).await;
if all_ready.is_ok() {
return;
}
let all_ready = futures::future::try_join_all(tasks).await;
if all_ready.is_ok() {
return;
}
tokio::time::sleep(Duration::from_millis(1000)).await;
}
tokio::time::sleep(Duration::from_millis(1000)).await;
}
}
@@ -1,6 +1,6 @@
use std::{
collections::{hash_map::Entry, HashMap},
sync::Arc,
collections::{hash_map::Entry, HashMap},
sync::Arc,
};
use configuration::{GlobalSettings, HrmpChannelConfig, NetworkConfig};
@@ -20,311 +20,289 @@ use self::{node::NodeSpec, relaychain::RelaychainSpec, teyrchain::TeyrchainSpec}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NetworkSpec {
/// Relaychain configuration.
pub(crate) relaychain: RelaychainSpec,
/// Relaychain configuration.
pub(crate) relaychain: RelaychainSpec,
/// Parachains configurations.
pub(crate) parachains: Vec<TeyrchainSpec>,
/// Parachains configurations.
pub(crate) parachains: Vec<TeyrchainSpec>,
/// HRMP channels configurations.
pub(crate) hrmp_channels: Vec<HrmpChannelConfig>,
/// HRMP channels configurations.
pub(crate) hrmp_channels: Vec<HrmpChannelConfig>,
/// Global settings
pub(crate) global_settings: GlobalSettings,
/// Global settings
pub(crate) global_settings: GlobalSettings,
}
impl NetworkSpec {
pub async fn from_config(
network_config: &NetworkConfig,
) -> Result<NetworkSpec, OrchestratorError> {
let mut errs = vec![];
let relaychain = RelaychainSpec::from_config(network_config.relaychain())?;
let mut parachains = vec![];
pub async fn from_config(
network_config: &NetworkConfig,
) -> Result<NetworkSpec, OrchestratorError> {
let mut errs = vec![];
let relaychain = RelaychainSpec::from_config(network_config.relaychain())?;
let mut parachains = vec![];
// TODO: move to `fold` or map+fold
for para_config in network_config.parachains() {
match TeyrchainSpec::from_config(para_config, relaychain.chain.clone()) {
Ok(para) => parachains.push(para),
Err(err) => errs.push(err),
}
}
// TODO: move to `fold` or map+fold
for para_config in network_config.parachains() {
match TeyrchainSpec::from_config(para_config, relaychain.chain.clone()) {
Ok(para) => parachains.push(para),
Err(err) => errs.push(err),
}
}
if errs.is_empty() {
Ok(NetworkSpec {
relaychain,
parachains,
hrmp_channels: network_config
.hrmp_channels()
.into_iter()
.cloned()
.collect(),
global_settings: network_config.global_settings().clone(),
})
} else {
let errs_str = errs
.into_iter()
.map(|e| e.to_string())
.collect::<Vec<String>>()
.join("\n");
Err(OrchestratorError::InvalidConfig(errs_str))
}
}
if errs.is_empty() {
Ok(NetworkSpec {
relaychain,
parachains,
hrmp_channels: network_config.hrmp_channels().into_iter().cloned().collect(),
global_settings: network_config.global_settings().clone(),
})
} else {
let errs_str =
errs.into_iter().map(|e| e.to_string()).collect::<Vec<String>>().join("\n");
Err(OrchestratorError::InvalidConfig(errs_str))
}
}
pub async fn populate_nodes_available_args(
&mut self,
ns: Arc<dyn ProviderNamespace + Send + Sync>,
) -> Result<(), OrchestratorError> {
let network_nodes = self.collect_network_nodes();
pub async fn populate_nodes_available_args(
&mut self,
ns: Arc<dyn ProviderNamespace + Send + Sync>,
) -> Result<(), OrchestratorError> {
let network_nodes = self.collect_network_nodes();
let mut image_command_to_nodes_mapping =
Self::create_image_command_to_nodes_mapping(network_nodes);
let mut image_command_to_nodes_mapping =
Self::create_image_command_to_nodes_mapping(network_nodes);
let available_args_outputs =
Self::retrieve_all_nodes_available_args_output(ns, &image_command_to_nodes_mapping)
.await?;
let available_args_outputs =
Self::retrieve_all_nodes_available_args_output(ns, &image_command_to_nodes_mapping)
.await?;
Self::update_nodes_available_args_output(
&mut image_command_to_nodes_mapping,
available_args_outputs,
);
Self::update_nodes_available_args_output(
&mut image_command_to_nodes_mapping,
available_args_outputs,
);
Ok(())
}
Ok(())
}
//
pub async fn node_available_args_output(
&self,
node_spec: &NodeSpec,
ns: Arc<dyn ProviderNamespace + Send + Sync>,
) -> Result<String, ProviderError> {
// try to find a node that use the same combination of image/cmd
let cmp_fn = |ad_hoc: &&NodeSpec| -> bool {
ad_hoc.image == node_spec.image && ad_hoc.command == node_spec.command
};
//
pub async fn node_available_args_output(
&self,
node_spec: &NodeSpec,
ns: Arc<dyn ProviderNamespace + Send + Sync>,
) -> Result<String, ProviderError> {
// try to find a node that use the same combination of image/cmd
let cmp_fn = |ad_hoc: &&NodeSpec| -> bool {
ad_hoc.image == node_spec.image && ad_hoc.command == node_spec.command
};
// check if we already had computed the args output for this cmd/[image]
let node = self.relaychain.nodes.iter().find(cmp_fn);
let node = if let Some(node) = node {
Some(node)
} else {
let node = self
.parachains
.iter()
.find_map(|para| para.collators.iter().find(cmp_fn));
// check if we already had computed the args output for this cmd/[image]
let node = self.relaychain.nodes.iter().find(cmp_fn);
let node = if let Some(node) = node {
Some(node)
} else {
let node = self.parachains.iter().find_map(|para| para.collators.iter().find(cmp_fn));
node
};
node
};
let output = if let Some(node) = node {
node.available_args_output.clone().expect(&format!(
"args_output should be set for running nodes {THIS_IS_A_BUG}"
))
} else {
// we need to compute the args output
let image = node_spec
.image
.as_ref()
.map(|image| image.as_str().to_string());
let command = node_spec.command.as_str().to_string();
let output = if let Some(node) = node {
node.available_args_output
.clone()
.expect(&format!("args_output should be set for running nodes {THIS_IS_A_BUG}"))
} else {
// we need to compute the args output
let image = node_spec.image.as_ref().map(|image| image.as_str().to_string());
let command = node_spec.command.as_str().to_string();
ns.get_node_available_args((command, image)).await?
};
ns.get_node_available_args((command, image)).await?
};
Ok(output)
}
Ok(output)
}
pub fn relaychain(&self) -> &RelaychainSpec {
&self.relaychain
}
pub fn relaychain(&self) -> &RelaychainSpec {
&self.relaychain
}
pub fn relaychain_mut(&mut self) -> &mut RelaychainSpec {
&mut self.relaychain
}
pub fn relaychain_mut(&mut self) -> &mut RelaychainSpec {
&mut self.relaychain
}
pub fn parachains_iter(&self) -> impl Iterator<Item = &TeyrchainSpec> {
self.parachains.iter()
}
pub fn parachains_iter(&self) -> impl Iterator<Item = &TeyrchainSpec> {
self.parachains.iter()
}
pub fn parachains_iter_mut(&mut self) -> impl Iterator<Item = &mut TeyrchainSpec> {
self.parachains.iter_mut()
}
pub fn parachains_iter_mut(&mut self) -> impl Iterator<Item = &mut TeyrchainSpec> {
self.parachains.iter_mut()
}
pub fn set_global_settings(&mut self, global_settings: GlobalSettings) {
self.global_settings = global_settings;
}
pub fn set_global_settings(&mut self, global_settings: GlobalSettings) {
self.global_settings = global_settings;
}
pub async fn build_parachain_artifacts<'a, T: FileSystem>(
&mut self,
ns: DynNamespace,
scoped_fs: &ScopedFilesystem<'a, T>,
relaychain_id: &str,
base_dir_exists: bool,
) -> Result<(), anyhow::Error> {
for para in self.parachains.iter_mut() {
let chain_spec_raw_path = para.build_chain_spec(relaychain_id, &ns, scoped_fs).await?;
pub async fn build_parachain_artifacts<'a, T: FileSystem>(
&mut self,
ns: DynNamespace,
scoped_fs: &ScopedFilesystem<'a, T>,
relaychain_id: &str,
base_dir_exists: bool,
) -> Result<(), anyhow::Error> {
for para in self.parachains.iter_mut() {
let chain_spec_raw_path = para.build_chain_spec(relaychain_id, &ns, scoped_fs).await?;
trace!("creating dirs for {}", &para.unique_id);
if base_dir_exists {
scoped_fs.create_dir_all(&para.unique_id).await?;
} else {
scoped_fs.create_dir(&para.unique_id).await?;
};
trace!("created dirs for {}", &para.unique_id);
trace!("creating dirs for {}", &para.unique_id);
if base_dir_exists {
scoped_fs.create_dir_all(&para.unique_id).await?;
} else {
scoped_fs.create_dir(&para.unique_id).await?;
};
trace!("created dirs for {}", &para.unique_id);
// create wasm/state
para.genesis_state
.build(
chain_spec_raw_path.clone(),
format!("{}/genesis-state", para.unique_id),
&ns,
scoped_fs,
None,
)
.await?;
debug!("parachain genesis state built!");
para.genesis_wasm
.build(
chain_spec_raw_path,
format!("{}/genesis-wasm", para.unique_id),
&ns,
scoped_fs,
None,
)
.await?;
debug!("parachain genesis wasm built!");
}
// create wasm/state
para.genesis_state
.build(
chain_spec_raw_path.clone(),
format!("{}/genesis-state", para.unique_id),
&ns,
scoped_fs,
None,
)
.await?;
debug!("parachain genesis state built!");
para.genesis_wasm
.build(
chain_spec_raw_path,
format!("{}/genesis-wasm", para.unique_id),
&ns,
scoped_fs,
None,
)
.await?;
debug!("parachain genesis wasm built!");
}
Ok(())
}
Ok(())
}
// collect mutable references to all nodes from relaychain and parachains
fn collect_network_nodes(&mut self) -> Vec<&mut NodeSpec> {
vec![
self.relaychain.nodes.iter_mut().collect::<Vec<_>>(),
self.parachains
.iter_mut()
.flat_map(|para| para.collators.iter_mut())
.collect(),
]
.into_iter()
.flatten()
.collect::<Vec<_>>()
}
// collect mutable references to all nodes from relaychain and parachains
fn collect_network_nodes(&mut self) -> Vec<&mut NodeSpec> {
vec![
self.relaychain.nodes.iter_mut().collect::<Vec<_>>(),
self.parachains.iter_mut().flat_map(|para| para.collators.iter_mut()).collect(),
]
.into_iter()
.flatten()
.collect::<Vec<_>>()
}
// initialize the mapping of all possible node image/commands to corresponding nodes
fn create_image_command_to_nodes_mapping(
network_nodes: Vec<&mut NodeSpec>,
) -> HashMap<(Option<String>, String), Vec<&mut NodeSpec>> {
network_nodes.into_iter().fold(
HashMap::new(),
|mut acc: HashMap<(Option<String>, String), Vec<&mut node::NodeSpec>>, node| {
// build mapping key using image and command if image is present or command only
let key = node
.image
.as_ref()
.map(|image| {
(
Some(image.as_str().to_string()),
node.command.as_str().to_string(),
)
})
.unwrap_or_else(|| (None, node.command.as_str().to_string()));
// initialize the mapping of all possible node image/commands to corresponding nodes
fn create_image_command_to_nodes_mapping(
network_nodes: Vec<&mut NodeSpec>,
) -> HashMap<(Option<String>, String), Vec<&mut NodeSpec>> {
network_nodes.into_iter().fold(
HashMap::new(),
|mut acc: HashMap<(Option<String>, String), Vec<&mut node::NodeSpec>>, node| {
// build mapping key using image and command if image is present or command only
let key = node
.image
.as_ref()
.map(|image| {
(Some(image.as_str().to_string()), node.command.as_str().to_string())
})
.unwrap_or_else(|| (None, node.command.as_str().to_string()));
// append the node to the vector of nodes for this image/command tuple
if let Entry::Vacant(entry) = acc.entry(key.clone()) {
entry.insert(vec![node]);
} else {
acc.get_mut(&key).unwrap().push(node);
}
// append the node to the vector of nodes for this image/command tuple
if let Entry::Vacant(entry) = acc.entry(key.clone()) {
entry.insert(vec![node]);
} else {
acc.get_mut(&key).unwrap().push(node);
}
acc
},
)
}
acc
},
)
}
async fn retrieve_all_nodes_available_args_output(
ns: Arc<dyn ProviderNamespace + Send + Sync>,
image_command_to_nodes_mapping: &HashMap<(Option<String>, String), Vec<&mut NodeSpec>>,
) -> Result<Vec<(Option<String>, String, String)>, OrchestratorError> {
try_join_all(
image_command_to_nodes_mapping
.keys()
.map(|(image, command)| {
let ns = ns.clone();
let image = image.clone();
let command = command.clone();
async move {
// get node available args output from image/command
let available_args = ns
.get_node_available_args((command.clone(), image.clone()))
.await?;
debug!(
"retrieved available args for image: {:?}, command: {}",
image, command
);
async fn retrieve_all_nodes_available_args_output(
ns: Arc<dyn ProviderNamespace + Send + Sync>,
image_command_to_nodes_mapping: &HashMap<(Option<String>, String), Vec<&mut NodeSpec>>,
) -> Result<Vec<(Option<String>, String, String)>, OrchestratorError> {
try_join_all(
image_command_to_nodes_mapping
.keys()
.map(|(image, command)| {
let ns = ns.clone();
let image = image.clone();
let command = command.clone();
async move {
// get node available args output from image/command
let available_args =
ns.get_node_available_args((command.clone(), image.clone())).await?;
debug!(
"retrieved available args for image: {:?}, command: {}",
image, command
);
// map the result to include image and command
Ok::<_, OrchestratorError>((image, command, available_args))
}
})
.collect::<Vec<_>>(),
)
.await
}
// map the result to include image and command
Ok::<_, OrchestratorError>((image, command, available_args))
}
})
.collect::<Vec<_>>(),
)
.await
}
fn update_nodes_available_args_output(
image_command_to_nodes_mapping: &mut HashMap<(Option<String>, String), Vec<&mut NodeSpec>>,
available_args_outputs: Vec<(Option<String>, String, String)>,
) {
for (image, command, available_args_output) in available_args_outputs {
let nodes = image_command_to_nodes_mapping
.get_mut(&(image, command))
.expect(&format!(
"node image/command key should exist {THIS_IS_A_BUG}"
));
fn update_nodes_available_args_output(
image_command_to_nodes_mapping: &mut HashMap<(Option<String>, String), Vec<&mut NodeSpec>>,
available_args_outputs: Vec<(Option<String>, String, String)>,
) {
for (image, command, available_args_output) in available_args_outputs {
let nodes = image_command_to_nodes_mapping
.get_mut(&(image, command))
.expect(&format!("node image/command key should exist {THIS_IS_A_BUG}"));
for node in nodes {
node.available_args_output = Some(available_args_output.clone());
}
}
}
for node in nodes {
node.available_args_output = Some(available_args_output.clone());
}
}
}
}
#[cfg(test)]
mod tests {
#[tokio::test]
async fn small_network_config_get_spec() {
use configuration::NetworkConfigBuilder;
#[tokio::test]
async fn small_network_config_get_spec() {
use configuration::NetworkConfigBuilder;
use super::*;
use super::*;
let config = NetworkConfigBuilder::new()
.with_relaychain(|r| {
r.with_chain("rococo-local")
.with_default_command("polkadot")
.with_validator(|node| node.with_name("alice"))
.with_fullnode(|node| node.with_name("bob").with_command("polkadot1"))
})
.with_parachain(|p| {
p.with_id(100)
.with_default_command("adder-collator")
.with_collator(|c| c.with_name("collator1"))
})
.build()
.unwrap();
let config = NetworkConfigBuilder::new()
.with_relaychain(|r| {
r.with_chain("rococo-local")
.with_default_command("polkadot")
.with_validator(|node| node.with_name("alice"))
.with_fullnode(|node| node.with_name("bob").with_command("polkadot1"))
})
.with_parachain(|p| {
p.with_id(100)
.with_default_command("adder-collator")
.with_collator(|c| c.with_name("collator1"))
})
.build()
.unwrap();
let network_spec = NetworkSpec::from_config(&config).await.unwrap();
let alice = network_spec.relaychain.nodes.first().unwrap();
let bob = network_spec.relaychain.nodes.get(1).unwrap();
assert_eq!(alice.command.as_str(), "polkadot");
assert_eq!(bob.command.as_str(), "polkadot1");
assert!(alice.is_validator);
assert!(!bob.is_validator);
let network_spec = NetworkSpec::from_config(&config).await.unwrap();
let alice = network_spec.relaychain.nodes.first().unwrap();
let bob = network_spec.relaychain.nodes.get(1).unwrap();
assert_eq!(alice.command.as_str(), "polkadot");
assert_eq!(bob.command.as_str(), "polkadot1");
assert!(alice.is_validator);
assert!(!bob.is_validator);
// paras
assert_eq!(network_spec.parachains.len(), 1);
let para_100 = network_spec.parachains.first().unwrap();
assert_eq!(para_100.id, 100);
}
// paras
assert_eq!(network_spec.parachains.len(), 1);
let para_100 = network_spec.parachains.first().unwrap();
assert_eq!(para_100.id, 100);
}
}
@@ -1,9 +1,9 @@
use std::path::PathBuf;
use configuration::shared::{
node::{EnvVar, NodeConfig},
resources::Resources,
types::{Arg, AssetLocation, Command, Image},
node::{EnvVar, NodeConfig},
resources::Resources,
types::{Arg, AssetLocation, Command, Image},
};
use multiaddr::Multiaddr;
use provider::types::Port;
@@ -11,39 +11,39 @@ use serde::{Deserialize, Serialize};
use support::constants::THIS_IS_A_BUG;
use crate::{
errors::OrchestratorError,
generators,
network::AddNodeOptions,
shared::{
macros,
types::{ChainDefaultContext, NodeAccount, NodeAccounts, ParkedPort},
},
AddCollatorOptions,
errors::OrchestratorError,
generators,
network::AddNodeOptions,
shared::{
macros,
types::{ChainDefaultContext, NodeAccount, NodeAccounts, ParkedPort},
},
AddCollatorOptions,
};
macros::create_add_options!(AddNodeSpecOpts {
override_eth_key: Option<String>
override_eth_key: Option<String>
});
macro_rules! impl_from_for_add_node_opts {
($struct:ident) => {
impl From<$struct> for AddNodeSpecOpts {
fn from(value: $struct) -> Self {
Self {
image: value.image,
command: value.command,
subcommand: value.subcommand,
args: value.args,
env: value.env,
is_validator: value.is_validator,
rpc_port: value.rpc_port,
prometheus_port: value.prometheus_port,
p2p_port: value.p2p_port,
override_eth_key: value.override_eth_key,
}
}
}
};
($struct:ident) => {
impl From<$struct> for AddNodeSpecOpts {
fn from(value: $struct) -> Self {
Self {
image: value.image,
command: value.command,
subcommand: value.subcommand,
args: value.args,
env: value.env,
is_validator: value.is_validator,
rpc_port: value.rpc_port,
prometheus_port: value.prometheus_port,
p2p_port: value.p2p_port,
override_eth_key: value.override_eth_key,
}
}
}
};
}
impl_from_for_add_node_opts!(AddNodeOptions);
@@ -52,305 +52,281 @@ impl_from_for_add_node_opts!(AddCollatorOptions);
/// A node configuration, with fine-grained configuration options.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct NodeSpec {
// Node name (should be unique or an index will be appended).
pub(crate) name: String,
// Node name (should be unique or an index will be appended).
pub(crate) name: String,
/// Node key, used for compute the p2p identity.
pub(crate) key: String,
/// Node key, used for compute the p2p identity.
pub(crate) key: String,
// libp2p local identity
pub(crate) peer_id: String,
// libp2p local identity
pub(crate) peer_id: String,
/// Accounts to be injected in the keystore.
pub(crate) accounts: NodeAccounts,
/// Accounts to be injected in the keystore.
pub(crate) accounts: NodeAccounts,
/// Image to run (only podman/k8s). Override the default.
pub(crate) image: Option<Image>,
/// Image to run (only podman/k8s). Override the default.
pub(crate) image: Option<Image>,
/// Command to run the node. Override the default.
pub(crate) command: Command,
/// Command to run the node. Override the default.
pub(crate) command: Command,
/// Optional subcommand for the node.
pub(crate) subcommand: Option<Command>,
/// Optional subcommand for the node.
pub(crate) subcommand: Option<Command>,
/// Arguments to use for node. Appended to default.
pub(crate) args: Vec<Arg>,
/// Arguments to use for node. Appended to default.
pub(crate) args: Vec<Arg>,
// The help command output containing the available arguments.
pub(crate) available_args_output: Option<String>,
// The help command output containing the available arguments.
pub(crate) available_args_output: Option<String>,
/// Whether the node is a validator.
pub(crate) is_validator: bool,
/// Whether the node is a validator.
pub(crate) is_validator: bool,
/// Whether the node keys must be added to invulnerables.
pub(crate) is_invulnerable: bool,
/// Whether the node keys must be added to invulnerables.
pub(crate) is_invulnerable: bool,
/// Whether the node is a bootnode.
pub(crate) is_bootnode: bool,
/// Whether the node is a bootnode.
pub(crate) is_bootnode: bool,
/// Node initial balance present in genesis.
pub(crate) initial_balance: u128,
/// Node initial balance present in genesis.
pub(crate) initial_balance: u128,
/// Environment variables to set (inside pod for podman/k8s, inside shell for native).
pub(crate) env: Vec<EnvVar>,
/// Environment variables to set (inside pod for podman/k8s, inside shell for native).
pub(crate) env: Vec<EnvVar>,
/// List of node's bootnodes addresses to use. Appended to default.
pub(crate) bootnodes_addresses: Vec<Multiaddr>,
/// List of node's bootnodes addresses to use. Appended to default.
pub(crate) bootnodes_addresses: Vec<Multiaddr>,
/// Default resources. Override the default.
pub(crate) resources: Option<Resources>,
/// Default resources. Override the default.
pub(crate) resources: Option<Resources>,
/// Websocket port to use.
pub(crate) ws_port: ParkedPort,
/// Websocket port to use.
pub(crate) ws_port: ParkedPort,
/// RPC port to use.
pub(crate) rpc_port: ParkedPort,
/// RPC port to use.
pub(crate) rpc_port: ParkedPort,
/// Prometheus port to use.
pub(crate) prometheus_port: ParkedPort,
/// Prometheus port to use.
pub(crate) prometheus_port: ParkedPort,
/// P2P port to use.
pub(crate) p2p_port: ParkedPort,
/// P2P port to use.
pub(crate) p2p_port: ParkedPort,
/// libp2p cert hash to use with `webrtc` transport.
pub(crate) p2p_cert_hash: Option<String>,
/// libp2p cert hash to use with `webrtc` transport.
pub(crate) p2p_cert_hash: Option<String>,
/// Database snapshot. Override the default.
pub(crate) db_snapshot: Option<AssetLocation>,
/// Database snapshot. Override the default.
pub(crate) db_snapshot: Option<AssetLocation>,
/// P2P port to use by full node if this is the case
pub(crate) full_node_p2p_port: Option<ParkedPort>,
/// Prometheus port to use by full node if this is the case
pub(crate) full_node_prometheus_port: Option<ParkedPort>,
/// P2P port to use by full node if this is the case
pub(crate) full_node_p2p_port: Option<ParkedPort>,
/// Prometheus port to use by full node if this is the case
pub(crate) full_node_prometheus_port: Option<ParkedPort>,
/// Optionally specify a log path for the node
pub(crate) node_log_path: Option<PathBuf>,
/// Optionally specify a log path for the node
pub(crate) node_log_path: Option<PathBuf>,
/// Optionally specify a keystore path for the node
pub(crate) keystore_path: Option<PathBuf>,
/// Optionally specify a keystore path for the node
pub(crate) keystore_path: Option<PathBuf>,
/// Keystore key types to generate.
/// Supports short form (e.g., "audi") using predefined schemas,
/// or long form (e.g., "audi_sr") with explicit schema (sr, ed, ec).
pub(crate) keystore_key_types: Vec<String>,
/// Keystore key types to generate.
/// Supports short form (e.g., "audi") using predefined schemas,
/// or long form (e.g., "audi_sr") with explicit schema (sr, ed, ec).
pub(crate) keystore_key_types: Vec<String>,
}
impl NodeSpec {
pub fn from_config(
node_config: &NodeConfig,
chain_context: &ChainDefaultContext,
full_node_present: bool,
evm_based: bool,
) -> Result<Self, OrchestratorError> {
// Check first if the image is set at node level, then try with the default
let image = node_config.image().or(chain_context.default_image).cloned();
pub fn from_config(
node_config: &NodeConfig,
chain_context: &ChainDefaultContext,
full_node_present: bool,
evm_based: bool,
) -> Result<Self, OrchestratorError> {
// Check first if the image is set at node level, then try with the default
let image = node_config.image().or(chain_context.default_image).cloned();
// Check first if the command is set at node level, then try with the default
let command = if let Some(cmd) = node_config.command() {
cmd.clone()
} else if let Some(cmd) = chain_context.default_command {
cmd.clone()
} else {
return Err(OrchestratorError::InvalidNodeConfig(
node_config.name().into(),
"command".to_string(),
));
};
// Check first if the command is set at node level, then try with the default
let command = if let Some(cmd) = node_config.command() {
cmd.clone()
} else if let Some(cmd) = chain_context.default_command {
cmd.clone()
} else {
return Err(OrchestratorError::InvalidNodeConfig(
node_config.name().into(),
"command".to_string(),
));
};
let subcommand = node_config.subcommand().cloned();
let subcommand = node_config.subcommand().cloned();
// If `args` is set at `node` level use them
// otherwise use the default_args (can be empty).
let args: Vec<Arg> = if node_config.args().is_empty() {
chain_context
.default_args
.iter()
.map(|x| x.to_owned().clone())
.collect()
} else {
node_config.args().into_iter().cloned().collect()
};
// If `args` is set at `node` level use them
// otherwise use the default_args (can be empty).
let args: Vec<Arg> = if node_config.args().is_empty() {
chain_context.default_args.iter().map(|x| x.to_owned().clone()).collect()
} else {
node_config.args().into_iter().cloned().collect()
};
let (key, peer_id) = generators::generate_node_identity(node_config.name())?;
let (key, peer_id) = generators::generate_node_identity(node_config.name())?;
let mut name = node_config.name().to_string();
let seed = format!("//{}{name}", name.remove(0).to_uppercase());
let accounts = generators::generate_node_keys(&seed)?;
let mut accounts = NodeAccounts { seed, accounts };
let mut name = node_config.name().to_string();
let seed = format!("//{}{name}", name.remove(0).to_uppercase());
let accounts = generators::generate_node_keys(&seed)?;
let mut accounts = NodeAccounts { seed, accounts };
if evm_based {
if let Some(session_key) = node_config.override_eth_key() {
accounts
.accounts
.insert("eth".into(), NodeAccount::new(session_key, session_key));
}
}
if evm_based {
if let Some(session_key) = node_config.override_eth_key() {
accounts.accounts.insert("eth".into(), NodeAccount::new(session_key, session_key));
}
}
let db_snapshot = match (node_config.db_snapshot(), chain_context.default_db_snapshot) {
(Some(db_snapshot), _) => Some(db_snapshot),
(None, Some(db_snapshot)) => Some(db_snapshot),
_ => None,
};
let db_snapshot = match (node_config.db_snapshot(), chain_context.default_db_snapshot) {
(Some(db_snapshot), _) => Some(db_snapshot),
(None, Some(db_snapshot)) => Some(db_snapshot),
_ => None,
};
let (full_node_p2p_port, full_node_prometheus_port) = if full_node_present {
(
Some(generators::generate_node_port(None)?),
Some(generators::generate_node_port(None)?),
)
} else {
(None, None)
};
let (full_node_p2p_port, full_node_prometheus_port) = if full_node_present {
(
Some(generators::generate_node_port(None)?),
Some(generators::generate_node_port(None)?),
)
} else {
(None, None)
};
Ok(Self {
name: node_config.name().to_string(),
key,
peer_id,
image,
command,
subcommand,
args,
available_args_output: None,
is_validator: node_config.is_validator(),
is_invulnerable: node_config.is_invulnerable(),
is_bootnode: node_config.is_bootnode(),
initial_balance: node_config.initial_balance(),
env: node_config.env().into_iter().cloned().collect(),
bootnodes_addresses: node_config
.bootnodes_addresses()
.into_iter()
.cloned()
.collect(),
resources: node_config.resources().cloned(),
p2p_cert_hash: node_config.p2p_cert_hash().map(str::to_string),
db_snapshot: db_snapshot.cloned(),
accounts,
ws_port: generators::generate_node_port(node_config.ws_port())?,
rpc_port: generators::generate_node_port(node_config.rpc_port())?,
prometheus_port: generators::generate_node_port(node_config.prometheus_port())?,
p2p_port: generators::generate_node_port(node_config.p2p_port())?,
full_node_p2p_port,
full_node_prometheus_port,
node_log_path: node_config.node_log_path().cloned(),
keystore_path: node_config.keystore_path().cloned(),
keystore_key_types: node_config
.keystore_key_types()
.into_iter()
.map(str::to_string)
.collect(),
})
}
Ok(Self {
name: node_config.name().to_string(),
key,
peer_id,
image,
command,
subcommand,
args,
available_args_output: None,
is_validator: node_config.is_validator(),
is_invulnerable: node_config.is_invulnerable(),
is_bootnode: node_config.is_bootnode(),
initial_balance: node_config.initial_balance(),
env: node_config.env().into_iter().cloned().collect(),
bootnodes_addresses: node_config.bootnodes_addresses().into_iter().cloned().collect(),
resources: node_config.resources().cloned(),
p2p_cert_hash: node_config.p2p_cert_hash().map(str::to_string),
db_snapshot: db_snapshot.cloned(),
accounts,
ws_port: generators::generate_node_port(node_config.ws_port())?,
rpc_port: generators::generate_node_port(node_config.rpc_port())?,
prometheus_port: generators::generate_node_port(node_config.prometheus_port())?,
p2p_port: generators::generate_node_port(node_config.p2p_port())?,
full_node_p2p_port,
full_node_prometheus_port,
node_log_path: node_config.node_log_path().cloned(),
keystore_path: node_config.keystore_path().cloned(),
keystore_key_types: node_config
.keystore_key_types()
.into_iter()
.map(str::to_string)
.collect(),
})
}
pub fn from_ad_hoc(
name: impl Into<String>,
options: AddNodeSpecOpts,
chain_context: &ChainDefaultContext,
full_node_present: bool,
evm_based: bool,
) -> Result<Self, OrchestratorError> {
// Check first if the image is set at node level, then try with the default
let image = if let Some(img) = options.image {
Some(img.clone())
} else {
chain_context.default_image.cloned()
};
pub fn from_ad_hoc(
name: impl Into<String>,
options: AddNodeSpecOpts,
chain_context: &ChainDefaultContext,
full_node_present: bool,
evm_based: bool,
) -> Result<Self, OrchestratorError> {
// Check first if the image is set at node level, then try with the default
let image = if let Some(img) = options.image {
Some(img.clone())
} else {
chain_context.default_image.cloned()
};
let name = name.into();
// Check first if the command is set at node level, then try with the default
let command = if let Some(cmd) = options.command {
cmd.clone()
} else if let Some(cmd) = chain_context.default_command {
cmd.clone()
} else {
return Err(OrchestratorError::InvalidNodeConfig(
name,
"command".to_string(),
));
};
let name = name.into();
// Check first if the command is set at node level, then try with the default
let command = if let Some(cmd) = options.command {
cmd.clone()
} else if let Some(cmd) = chain_context.default_command {
cmd.clone()
} else {
return Err(OrchestratorError::InvalidNodeConfig(name, "command".to_string()));
};
let subcommand = options.subcommand.clone();
let subcommand = options.subcommand.clone();
// If `args` is set at `node` level use them
// otherwise use the default_args (can be empty).
let args: Vec<Arg> = if options.args.is_empty() {
chain_context
.default_args
.iter()
.map(|x| x.to_owned().clone())
.collect()
} else {
options.args
};
// If `args` is set at `node` level use them
// otherwise use the default_args (can be empty).
let args: Vec<Arg> = if options.args.is_empty() {
chain_context.default_args.iter().map(|x| x.to_owned().clone()).collect()
} else {
options.args
};
let (key, peer_id) = generators::generate_node_identity(&name)?;
let (key, peer_id) = generators::generate_node_identity(&name)?;
let mut name_capitalized = name.clone();
let seed = format!(
"//{}{name_capitalized}",
name_capitalized.remove(0).to_uppercase()
);
let accounts = generators::generate_node_keys(&seed)?;
let mut accounts = NodeAccounts { seed, accounts };
let mut name_capitalized = name.clone();
let seed = format!("//{}{name_capitalized}", name_capitalized.remove(0).to_uppercase());
let accounts = generators::generate_node_keys(&seed)?;
let mut accounts = NodeAccounts { seed, accounts };
if evm_based {
if let Some(session_key) = options.override_eth_key.as_ref() {
accounts
.accounts
.insert("eth".into(), NodeAccount::new(session_key, session_key));
}
}
if evm_based {
if let Some(session_key) = options.override_eth_key.as_ref() {
accounts.accounts.insert("eth".into(), NodeAccount::new(session_key, session_key));
}
}
let (full_node_p2p_port, full_node_prometheus_port) = if full_node_present {
(
Some(generators::generate_node_port(None)?),
Some(generators::generate_node_port(None)?),
)
} else {
(None, None)
};
let (full_node_p2p_port, full_node_prometheus_port) = if full_node_present {
(
Some(generators::generate_node_port(None)?),
Some(generators::generate_node_port(None)?),
)
} else {
(None, None)
};
//
Ok(Self {
name,
key,
peer_id,
image,
command,
subcommand,
args,
available_args_output: None,
is_validator: options.is_validator,
is_invulnerable: false,
is_bootnode: false,
initial_balance: 0,
env: options.env,
bootnodes_addresses: vec![],
resources: None,
p2p_cert_hash: None,
db_snapshot: None,
accounts,
// should be deprecated now!
ws_port: generators::generate_node_port(None)?,
rpc_port: generators::generate_node_port(options.rpc_port)?,
prometheus_port: generators::generate_node_port(options.prometheus_port)?,
p2p_port: generators::generate_node_port(options.p2p_port)?,
full_node_p2p_port,
full_node_prometheus_port,
node_log_path: None,
keystore_path: None,
keystore_key_types: vec![],
})
}
//
Ok(Self {
name,
key,
peer_id,
image,
command,
subcommand,
args,
available_args_output: None,
is_validator: options.is_validator,
is_invulnerable: false,
is_bootnode: false,
initial_balance: 0,
env: options.env,
bootnodes_addresses: vec![],
resources: None,
p2p_cert_hash: None,
db_snapshot: None,
accounts,
// should be deprecated now!
ws_port: generators::generate_node_port(None)?,
rpc_port: generators::generate_node_port(options.rpc_port)?,
prometheus_port: generators::generate_node_port(options.prometheus_port)?,
p2p_port: generators::generate_node_port(options.p2p_port)?,
full_node_p2p_port,
full_node_prometheus_port,
node_log_path: None,
keystore_path: None,
keystore_key_types: vec![],
})
}
/// Returns `true` if `arg` appears in this node's captured available-args
/// (help command) output.
///
/// # Panics
/// Panics if `available_args_output` has not been populated yet — callers
/// must only invoke this after the help output has been collected.
pub(crate) fn supports_arg(&self, arg: impl AsRef<str>) -> bool {
    self.available_args_output
        .as_ref()
        // `unwrap_or_else` avoids building the panic message on the
        // (expected) success path; `.expect(&format!(..))` allocates it
        // unconditionally (clippy: expect_fun_call).
        .unwrap_or_else(|| {
            panic!("available args should be present at this point {THIS_IS_A_BUG}")
        })
        .contains(arg.as_ref())
}
/// Returns `true` if `arg` appears in this node's captured available-args
/// (help command) output.
///
/// # Panics
/// Panics if `available_args_output` has not been populated yet — callers
/// must only invoke this after the help output has been collected.
pub(crate) fn supports_arg(&self, arg: impl AsRef<str>) -> bool {
    self.available_args_output
        .as_ref()
        // `unwrap_or_else` avoids building the panic message on the
        // (expected) success path; `.expect(&format!(..))` allocates it
        // unconditionally (clippy: expect_fun_call).
        .unwrap_or_else(|| {
            panic!("available args should be present at this point {THIS_IS_A_BUG}")
        })
        .contains(arg.as_ref())
}
pub fn command(&self) -> &str {
self.command.as_str()
}
pub fn command(&self) -> &str {
self.command.as_str()
}
}
@@ -1,181 +1,181 @@
use std::collections::{HashMap, HashSet};
use configuration::{
shared::{
helpers::generate_unique_node_name_from_names,
resources::Resources,
types::{Arg, AssetLocation, Chain, Command, Image},
},
types::JsonOverrides,
NodeConfig, RelaychainConfig,
shared::{
helpers::generate_unique_node_name_from_names,
resources::Resources,
types::{Arg, AssetLocation, Chain, Command, Image},
},
types::JsonOverrides,
NodeConfig, RelaychainConfig,
};
use serde::{Deserialize, Serialize};
use support::replacer::apply_replacements;
use super::node::NodeSpec;
use crate::{
errors::OrchestratorError,
generators::chain_spec::{ChainSpec, Context},
shared::{constants::DEFAULT_CHAIN_SPEC_TPL_COMMAND, types::ChainDefaultContext},
errors::OrchestratorError,
generators::chain_spec::{ChainSpec, Context},
shared::{constants::DEFAULT_CHAIN_SPEC_TPL_COMMAND, types::ChainDefaultContext},
};
/// A relaychain configuration spec
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RelaychainSpec {
/// Chain to use (e.g. rococo-local).
pub(crate) chain: Chain,
/// Chain to use (e.g. rococo-local).
pub(crate) chain: Chain,
/// Default command to run the node. Can be overridden on each node.
pub(crate) default_command: Option<Command>,
/// Default command to run the node. Can be overridden on each node.
pub(crate) default_command: Option<Command>,
/// Default image to use (only podman/k8s). Can be overridden on each node.
pub(crate) default_image: Option<Image>,
/// Default image to use (only podman/k8s). Can be overridden on each node.
pub(crate) default_image: Option<Image>,
/// Default resources. Can be overridden on each node.
pub(crate) default_resources: Option<Resources>,
/// Default resources. Can be overridden on each node.
pub(crate) default_resources: Option<Resources>,
/// Default database snapshot. Can be overridden on each node.
pub(crate) default_db_snapshot: Option<AssetLocation>,
/// Default database snapshot. Can be overridden on each node.
pub(crate) default_db_snapshot: Option<AssetLocation>,
/// Default arguments to use in nodes. Can be overridden on each node.
pub(crate) default_args: Vec<Arg>,
/// Default arguments to use in nodes. Can be overridden on each node.
pub(crate) default_args: Vec<Arg>,
// chain_spec_path: Option<AssetLocation>,
pub(crate) chain_spec: ChainSpec,
// chain_spec_path: Option<AssetLocation>,
pub(crate) chain_spec: ChainSpec,
/// Set the count of nominators to generate (used with PoS networks).
pub(crate) random_nominators_count: u32,
/// Set the count of nominators to generate (used with PoS networks).
pub(crate) random_nominators_count: u32,
/// Set the max nominators value (used with PoS networks).
pub(crate) max_nominations: u8,
/// Set the max nominators value (used with PoS networks).
pub(crate) max_nominations: u8,
/// Genesis overrides as JSON value.
pub(crate) runtime_genesis_patch: Option<serde_json::Value>,
/// Genesis overrides as JSON value.
pub(crate) runtime_genesis_patch: Option<serde_json::Value>,
/// Wasm override path/url to use.
pub(crate) wasm_override: Option<AssetLocation>,
/// Wasm override path/url to use.
pub(crate) wasm_override: Option<AssetLocation>,
/// Nodes to run.
pub(crate) nodes: Vec<NodeSpec>,
/// Nodes to run.
pub(crate) nodes: Vec<NodeSpec>,
/// Raw chain-spec override path, url or inline json to use.
pub(crate) raw_spec_override: Option<JsonOverrides>,
/// Raw chain-spec override path, url or inline json to use.
pub(crate) raw_spec_override: Option<JsonOverrides>,
}
impl RelaychainSpec {
pub fn from_config(config: &RelaychainConfig) -> Result<RelaychainSpec, OrchestratorError> {
// Relaychain main command to use, in order:
// set as `default_command` or
// use the command of the first node.
// If none of those is set, return an error.
let main_cmd = config
.default_command()
.or(config.nodes().first().and_then(|node| node.command()))
.ok_or(OrchestratorError::InvalidConfig(
"Relaychain, either default_command or first node with a command needs to be set."
.to_string(),
))?;
pub fn from_config(config: &RelaychainConfig) -> Result<RelaychainSpec, OrchestratorError> {
// Relaychain main command to use, in order:
// set as `default_command` or
// use the command of the first node.
// If none of those is set, return an error.
let main_cmd = config
.default_command()
.or(config.nodes().first().and_then(|node| node.command()))
.ok_or(OrchestratorError::InvalidConfig(
"Relaychain, either default_command or first node with a command needs to be set."
.to_string(),
))?;
// TODO: internally we use image as String
let main_image = config
.default_image()
.or(config.nodes().first().and_then(|node| node.image()))
.map(|image| image.as_str().to_string());
// TODO: internally we use image as String
let main_image = config
.default_image()
.or(config.nodes().first().and_then(|node| node.image()))
.map(|image| image.as_str().to_string());
let replacements = HashMap::from([
("disableBootnodes", "--disable-default-bootnode"),
("mainCommand", main_cmd.as_str()),
]);
let tmpl = if let Some(tmpl) = config.chain_spec_command() {
apply_replacements(tmpl, &replacements)
} else {
apply_replacements(DEFAULT_CHAIN_SPEC_TPL_COMMAND, &replacements)
};
let replacements = HashMap::from([
("disableBootnodes", "--disable-default-bootnode"),
("mainCommand", main_cmd.as_str()),
]);
let tmpl = if let Some(tmpl) = config.chain_spec_command() {
apply_replacements(tmpl, &replacements)
} else {
apply_replacements(DEFAULT_CHAIN_SPEC_TPL_COMMAND, &replacements)
};
let chain_spec = ChainSpec::new(config.chain().as_str(), Context::Relay)
.set_chain_name(config.chain().as_str())
.command(
tmpl.as_str(),
config.chain_spec_command_is_local(),
config.chain_spec_command_output_path(),
)
.image(main_image.clone());
let chain_spec = ChainSpec::new(config.chain().as_str(), Context::Relay)
.set_chain_name(config.chain().as_str())
.command(
tmpl.as_str(),
config.chain_spec_command_is_local(),
config.chain_spec_command_output_path(),
)
.image(main_image.clone());
// Add asset location if present
let chain_spec = if let Some(chain_spec_path) = config.chain_spec_path() {
chain_spec.asset_location(chain_spec_path.clone())
} else {
chain_spec
};
// Add asset location if present
let chain_spec = if let Some(chain_spec_path) = config.chain_spec_path() {
chain_spec.asset_location(chain_spec_path.clone())
} else {
chain_spec
};
// add chain-spec runtime if present
let chain_spec = if let Some(chain_spec_runtime) = config.chain_spec_runtime() {
chain_spec.runtime(chain_spec_runtime.clone())
} else {
chain_spec
};
// add chain-spec runtime if present
let chain_spec = if let Some(chain_spec_runtime) = config.chain_spec_runtime() {
chain_spec.runtime(chain_spec_runtime.clone())
} else {
chain_spec
};
// build the `node_specs`
let chain_context = ChainDefaultContext {
default_command: config.default_command(),
default_image: config.default_image(),
default_resources: config.default_resources(),
default_db_snapshot: config.default_db_snapshot(),
default_args: config.default_args(),
};
// build the `node_specs`
let chain_context = ChainDefaultContext {
default_command: config.default_command(),
default_image: config.default_image(),
default_resources: config.default_resources(),
default_db_snapshot: config.default_db_snapshot(),
default_args: config.default_args(),
};
let mut nodes: Vec<NodeConfig> = config.nodes().into_iter().cloned().collect();
nodes.extend(
config
.group_node_configs()
.into_iter()
.flat_map(|node_group| node_group.expand_group_configs()),
);
let mut nodes: Vec<NodeConfig> = config.nodes().into_iter().cloned().collect();
nodes.extend(
config
.group_node_configs()
.into_iter()
.flat_map(|node_group| node_group.expand_group_configs()),
);
let mut names = HashSet::new();
let (nodes, mut errs) = nodes
.iter()
.map(|node_config| NodeSpec::from_config(node_config, &chain_context, false, false))
.fold((vec![], vec![]), |(mut nodes, mut errs), result| {
match result {
Ok(mut node) => {
let unique_name =
generate_unique_node_name_from_names(node.name, &mut names);
node.name = unique_name;
nodes.push(node);
},
Err(err) => errs.push(err),
}
(nodes, errs)
});
let mut names = HashSet::new();
let (nodes, mut errs) = nodes
.iter()
.map(|node_config| NodeSpec::from_config(node_config, &chain_context, false, false))
.fold((vec![], vec![]), |(mut nodes, mut errs), result| {
match result {
Ok(mut node) => {
let unique_name =
generate_unique_node_name_from_names(node.name, &mut names);
node.name = unique_name;
nodes.push(node);
},
Err(err) => errs.push(err),
}
(nodes, errs)
});
if !errs.is_empty() {
// TODO: merge errs, maybe return something like Result<Sometype, Vec<OrchestratorError>>
return Err(errs.swap_remove(0));
}
if !errs.is_empty() {
// TODO: merge errs, maybe return something like Result<Sometype, Vec<OrchestratorError>>
return Err(errs.swap_remove(0));
}
Ok(RelaychainSpec {
chain: config.chain().clone(),
default_command: config.default_command().cloned(),
default_image: config.default_image().cloned(),
default_resources: config.default_resources().cloned(),
default_db_snapshot: config.default_db_snapshot().cloned(),
wasm_override: config.wasm_override().cloned(),
default_args: config.default_args().into_iter().cloned().collect(),
chain_spec,
random_nominators_count: config.random_nominators_count().unwrap_or(0),
max_nominations: config.max_nominations().unwrap_or(24),
runtime_genesis_patch: config.runtime_genesis_patch().cloned(),
nodes,
raw_spec_override: config.raw_spec_override().cloned(),
})
}
Ok(RelaychainSpec {
chain: config.chain().clone(),
default_command: config.default_command().cloned(),
default_image: config.default_image().cloned(),
default_resources: config.default_resources().cloned(),
default_db_snapshot: config.default_db_snapshot().cloned(),
wasm_override: config.wasm_override().cloned(),
default_args: config.default_args().into_iter().cloned().collect(),
chain_spec,
random_nominators_count: config.random_nominators_count().unwrap_or(0),
max_nominations: config.max_nominations().unwrap_or(24),
runtime_genesis_patch: config.runtime_genesis_patch().cloned(),
nodes,
raw_spec_override: config.raw_spec_override().cloned(),
})
}
pub fn chain_spec(&self) -> &ChainSpec {
&self.chain_spec
}
pub fn chain_spec(&self) -> &ChainSpec {
&self.chain_spec
}
pub fn chain_spec_mut(&mut self) -> &mut ChainSpec {
&mut self.chain_spec
}
pub fn chain_spec_mut(&mut self) -> &mut ChainSpec {
&mut self.chain_spec
}
}
@@ -1,12 +1,12 @@
use std::{
collections::{HashMap, HashSet},
path::PathBuf,
collections::{HashMap, HashSet},
path::PathBuf,
};
use configuration::{
shared::{helpers::generate_unique_node_name_from_names, resources::Resources},
types::{Arg, AssetLocation, Chain, Command, Image, JsonOverrides},
NodeConfig, ParachainConfig, RegistrationStrategy,
shared::{helpers::generate_unique_node_name_from_names, resources::Resources},
types::{Arg, AssetLocation, Chain, Command, Image, JsonOverrides},
NodeConfig, ParachainConfig, RegistrationStrategy,
};
use provider::DynNamespace;
use serde::{Deserialize, Serialize};
@@ -15,372 +15,348 @@ use tracing::debug;
use super::node::NodeSpec;
use crate::{
errors::OrchestratorError,
generators::{
chain_spec::{ChainSpec, Context, ParaGenesisConfig},
para_artifact::*,
},
shared::{constants::DEFAULT_CHAIN_SPEC_TPL_COMMAND, types::ChainDefaultContext},
ScopedFilesystem,
errors::OrchestratorError,
generators::{
chain_spec::{ChainSpec, Context, ParaGenesisConfig},
para_artifact::*,
},
shared::{constants::DEFAULT_CHAIN_SPEC_TPL_COMMAND, types::ChainDefaultContext},
ScopedFilesystem,
};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TeyrchainSpec {
// `name` of the parachain (used in some corner cases)
// name: Option<Chain>,
/// Parachain id
pub(crate) id: u32,
// `name` of the parachain (used in some corner cases)
// name: Option<Chain>,
/// Parachain id
pub(crate) id: u32,
/// Unique id of the parachain, in the pattern of <para_id>-<n>
/// where the suffix is only present if more than one parachain is set with the same id
pub(crate) unique_id: String,
/// Unique id of the parachain, in the pattern of <para_id>-<n>
/// where the suffix is only present if more than one parachain is set with the same id
pub(crate) unique_id: String,
/// Default command to run the node. Can be overridden on each node.
pub(crate) default_command: Option<Command>,
/// Default command to run the node. Can be overridden on each node.
pub(crate) default_command: Option<Command>,
/// Default image to use (only podman/k8s). Can be overridden on each node.
pub(crate) default_image: Option<Image>,
/// Default image to use (only podman/k8s). Can be overridden on each node.
pub(crate) default_image: Option<Image>,
/// Default resources. Can be overridden on each node.
pub(crate) default_resources: Option<Resources>,
/// Default resources. Can be overridden on each node.
pub(crate) default_resources: Option<Resources>,
/// Default database snapshot. Can be overridden on each node.
pub(crate) default_db_snapshot: Option<AssetLocation>,
/// Default database snapshot. Can be overridden on each node.
pub(crate) default_db_snapshot: Option<AssetLocation>,
/// Default arguments to use in nodes. Can be overridden on each node.
pub(crate) default_args: Vec<Arg>,
/// Default arguments to use in nodes. Can be overridden on each node.
pub(crate) default_args: Vec<Arg>,
/// Chain-spec, only needed by cumulus based paras
pub(crate) chain_spec: Option<ChainSpec>,
/// Chain-spec, only needed by cumulus based paras
pub(crate) chain_spec: Option<ChainSpec>,
/// Do not automatically assign a bootnode role if no nodes are marked as bootnodes.
pub(crate) no_default_bootnodes: bool,
/// Do not automatically assign a bootnode role if no nodes are marked as bootnodes.
pub(crate) no_default_bootnodes: bool,
/// Registration strategy to use
pub(crate) registration_strategy: RegistrationStrategy,
/// Registration strategy to use
pub(crate) registration_strategy: RegistrationStrategy,
/// Onboard as parachain or parathread
pub(crate) onboard_as_parachain: bool,
/// Onboard as parachain or parathread
pub(crate) onboard_as_parachain: bool,
/// Is the parachain cumulus-based
pub(crate) is_cumulus_based: bool,
/// Is the parachain cumulus-based
pub(crate) is_cumulus_based: bool,
/// Is the parachain evm-based
pub(crate) is_evm_based: bool,
/// Is the parachain evm-based
pub(crate) is_evm_based: bool,
/// Initial balance
pub(crate) initial_balance: u128,
/// Initial balance
pub(crate) initial_balance: u128,
/// Genesis state (head) to register the parachain
pub(crate) genesis_state: ParaArtifact,
/// Genesis state (head) to register the parachain
pub(crate) genesis_state: ParaArtifact,
/// Genesis WASM to register the parachain
pub(crate) genesis_wasm: ParaArtifact,
/// Genesis WASM to register the parachain
pub(crate) genesis_wasm: ParaArtifact,
/// Genesis overrides as JSON value.
pub(crate) genesis_overrides: Option<serde_json::Value>,
/// Genesis overrides as JSON value.
pub(crate) genesis_overrides: Option<serde_json::Value>,
/// Wasm override path/url to use.
pub(crate) wasm_override: Option<AssetLocation>,
/// Wasm override path/url to use.
pub(crate) wasm_override: Option<AssetLocation>,
/// Collators to spawn
pub(crate) collators: Vec<NodeSpec>,
/// Collators to spawn
pub(crate) collators: Vec<NodeSpec>,
/// Raw chain-spec override path, url or inline json to use.
pub(crate) raw_spec_override: Option<JsonOverrides>,
/// Raw chain-spec override path, url or inline json to use.
pub(crate) raw_spec_override: Option<JsonOverrides>,
/// Bootnodes addresses to use for the parachain nodes
pub(crate) bootnodes_addresses: Vec<multiaddr::Multiaddr>,
/// Bootnodes addresses to use for the parachain nodes
pub(crate) bootnodes_addresses: Vec<multiaddr::Multiaddr>,
}
impl TeyrchainSpec {
pub fn from_config(
config: &ParachainConfig,
relay_chain: Chain,
) -> Result<TeyrchainSpec, OrchestratorError> {
let main_cmd = if let Some(cmd) = config.default_command() {
cmd
} else if let Some(first_node) = config.collators().first() {
let Some(cmd) = first_node.command() else {
return Err(OrchestratorError::InvalidConfig(format!("Parachain {}, either default_command or command in the first node needs to be set.", config.id())));
};
pub fn from_config(
config: &ParachainConfig,
relay_chain: Chain,
) -> Result<TeyrchainSpec, OrchestratorError> {
let main_cmd = if let Some(cmd) = config.default_command() {
cmd
} else if let Some(first_node) = config.collators().first() {
let Some(cmd) = first_node.command() else {
return Err(OrchestratorError::InvalidConfig(format!("Parachain {}, either default_command or command in the first node needs to be set.", config.id())));
};
cmd
} else {
return Err(OrchestratorError::InvalidConfig(format!(
"Parachain {}, without nodes and default_command isn't set.",
config.id()
)));
};
cmd
} else {
return Err(OrchestratorError::InvalidConfig(format!(
"Parachain {}, without nodes and default_command isn't set.",
config.id()
)));
};
// TODO: internally we use image as String
let main_image = config
.default_image()
.or(config.collators().first().and_then(|node| node.image()))
.map(|image| image.as_str().to_string());
// TODO: internally we use image as String
let main_image = config
.default_image()
.or(config.collators().first().and_then(|node| node.image()))
.map(|image| image.as_str().to_string());
let chain_spec = if config.is_cumulus_based() {
// we need a chain-spec
let chain_name = if let Some(chain_name) = config.chain() {
chain_name.as_str()
} else {
""
};
let chain_spec = if config.is_cumulus_based() {
// we need a chain-spec
let chain_name =
if let Some(chain_name) = config.chain() { chain_name.as_str() } else { "" };
let chain_spec_builder = if chain_name.is_empty() {
// if the chain don't have name use the unique_id for the name of the file
ChainSpec::new(
config.unique_id().to_string(),
Context::Para {
relay_chain,
para_id: config.id(),
},
)
} else {
let chain_spec_file_name = if config.unique_id().contains('-') {
&format!("{}-{}", chain_name, config.unique_id())
} else {
chain_name
};
ChainSpec::new(
chain_spec_file_name,
Context::Para {
relay_chain,
para_id: config.id(),
},
)
};
let chain_spec_builder = chain_spec_builder.set_chain_name(chain_name);
let chain_spec_builder = if chain_name.is_empty() {
// if the chain don't have name use the unique_id for the name of the file
ChainSpec::new(
config.unique_id().to_string(),
Context::Para { relay_chain, para_id: config.id() },
)
} else {
let chain_spec_file_name = if config.unique_id().contains('-') {
&format!("{}-{}", chain_name, config.unique_id())
} else {
chain_name
};
ChainSpec::new(
chain_spec_file_name,
Context::Para { relay_chain, para_id: config.id() },
)
};
let chain_spec_builder = chain_spec_builder.set_chain_name(chain_name);
let replacements = HashMap::from([
("disableBootnodes", "--disable-default-bootnode"),
("mainCommand", main_cmd.as_str()),
]);
let tmpl = if let Some(tmpl) = config.chain_spec_command() {
apply_replacements(tmpl, &replacements)
} else {
apply_replacements(DEFAULT_CHAIN_SPEC_TPL_COMMAND, &replacements)
};
let replacements = HashMap::from([
("disableBootnodes", "--disable-default-bootnode"),
("mainCommand", main_cmd.as_str()),
]);
let tmpl = if let Some(tmpl) = config.chain_spec_command() {
apply_replacements(tmpl, &replacements)
} else {
apply_replacements(DEFAULT_CHAIN_SPEC_TPL_COMMAND, &replacements)
};
let chain_spec = chain_spec_builder
.command(
tmpl.as_str(),
config.chain_spec_command_is_local(),
config.chain_spec_command_output_path(),
)
.image(main_image.clone());
let chain_spec = chain_spec_builder
.command(
tmpl.as_str(),
config.chain_spec_command_is_local(),
config.chain_spec_command_output_path(),
)
.image(main_image.clone());
let chain_spec = if let Some(chain_spec_path) = config.chain_spec_path() {
chain_spec.asset_location(chain_spec_path.clone())
} else {
chain_spec
};
let chain_spec = if let Some(chain_spec_path) = config.chain_spec_path() {
chain_spec.asset_location(chain_spec_path.clone())
} else {
chain_spec
};
// add chain-spec runtime if present
let chain_spec = if let Some(chain_spec_runtime) = config.chain_spec_runtime() {
chain_spec.runtime(chain_spec_runtime.clone())
} else {
chain_spec
};
// add chain-spec runtime if present
let chain_spec = if let Some(chain_spec_runtime) = config.chain_spec_runtime() {
chain_spec.runtime(chain_spec_runtime.clone())
} else {
chain_spec
};
Some(chain_spec)
} else {
None
};
Some(chain_spec)
} else {
None
};
// build the `node_specs`
let chain_context = ChainDefaultContext {
default_command: config.default_command(),
default_image: config.default_image(),
default_resources: config.default_resources(),
default_db_snapshot: config.default_db_snapshot(),
default_args: config.default_args(),
};
// build the `node_specs`
let chain_context = ChainDefaultContext {
default_command: config.default_command(),
default_image: config.default_image(),
default_resources: config.default_resources(),
default_db_snapshot: config.default_db_snapshot(),
default_args: config.default_args(),
};
// We want to track the errors for all the nodes and report them ones
let mut errs: Vec<OrchestratorError> = Default::default();
let mut collators: Vec<NodeSpec> = Default::default();
// We want to track the errors for all the nodes and report them ones
let mut errs: Vec<OrchestratorError> = Default::default();
let mut collators: Vec<NodeSpec> = Default::default();
let mut nodes: Vec<NodeConfig> = config.collators().into_iter().cloned().collect();
nodes.extend(
config
.group_collators_configs()
.into_iter()
.flat_map(|node_group| node_group.expand_group_configs()),
);
let mut nodes: Vec<NodeConfig> = config.collators().into_iter().cloned().collect();
nodes.extend(
config
.group_collators_configs()
.into_iter()
.flat_map(|node_group| node_group.expand_group_configs()),
);
let mut names = HashSet::new();
for node_config in nodes {
match NodeSpec::from_config(&node_config, &chain_context, true, config.is_evm_based()) {
Ok(mut node) => {
let unique_name = generate_unique_node_name_from_names(node.name, &mut names);
node.name = unique_name;
collators.push(node)
},
Err(err) => errs.push(err),
}
}
let genesis_state = if let Some(path) = config.genesis_state_path() {
ParaArtifact::new(
ParaArtifactType::State,
ParaArtifactBuildOption::Path(path.to_string()),
)
} else {
let cmd = if let Some(cmd) = config.genesis_state_generator() {
cmd.cmd()
} else {
main_cmd
};
ParaArtifact::new(
ParaArtifactType::State,
ParaArtifactBuildOption::Command(cmd.as_str().into()),
)
.image(main_image.clone())
};
let mut names = HashSet::new();
for node_config in nodes {
match NodeSpec::from_config(&node_config, &chain_context, true, config.is_evm_based()) {
Ok(mut node) => {
let unique_name = generate_unique_node_name_from_names(node.name, &mut names);
node.name = unique_name;
collators.push(node)
},
Err(err) => errs.push(err),
}
}
let genesis_state = if let Some(path) = config.genesis_state_path() {
ParaArtifact::new(
ParaArtifactType::State,
ParaArtifactBuildOption::Path(path.to_string()),
)
} else {
let cmd =
if let Some(cmd) = config.genesis_state_generator() { cmd.cmd() } else { main_cmd };
ParaArtifact::new(
ParaArtifactType::State,
ParaArtifactBuildOption::Command(cmd.as_str().into()),
)
.image(main_image.clone())
};
let genesis_wasm = if let Some(path) = config.genesis_wasm_path() {
ParaArtifact::new(
ParaArtifactType::Wasm,
ParaArtifactBuildOption::Path(path.to_string()),
)
} else {
let cmd = if let Some(cmd) = config.genesis_wasm_generator() {
cmd.as_str()
} else {
main_cmd.as_str()
};
ParaArtifact::new(
ParaArtifactType::Wasm,
ParaArtifactBuildOption::Command(cmd.into()),
)
.image(main_image.clone())
};
let genesis_wasm = if let Some(path) = config.genesis_wasm_path() {
ParaArtifact::new(
ParaArtifactType::Wasm,
ParaArtifactBuildOption::Path(path.to_string()),
)
} else {
let cmd = if let Some(cmd) = config.genesis_wasm_generator() {
cmd.as_str()
} else {
main_cmd.as_str()
};
ParaArtifact::new(ParaArtifactType::Wasm, ParaArtifactBuildOption::Command(cmd.into()))
.image(main_image.clone())
};
let para_spec = TeyrchainSpec {
id: config.id(),
// ensure unique id is set at this point, if not just set to the para_id
unique_id: if config.unique_id().is_empty() {
config.id().to_string()
} else {
config.unique_id().to_string()
},
default_command: config.default_command().cloned(),
default_image: config.default_image().cloned(),
default_resources: config.default_resources().cloned(),
default_db_snapshot: config.default_db_snapshot().cloned(),
wasm_override: config.wasm_override().cloned(),
default_args: config.default_args().into_iter().cloned().collect(),
chain_spec,
no_default_bootnodes: config.no_default_bootnodes(),
registration_strategy: config
.registration_strategy()
.unwrap_or(&RegistrationStrategy::InGenesis)
.clone(),
onboard_as_parachain: config.onboard_as_parachain(),
is_cumulus_based: config.is_cumulus_based(),
is_evm_based: config.is_evm_based(),
initial_balance: config.initial_balance(),
genesis_state,
genesis_wasm,
genesis_overrides: config.genesis_overrides().cloned(),
collators,
raw_spec_override: config.raw_spec_override().cloned(),
bootnodes_addresses: config.bootnodes_addresses().into_iter().cloned().collect(),
};
let para_spec = TeyrchainSpec {
id: config.id(),
// ensure unique id is set at this point, if not just set to the para_id
unique_id: if config.unique_id().is_empty() {
config.id().to_string()
} else {
config.unique_id().to_string()
},
default_command: config.default_command().cloned(),
default_image: config.default_image().cloned(),
default_resources: config.default_resources().cloned(),
default_db_snapshot: config.default_db_snapshot().cloned(),
wasm_override: config.wasm_override().cloned(),
default_args: config.default_args().into_iter().cloned().collect(),
chain_spec,
no_default_bootnodes: config.no_default_bootnodes(),
registration_strategy: config
.registration_strategy()
.unwrap_or(&RegistrationStrategy::InGenesis)
.clone(),
onboard_as_parachain: config.onboard_as_parachain(),
is_cumulus_based: config.is_cumulus_based(),
is_evm_based: config.is_evm_based(),
initial_balance: config.initial_balance(),
genesis_state,
genesis_wasm,
genesis_overrides: config.genesis_overrides().cloned(),
collators,
raw_spec_override: config.raw_spec_override().cloned(),
bootnodes_addresses: config.bootnodes_addresses().into_iter().cloned().collect(),
};
Ok(para_spec)
}
Ok(para_spec)
}
pub fn registration_strategy(&self) -> &RegistrationStrategy {
&self.registration_strategy
}
pub fn registration_strategy(&self) -> &RegistrationStrategy {
&self.registration_strategy
}
pub fn get_genesis_config(&self) -> Result<ParaGenesisConfig<&PathBuf>, OrchestratorError> {
let genesis_config = ParaGenesisConfig {
state_path: self.genesis_state.artifact_path().ok_or(
OrchestratorError::InvariantError(
"artifact path for state must be set at this point",
),
)?,
wasm_path: self.genesis_wasm.artifact_path().ok_or(
OrchestratorError::InvariantError(
"artifact path for wasm must be set at this point",
),
)?,
id: self.id,
as_parachain: self.onboard_as_parachain,
};
Ok(genesis_config)
}
pub fn get_genesis_config(&self) -> Result<ParaGenesisConfig<&PathBuf>, OrchestratorError> {
let genesis_config = ParaGenesisConfig {
state_path: self.genesis_state.artifact_path().ok_or(
OrchestratorError::InvariantError(
"artifact path for state must be set at this point",
),
)?,
wasm_path: self.genesis_wasm.artifact_path().ok_or(
OrchestratorError::InvariantError(
"artifact path for wasm must be set at this point",
),
)?,
id: self.id,
as_parachain: self.onboard_as_parachain,
};
Ok(genesis_config)
}
pub fn id(&self) -> u32 {
self.id
}
pub fn id(&self) -> u32 {
self.id
}
pub fn chain_spec(&self) -> Option<&ChainSpec> {
self.chain_spec.as_ref()
}
pub fn chain_spec(&self) -> Option<&ChainSpec> {
self.chain_spec.as_ref()
}
pub fn chain_spec_mut(&mut self) -> Option<&mut ChainSpec> {
self.chain_spec.as_mut()
}
pub fn chain_spec_mut(&mut self) -> Option<&mut ChainSpec> {
self.chain_spec.as_mut()
}
/// Build parachain chain-spec
///
/// This function customize the chain-spec (if is possible) and build the raw version
/// of the chain-spec.
pub(crate) async fn build_chain_spec<'a, T>(
&mut self,
relay_chain_id: &str,
ns: &DynNamespace,
scoped_fs: &ScopedFilesystem<'a, T>,
) -> Result<Option<PathBuf>, anyhow::Error>
where
T: FileSystem,
{
let cloned = self.clone();
let chain_spec_raw_path = if let Some(chain_spec) = self.chain_spec.as_mut() {
debug!("parachain chain-spec building!");
chain_spec.build(ns, scoped_fs).await?;
debug!("parachain chain-spec built!");
/// Build parachain chain-spec
///
/// This function customize the chain-spec (if is possible) and build the raw version
/// of the chain-spec.
pub(crate) async fn build_chain_spec<'a, T>(
&mut self,
relay_chain_id: &str,
ns: &DynNamespace,
scoped_fs: &ScopedFilesystem<'a, T>,
) -> Result<Option<PathBuf>, anyhow::Error>
where
T: FileSystem,
{
let cloned = self.clone();
let chain_spec_raw_path = if let Some(chain_spec) = self.chain_spec.as_mut() {
debug!("parachain chain-spec building!");
chain_spec.build(ns, scoped_fs).await?;
debug!("parachain chain-spec built!");
chain_spec
.customize_para(&cloned, relay_chain_id, scoped_fs)
.await?;
debug!("parachain chain-spec customized!");
chain_spec
.build_raw(ns, scoped_fs, Some(relay_chain_id.try_into()?))
.await?;
debug!("parachain chain-spec raw built!");
chain_spec.customize_para(&cloned, relay_chain_id, scoped_fs).await?;
debug!("parachain chain-spec customized!");
chain_spec.build_raw(ns, scoped_fs, Some(relay_chain_id.try_into()?)).await?;
debug!("parachain chain-spec raw built!");
// override wasm if needed
if let Some(ref wasm_override) = self.wasm_override {
chain_spec.override_code(scoped_fs, wasm_override).await?;
}
// override wasm if needed
if let Some(ref wasm_override) = self.wasm_override {
chain_spec.override_code(scoped_fs, wasm_override).await?;
}
// override raw spec if needed
if let Some(ref raw_spec_override) = self.raw_spec_override {
chain_spec
.override_raw_spec(scoped_fs, raw_spec_override)
.await?;
}
// override raw spec if needed
if let Some(ref raw_spec_override) = self.raw_spec_override {
chain_spec.override_raw_spec(scoped_fs, raw_spec_override).await?;
}
let chain_spec_raw_path =
chain_spec
.raw_path()
.ok_or(OrchestratorError::InvariantError(
"chain-spec raw path should be set now",
))?;
let chain_spec_raw_path = chain_spec.raw_path().ok_or(
OrchestratorError::InvariantError("chain-spec raw path should be set now"),
)?;
Some(chain_spec_raw_path.to_path_buf())
} else {
None
};
Ok(chain_spec_raw_path)
}
Some(chain_spec_raw_path.to_path_buf())
} else {
None
};
Ok(chain_spec_raw_path)
}
/// Get the bootnodes addresses for the parachain spec
pub(crate) fn bootnodes_addresses(&self) -> Vec<&multiaddr::Multiaddr> {
self.bootnodes_addresses.iter().collect()
}
/// Get the bootnodes addresses for the parachain spec
pub(crate) fn bootnodes_addresses(&self) -> Vec<&multiaddr::Multiaddr> {
self.bootnodes_addresses.iter().collect()
}
}
@@ -10,7 +10,7 @@ pub const RPC_HTTP_PORT: u16 = 9933;
pub const P2P_PORT: u16 = 30333;
// default command template to build chain-spec
pub const DEFAULT_CHAIN_SPEC_TPL_COMMAND: &str =
"{{mainCommand}} build-spec --chain {{chainName}} {{disableBootnodes}}";
"{{mainCommand}} build-spec --chain {{chainName}} {{disableBootnodes}}";
// interval to determine how often to run node liveness checks
pub const NODE_MONITORING_INTERVAL_SECONDS: u64 = 15;
// how long to wait before a node is considered unresponsive
@@ -1,13 +1,13 @@
use std::{
collections::HashMap,
net::TcpListener,
path::PathBuf,
sync::{Arc, RwLock},
collections::HashMap,
net::TcpListener,
path::PathBuf,
sync::{Arc, RwLock},
};
use configuration::shared::{
resources::Resources,
types::{Arg, AssetLocation, Command, Image, Port},
resources::Resources,
types::{Arg, AssetLocation, Command, Image, Port},
};
use serde::{Deserialize, Serialize};
@@ -15,85 +15,75 @@ pub type Accounts = HashMap<String, NodeAccount>;
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct NodeAccount {
pub address: String,
pub public_key: String,
pub address: String,
pub public_key: String,
}
impl NodeAccount {
pub fn new(addr: impl Into<String>, pk: impl Into<String>) -> Self {
Self {
address: addr.into(),
public_key: pk.into(),
}
}
pub fn new(addr: impl Into<String>, pk: impl Into<String>) -> Self {
Self { address: addr.into(), public_key: pk.into() }
}
}
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct NodeAccounts {
pub seed: String,
pub accounts: Accounts,
pub seed: String,
pub accounts: Accounts,
}
#[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct ParkedPort(
pub(crate) Port,
#[serde(skip)] pub(crate) Arc<RwLock<Option<TcpListener>>>,
);
pub struct ParkedPort(pub(crate) Port, #[serde(skip)] pub(crate) Arc<RwLock<Option<TcpListener>>>);
impl ParkedPort {
pub(crate) fn new(port: u16, listener: TcpListener) -> ParkedPort {
let listener = Arc::new(RwLock::new(Some(listener)));
ParkedPort(port, listener)
}
pub(crate) fn new(port: u16, listener: TcpListener) -> ParkedPort {
let listener = Arc::new(RwLock::new(Some(listener)));
ParkedPort(port, listener)
}
pub(crate) fn drop_listener(&self) {
// drop the listener will allow the running node to start listenen connections
let mut l = self.1.write().unwrap();
*l = None;
}
pub(crate) fn drop_listener(&self) {
// drop the listener will allow the running node to start listenen connections
let mut l = self.1.write().unwrap();
*l = None;
}
}
#[derive(Debug, Clone, Default)]
pub struct ChainDefaultContext<'a> {
pub default_command: Option<&'a Command>,
pub default_image: Option<&'a Image>,
pub default_resources: Option<&'a Resources>,
pub default_db_snapshot: Option<&'a AssetLocation>,
pub default_args: Vec<&'a Arg>,
pub default_command: Option<&'a Command>,
pub default_image: Option<&'a Image>,
pub default_resources: Option<&'a Resources>,
pub default_db_snapshot: Option<&'a AssetLocation>,
pub default_args: Vec<&'a Arg>,
}
#[derive(Debug, Clone)]
pub struct RegisterParachainOptions {
pub id: u32,
pub wasm_path: PathBuf,
pub state_path: PathBuf,
pub node_ws_url: String,
pub onboard_as_para: bool,
pub seed: Option<[u8; 32]>,
pub finalization: bool,
pub id: u32,
pub wasm_path: PathBuf,
pub state_path: PathBuf,
pub node_ws_url: String,
pub onboard_as_para: bool,
pub seed: Option<[u8; 32]>,
pub finalization: bool,
}
pub struct RuntimeUpgradeOptions {
/// Location of the wasm file (could be either a local file or an url)
pub wasm: AssetLocation,
/// Name of the node to use as rpc endpoint
pub node_name: Option<String>,
/// Seed to use to sign and submit (default to //Alice)
pub seed: Option<[u8; 32]>,
/// Location of the wasm file (could be either a local file or an url)
pub wasm: AssetLocation,
/// Name of the node to use as rpc endpoint
pub node_name: Option<String>,
/// Seed to use to sign and submit (default to //Alice)
pub seed: Option<[u8; 32]>,
}
impl RuntimeUpgradeOptions {
pub fn new(wasm: AssetLocation) -> Self {
Self {
wasm,
node_name: None,
seed: None,
}
}
pub fn new(wasm: AssetLocation) -> Self {
Self { wasm, node_name: None, seed: None }
}
}
#[derive(Debug, Clone)]
pub struct ParachainGenesisArgs {
pub genesis_head: String,
pub validation_code: String,
pub parachain: bool,
pub genesis_head: String,
pub validation_code: String,
pub parachain: bool,
}
+222 -252
View File
@@ -3,303 +3,273 @@ use std::{collections::HashMap, path::PathBuf};
use anyhow::Context;
use configuration::GlobalSettings;
use provider::{
constants::{LOCALHOST, NODE_CONFIG_DIR, NODE_DATA_DIR, NODE_RELAY_DATA_DIR, P2P_PORT},
shared::helpers::running_in_ci,
types::{SpawnNodeOptions, TransferedFile},
DynNamespace,
constants::{LOCALHOST, NODE_CONFIG_DIR, NODE_DATA_DIR, NODE_RELAY_DATA_DIR, P2P_PORT},
shared::helpers::running_in_ci,
types::{SpawnNodeOptions, TransferedFile},
DynNamespace,
};
use support::{
constants::THIS_IS_A_BUG, fs::FileSystem, replacer::apply_running_network_replacements,
constants::THIS_IS_A_BUG, fs::FileSystem, replacer::apply_running_network_replacements,
};
use tracing::info;
use crate::{
generators,
network::node::NetworkNode,
network_spec::{node::NodeSpec, teyrchain::TeyrchainSpec},
shared::constants::{FULL_NODE_PROMETHEUS_PORT, PROMETHEUS_PORT, RPC_PORT},
ScopedFilesystem, ZombieRole,
generators,
network::node::NetworkNode,
network_spec::{node::NodeSpec, teyrchain::TeyrchainSpec},
shared::constants::{FULL_NODE_PROMETHEUS_PORT, PROMETHEUS_PORT, RPC_PORT},
ScopedFilesystem, ZombieRole,
};
#[derive(Clone)]
pub struct SpawnNodeCtx<'a, T: FileSystem> {
/// Relaychain id, from the chain-spec (e.g rococo_local_testnet)
pub(crate) chain_id: &'a str,
// Parachain id, from the chain-spec (e.g local_testnet)
pub(crate) parachain_id: Option<&'a str>,
/// Relaychain chain name (e.g rococo-local)
pub(crate) chain: &'a str,
/// Role of the node in the network
pub(crate) role: ZombieRole,
/// Ref to the namespace
pub(crate) ns: &'a DynNamespace,
/// Ref to an scoped filesystem (encapsulate fs actions inside the ns directory)
pub(crate) scoped_fs: &'a ScopedFilesystem<'a, T>,
/// Ref to a parachain (used to spawn collators)
pub(crate) parachain: Option<&'a TeyrchainSpec>,
/// The string representation of the bootnode address to pass to nodes
pub(crate) bootnodes_addr: &'a Vec<String>,
/// Flag to wait node is ready or not
/// Ready state means we can query Prometheus internal server
pub(crate) wait_ready: bool,
/// A json representation of the running nodes with their names as 'key'
pub(crate) nodes_by_name: serde_json::Value,
/// A ref to the global settings
pub(crate) global_settings: &'a GlobalSettings,
/// Relaychain id, from the chain-spec (e.g rococo_local_testnet)
pub(crate) chain_id: &'a str,
// Parachain id, from the chain-spec (e.g local_testnet)
pub(crate) parachain_id: Option<&'a str>,
/// Relaychain chain name (e.g rococo-local)
pub(crate) chain: &'a str,
/// Role of the node in the network
pub(crate) role: ZombieRole,
/// Ref to the namespace
pub(crate) ns: &'a DynNamespace,
/// Ref to an scoped filesystem (encapsulate fs actions inside the ns directory)
pub(crate) scoped_fs: &'a ScopedFilesystem<'a, T>,
/// Ref to a parachain (used to spawn collators)
pub(crate) parachain: Option<&'a TeyrchainSpec>,
/// The string representation of the bootnode address to pass to nodes
pub(crate) bootnodes_addr: &'a Vec<String>,
/// Flag to wait node is ready or not
/// Ready state means we can query Prometheus internal server
pub(crate) wait_ready: bool,
/// A json representation of the running nodes with their names as 'key'
pub(crate) nodes_by_name: serde_json::Value,
/// A ref to the global settings
pub(crate) global_settings: &'a GlobalSettings,
}
pub async fn spawn_node<'a, T>(
node: &NodeSpec,
mut files_to_inject: Vec<TransferedFile>,
ctx: &SpawnNodeCtx<'a, T>,
node: &NodeSpec,
mut files_to_inject: Vec<TransferedFile>,
ctx: &SpawnNodeCtx<'a, T>,
) -> Result<NetworkNode, anyhow::Error>
where
T: FileSystem,
T: FileSystem,
{
let mut created_paths = vec![];
// Create and inject the keystore IFF
// - The node is validator in the relaychain
// - The node is collator (encoded as validator) and the parachain is cumulus_based
// (parachain_id) should be set then.
if node.is_validator && (ctx.parachain.is_none() || ctx.parachain_id.is_some()) {
// Generate keystore for node
let node_files_path = if let Some(para) = ctx.parachain {
para.id.to_string()
} else {
node.name.clone()
};
let asset_hub_polkadot = ctx
.parachain_id
.map(|id| id.starts_with("asset-hub-polkadot"))
.unwrap_or_default();
let keystore_key_types = node.keystore_key_types.iter().map(String::as_str).collect();
let key_filenames = generators::generate_node_keystore(
&node.accounts,
&node_files_path,
ctx.scoped_fs,
asset_hub_polkadot,
keystore_key_types,
)
.await
.unwrap();
let mut created_paths = vec![];
// Create and inject the keystore IFF
// - The node is validator in the relaychain
// - The node is collator (encoded as validator) and the parachain is cumulus_based
// (parachain_id) should be set then.
if node.is_validator && (ctx.parachain.is_none() || ctx.parachain_id.is_some()) {
// Generate keystore for node
let node_files_path =
if let Some(para) = ctx.parachain { para.id.to_string() } else { node.name.clone() };
let asset_hub_polkadot =
ctx.parachain_id.map(|id| id.starts_with("asset-hub-polkadot")).unwrap_or_default();
let keystore_key_types = node.keystore_key_types.iter().map(String::as_str).collect();
let key_filenames = generators::generate_node_keystore(
&node.accounts,
&node_files_path,
ctx.scoped_fs,
asset_hub_polkadot,
keystore_key_types,
)
.await
.unwrap();
// Paths returned are relative to the base dir, we need to convert into
// fullpaths to inject them in the nodes.
let remote_keystore_chain_id = if let Some(id) = ctx.parachain_id {
id
} else {
ctx.chain_id
};
// Paths returned are relative to the base dir, we need to convert into
// fullpaths to inject them in the nodes.
let remote_keystore_chain_id =
if let Some(id) = ctx.parachain_id { id } else { ctx.chain_id };
let keystore_path = node.keystore_path.clone().unwrap_or(PathBuf::from(format!(
"/data/chains/{remote_keystore_chain_id}/keystore",
)));
let keystore_path = node
.keystore_path
.clone()
.unwrap_or(PathBuf::from(format!("/data/chains/{remote_keystore_chain_id}/keystore",)));
for key_filename in key_filenames {
let f = TransferedFile::new(
PathBuf::from(format!(
"{}/{}/{}",
ctx.ns.base_dir().to_string_lossy(),
node_files_path,
key_filename.to_string_lossy()
)),
keystore_path.join(key_filename),
);
files_to_inject.push(f);
}
created_paths.push(keystore_path);
}
for key_filename in key_filenames {
let f = TransferedFile::new(
PathBuf::from(format!(
"{}/{}/{}",
ctx.ns.base_dir().to_string_lossy(),
node_files_path,
key_filename.to_string_lossy()
)),
keystore_path.join(key_filename),
);
files_to_inject.push(f);
}
created_paths.push(keystore_path);
}
let base_dir = format!("{}/{}", ctx.ns.base_dir().to_string_lossy(), &node.name);
let base_dir = format!("{}/{}", ctx.ns.base_dir().to_string_lossy(), &node.name);
let (cfg_path, data_path, relay_data_path) = if !ctx.ns.capabilities().prefix_with_full_path {
(
NODE_CONFIG_DIR.into(),
NODE_DATA_DIR.into(),
NODE_RELAY_DATA_DIR.into(),
)
} else {
let cfg_path = format!("{}{NODE_CONFIG_DIR}", &base_dir);
let data_path = format!("{}{NODE_DATA_DIR}", &base_dir);
let relay_data_path = format!("{}{NODE_RELAY_DATA_DIR}", &base_dir);
(cfg_path, data_path, relay_data_path)
};
let (cfg_path, data_path, relay_data_path) = if !ctx.ns.capabilities().prefix_with_full_path {
(NODE_CONFIG_DIR.into(), NODE_DATA_DIR.into(), NODE_RELAY_DATA_DIR.into())
} else {
let cfg_path = format!("{}{NODE_CONFIG_DIR}", &base_dir);
let data_path = format!("{}{NODE_DATA_DIR}", &base_dir);
let relay_data_path = format!("{}{NODE_RELAY_DATA_DIR}", &base_dir);
(cfg_path, data_path, relay_data_path)
};
let gen_opts = generators::GenCmdOptions {
relay_chain_name: ctx.chain,
cfg_path: &cfg_path, // TODO: get from provider/ns
data_path: &data_path, // TODO: get from provider
relay_data_path: &relay_data_path, // TODO: get from provider
use_wrapper: false, // TODO: get from provider
bootnode_addr: ctx.bootnodes_addr.clone(),
use_default_ports_in_cmd: ctx.ns.capabilities().use_default_ports_in_cmd,
// IFF the provider require an image (e.g k8s) we know this is not native
is_native: !ctx.ns.capabilities().requires_image,
};
let gen_opts = generators::GenCmdOptions {
relay_chain_name: ctx.chain,
cfg_path: &cfg_path, // TODO: get from provider/ns
data_path: &data_path, // TODO: get from provider
relay_data_path: &relay_data_path, // TODO: get from provider
use_wrapper: false, // TODO: get from provider
bootnode_addr: ctx.bootnodes_addr.clone(),
use_default_ports_in_cmd: ctx.ns.capabilities().use_default_ports_in_cmd,
// IFF the provider require an image (e.g k8s) we know this is not native
is_native: !ctx.ns.capabilities().requires_image,
};
let mut collator_full_node_prom_port: Option<u16> = None;
let mut collator_full_node_prom_port_external: Option<u16> = None;
let mut collator_full_node_prom_port: Option<u16> = None;
let mut collator_full_node_prom_port_external: Option<u16> = None;
let (program, args) = match ctx.role {
// Collator should be `non-cumulus` one (e.g adder/undying)
ZombieRole::Node | ZombieRole::Collator => {
let maybe_para_id = ctx.parachain.map(|para| para.id);
let (program, args) = match ctx.role {
// Collator should be `non-cumulus` one (e.g adder/undying)
ZombieRole::Node | ZombieRole::Collator => {
let maybe_para_id = ctx.parachain.map(|para| para.id);
generators::generate_node_command(node, gen_opts, maybe_para_id)
},
ZombieRole::CumulusCollator => {
let para = ctx.parachain.expect(&format!(
"parachain must be part of the context {THIS_IS_A_BUG}"
));
collator_full_node_prom_port = node.full_node_prometheus_port.as_ref().map(|p| p.0);
generators::generate_node_command(node, gen_opts, maybe_para_id)
},
ZombieRole::CumulusCollator => {
let para = ctx
.parachain
.expect(&format!("parachain must be part of the context {THIS_IS_A_BUG}"));
collator_full_node_prom_port = node.full_node_prometheus_port.as_ref().map(|p| p.0);
generators::generate_node_command_cumulus(node, gen_opts, para.id)
},
_ => unreachable!(), /* TODO: do we need those?
* ZombieRole::Bootnode => todo!(),
* ZombieRole::Companion => todo!(), */
};
generators::generate_node_command_cumulus(node, gen_opts, para.id)
},
_ => unreachable!(), /* TODO: do we need those?
* ZombieRole::Bootnode => todo!(),
* ZombieRole::Companion => todo!(), */
};
// apply running networ replacements
let args: Vec<String> = args
.iter()
.map(|arg| apply_running_network_replacements(arg, &ctx.nodes_by_name))
.collect();
// apply running networ replacements
let args: Vec<String> = args
.iter()
.map(|arg| apply_running_network_replacements(arg, &ctx.nodes_by_name))
.collect();
info!(
"🚀 {}, spawning.... with command: {} {}",
node.name,
program,
args.join(" ")
);
info!("🚀 {}, spawning.... with command: {} {}", node.name, program, args.join(" "));
let ports = if ctx.ns.capabilities().use_default_ports_in_cmd {
// should use default ports to as internal
[
(P2P_PORT, node.p2p_port.0),
(RPC_PORT, node.rpc_port.0),
(PROMETHEUS_PORT, node.prometheus_port.0),
]
} else {
[
(P2P_PORT, P2P_PORT),
(RPC_PORT, RPC_PORT),
(PROMETHEUS_PORT, PROMETHEUS_PORT),
]
};
let ports = if ctx.ns.capabilities().use_default_ports_in_cmd {
// should use default ports to as internal
[
(P2P_PORT, node.p2p_port.0),
(RPC_PORT, node.rpc_port.0),
(PROMETHEUS_PORT, node.prometheus_port.0),
]
} else {
[(P2P_PORT, P2P_PORT), (RPC_PORT, RPC_PORT), (PROMETHEUS_PORT, PROMETHEUS_PORT)]
};
let spawn_ops = SpawnNodeOptions::new(node.name.clone(), program)
.args(args)
.env(
node.env
.iter()
.map(|var| (var.name.clone(), var.value.clone())),
)
.injected_files(files_to_inject)
.created_paths(created_paths)
.db_snapshot(node.db_snapshot.clone())
.port_mapping(HashMap::from(ports))
.node_log_path(node.node_log_path.clone());
let spawn_ops = SpawnNodeOptions::new(node.name.clone(), program)
.args(args)
.env(node.env.iter().map(|var| (var.name.clone(), var.value.clone())))
.injected_files(files_to_inject)
.created_paths(created_paths)
.db_snapshot(node.db_snapshot.clone())
.port_mapping(HashMap::from(ports))
.node_log_path(node.node_log_path.clone());
let spawn_ops = if let Some(image) = node.image.as_ref() {
spawn_ops.image(image.as_str())
} else {
spawn_ops
};
let spawn_ops = if let Some(image) = node.image.as_ref() {
spawn_ops.image(image.as_str())
} else {
spawn_ops
};
// Drops the port parking listeners before spawn
node.ws_port.drop_listener();
node.p2p_port.drop_listener();
node.rpc_port.drop_listener();
node.prometheus_port.drop_listener();
if let Some(port) = &node.full_node_p2p_port {
port.drop_listener();
}
if let Some(port) = &node.full_node_prometheus_port {
port.drop_listener();
}
// Drops the port parking listeners before spawn
node.ws_port.drop_listener();
node.p2p_port.drop_listener();
node.rpc_port.drop_listener();
node.prometheus_port.drop_listener();
if let Some(port) = &node.full_node_p2p_port {
port.drop_listener();
}
if let Some(port) = &node.full_node_prometheus_port {
port.drop_listener();
}
let running_node = ctx.ns.spawn_node(&spawn_ops).await.with_context(|| {
format!(
"Failed to spawn node: {} with opts: {:#?}",
node.name, spawn_ops
)
})?;
let running_node = ctx.ns.spawn_node(&spawn_ops).await.with_context(|| {
format!("Failed to spawn node: {} with opts: {:#?}", node.name, spawn_ops)
})?;
let mut ip_to_use = if let Some(local_ip) = ctx.global_settings.local_ip() {
*local_ip
} else {
LOCALHOST
};
let mut ip_to_use =
if let Some(local_ip) = ctx.global_settings.local_ip() { *local_ip } else { LOCALHOST };
let (rpc_port_external, prometheus_port_external, p2p_external);
let (rpc_port_external, prometheus_port_external, p2p_external);
if running_in_ci() && ctx.ns.provider_name() == "k8s" {
// running kubernets in ci require to use ip and default port
(rpc_port_external, prometheus_port_external, p2p_external) =
(RPC_PORT, PROMETHEUS_PORT, P2P_PORT);
collator_full_node_prom_port_external = Some(FULL_NODE_PROMETHEUS_PORT);
ip_to_use = running_node.ip().await?;
} else {
// Create port-forward iff we are not in CI or provider doesn't use the default ports (native)
let ports = futures::future::try_join_all(vec![
running_node.create_port_forward(node.rpc_port.0, RPC_PORT),
running_node.create_port_forward(node.prometheus_port.0, PROMETHEUS_PORT),
])
.await?;
if running_in_ci() && ctx.ns.provider_name() == "k8s" {
// running kubernets in ci require to use ip and default port
(rpc_port_external, prometheus_port_external, p2p_external) =
(RPC_PORT, PROMETHEUS_PORT, P2P_PORT);
collator_full_node_prom_port_external = Some(FULL_NODE_PROMETHEUS_PORT);
ip_to_use = running_node.ip().await?;
} else {
// Create port-forward iff we are not in CI or provider doesn't use the default ports (native)
let ports = futures::future::try_join_all(vec![
running_node.create_port_forward(node.rpc_port.0, RPC_PORT),
running_node.create_port_forward(node.prometheus_port.0, PROMETHEUS_PORT),
])
.await?;
(rpc_port_external, prometheus_port_external, p2p_external) = (
ports[0].unwrap_or(node.rpc_port.0),
ports[1].unwrap_or(node.prometheus_port.0),
// p2p don't need port-fwd
node.p2p_port.0,
);
(rpc_port_external, prometheus_port_external, p2p_external) = (
ports[0].unwrap_or(node.rpc_port.0),
ports[1].unwrap_or(node.prometheus_port.0),
// p2p don't need port-fwd
node.p2p_port.0,
);
if let Some(full_node_prom_port) = collator_full_node_prom_port {
let port_fwd = running_node
.create_port_forward(full_node_prom_port, FULL_NODE_PROMETHEUS_PORT)
.await?;
collator_full_node_prom_port_external = Some(port_fwd.unwrap_or(full_node_prom_port));
}
}
if let Some(full_node_prom_port) = collator_full_node_prom_port {
let port_fwd = running_node
.create_port_forward(full_node_prom_port, FULL_NODE_PROMETHEUS_PORT)
.await?;
collator_full_node_prom_port_external = Some(port_fwd.unwrap_or(full_node_prom_port));
}
}
let multiaddr = generators::generate_node_bootnode_addr(
&node.peer_id,
&running_node.ip().await?,
p2p_external,
running_node.args().as_ref(),
&node.p2p_cert_hash,
)?;
let multiaddr = generators::generate_node_bootnode_addr(
&node.peer_id,
&running_node.ip().await?,
p2p_external,
running_node.args().as_ref(),
&node.p2p_cert_hash,
)?;
let ws_uri = format!("ws://{ip_to_use}:{rpc_port_external}");
let prometheus_uri = format!("http://{ip_to_use}:{prometheus_port_external}/metrics");
info!("🚀 {}, should be running now", node.name);
info!(
"💻 {}: direct link (pjs) https://polkadot.js.org/apps/?rpc={ws_uri}#/explorer",
node.name
);
info!(
let ws_uri = format!("ws://{ip_to_use}:{rpc_port_external}");
let prometheus_uri = format!("http://{ip_to_use}:{prometheus_port_external}/metrics");
info!("🚀 {}, should be running now", node.name);
info!(
"💻 {}: direct link (pjs) https://polkadot.js.org/apps/?rpc={ws_uri}#/explorer",
node.name
);
info!(
"💻 {}: direct link (papi) https://dev.papi.how/explorer#networkId=custom&endpoint={ws_uri}",
node.name
);
info!("📊 {}: metrics link {prometheus_uri}", node.name);
info!("📊 {}: metrics link {prometheus_uri}", node.name);
if let Some(full_node_prom_port) = collator_full_node_prom_port_external {
info!(
"📊 {}: collator full-node metrics link http://{}:{}/metrics",
node.name, ip_to_use, full_node_prom_port
);
}
if let Some(full_node_prom_port) = collator_full_node_prom_port_external {
info!(
"📊 {}: collator full-node metrics link http://{}:{}/metrics",
node.name, ip_to_use, full_node_prom_port
);
}
info!("📓 logs cmd: {}", running_node.log_cmd());
info!("📓 logs cmd: {}", running_node.log_cmd());
Ok(NetworkNode::new(
node.name.clone(),
ws_uri,
prometheus_uri,
multiaddr,
node.clone(),
running_node,
))
Ok(NetworkNode::new(
node.name.clone(),
ws_uri,
prometheus_uri,
multiaddr,
node.clone(),
running_node,
))
}
@@ -2,42 +2,38 @@ use pezkuwi_subxt::{backend::rpc::RpcClient, OnlineClient};
#[async_trait::async_trait]
pub trait ClientFromUrl: Sized {
async fn from_secure_url(url: &str) -> Result<Self, pezkuwi_subxt::Error>;
async fn from_insecure_url(url: &str) -> Result<Self, pezkuwi_subxt::Error>;
async fn from_secure_url(url: &str) -> Result<Self, pezkuwi_subxt::Error>;
async fn from_insecure_url(url: &str) -> Result<Self, pezkuwi_subxt::Error>;
}
#[async_trait::async_trait]
impl<Config: pezkuwi_subxt::Config + Send + Sync> ClientFromUrl for OnlineClient<Config> {
async fn from_secure_url(url: &str) -> Result<Self, pezkuwi_subxt::Error> {
Self::from_url(url).await.map_err(Into::into)
}
async fn from_secure_url(url: &str) -> Result<Self, pezkuwi_subxt::Error> {
Self::from_url(url).await.map_err(Into::into)
}
async fn from_insecure_url(url: &str) -> Result<Self, pezkuwi_subxt::Error> {
Self::from_insecure_url(url).await.map_err(Into::into)
}
async fn from_insecure_url(url: &str) -> Result<Self, pezkuwi_subxt::Error> {
Self::from_insecure_url(url).await.map_err(Into::into)
}
}
#[async_trait::async_trait]
impl ClientFromUrl for RpcClient {
async fn from_secure_url(url: &str) -> Result<Self, pezkuwi_subxt::Error> {
Self::from_url(url)
.await
.map_err(pezkuwi_subxt::Error::from)
}
async fn from_secure_url(url: &str) -> Result<Self, pezkuwi_subxt::Error> {
Self::from_url(url).await.map_err(pezkuwi_subxt::Error::from)
}
async fn from_insecure_url(url: &str) -> Result<Self, pezkuwi_subxt::Error> {
Self::from_insecure_url(url)
.await
.map_err(pezkuwi_subxt::Error::from)
}
async fn from_insecure_url(url: &str) -> Result<Self, pezkuwi_subxt::Error> {
Self::from_insecure_url(url).await.map_err(pezkuwi_subxt::Error::from)
}
}
pub async fn get_client_from_url<T: ClientFromUrl + Send>(
url: &str,
url: &str,
) -> Result<T, pezkuwi_subxt::Error> {
if pezkuwi_subxt::utils::url_is_secure(url)? {
T::from_secure_url(url).await
} else {
T::from_insecure_url(url).await
}
if pezkuwi_subxt::utils::url_is_secure(url)? {
T::from_secure_url(url).await
} else {
T::from_insecure_url(url).await
}
}
@@ -5,65 +5,52 @@ use tracing::{debug, info};
use crate::network::node::NetworkNode;
pub async fn upgrade(
node: &NetworkNode,
wasm_data: &[u8],
sudo: &Keypair,
node: &NetworkNode,
wasm_data: &[u8],
sudo: &Keypair,
) -> Result<(), anyhow::Error> {
debug!(
"Upgrading runtime, using node: {} with endpoting {}",
node.name, node.ws_uri
);
let api: OnlineClient<BizinikiwConfig> = node.wait_client().await?;
debug!("Upgrading runtime, using node: {} with endpoting {}", node.name, node.ws_uri);
let api: OnlineClient<BizinikiwConfig> = node.wait_client().await?;
let upgrade = pezkuwi_subxt::dynamic::tx(
"System",
"set_code_without_checks",
vec![Value::from_bytes(wasm_data)],
);
let upgrade = pezkuwi_subxt::dynamic::tx(
"System",
"set_code_without_checks",
vec![Value::from_bytes(wasm_data)],
);
let sudo_call = pezkuwi_subxt::dynamic::tx(
"Sudo",
"sudo_unchecked_weight",
vec![
upgrade.into_value(),
Value::named_composite([
("ref_time", Value::primitive(1.into())),
("proof_size", Value::primitive(1.into())),
]),
],
);
let sudo_call = pezkuwi_subxt::dynamic::tx(
"Sudo",
"sudo_unchecked_weight",
vec![
upgrade.into_value(),
Value::named_composite([
("ref_time", Value::primitive(1.into())),
("proof_size", Value::primitive(1.into())),
]),
],
);
let mut tx = api
.tx()
.sign_and_submit_then_watch_default(&sudo_call, sudo)
.await?;
let mut tx = api.tx().sign_and_submit_then_watch_default(&sudo_call, sudo).await?;
// Below we use the low level API to replicate the `wait_for_in_block` behaviour
// which was removed in subxt 0.33.0. See https://github.com/paritytech/subxt/pull/1237.
while let Some(status) = tx.next().await {
let status = status?;
match &status {
TxStatus::InBestBlock(tx_in_block) | TxStatus::InFinalizedBlock(tx_in_block) => {
let _result = tx_in_block.wait_for_success().await?;
let block_status = if status.as_finalized().is_some() {
"Finalized"
} else {
"Best"
};
info!(
"[{}] In block: {:#?}",
block_status,
tx_in_block.block_hash()
);
},
TxStatus::Error { message }
| TxStatus::Invalid { message }
| TxStatus::Dropped { message } => {
return Err(anyhow::format_err!("Error submitting tx: {message}"));
},
_ => continue,
}
}
// Below we use the low level API to replicate the `wait_for_in_block` behaviour
// which was removed in subxt 0.33.0. See https://github.com/paritytech/subxt/pull/1237.
while let Some(status) = tx.next().await {
let status = status?;
match &status {
TxStatus::InBestBlock(tx_in_block) | TxStatus::InFinalizedBlock(tx_in_block) => {
let _result = tx_in_block.wait_for_success().await?;
let block_status =
if status.as_finalized().is_some() { "Finalized" } else { "Best" };
info!("[{}] In block: {:#?}", block_status, tx_in_block.block_hash());
},
TxStatus::Error { message }
| TxStatus::Invalid { message }
| TxStatus::Dropped { message } => {
return Err(anyhow::format_err!("Error submitting tx: {message}"));
},
_ => continue,
}
}
Ok(())
Ok(())
}
@@ -2,7 +2,7 @@ use serde::Deserializer;
pub fn default_as_empty_vec<'de, D, T>(_deserializer: D) -> Result<Vec<T>, D::Error>
where
D: Deserializer<'de>,
D: Deserializer<'de>,
{
Ok(Vec::new())
Ok(Vec::new())
}
@@ -6,12 +6,12 @@ use pest_derive::Parser;
/// An error at parsing level.
#[derive(thiserror::Error, Debug)]
pub enum ParserError {
#[error("error parsing input")]
ParseError(Box<pest::error::Error<Rule>>),
#[error("root node should be valid: {0}")]
ParseRootNodeError(String),
#[error("can't cast metric value as f64: {0}")]
CastValueError(#[from] ParseFloatError),
#[error("error parsing input")]
ParseError(Box<pest::error::Error<Rule>>),
#[error("root node should be valid: {0}")]
ParseRootNodeError(String),
#[error("can't cast metric value as f64: {0}")]
CastValueError(#[from] ParseFloatError),
}
// This include forces recompiling this source file if the grammar file changes.
@@ -25,154 +25,147 @@ pub struct MetricsParser;
pub type MetricMap = HashMap<String, f64>;
pub fn parse(input: &str) -> Result<MetricMap, ParserError> {
let mut metric_map: MetricMap = Default::default();
let mut pairs = MetricsParser::parse(Rule::statement, input)
.map_err(|e| ParserError::ParseError(Box::new(e)))?;
let mut metric_map: MetricMap = Default::default();
let mut pairs = MetricsParser::parse(Rule::statement, input)
.map_err(|e| ParserError::ParseError(Box::new(e)))?;
let root = pairs
.next()
.ok_or(ParserError::ParseRootNodeError(pairs.as_str().to_string()))?;
for token in root.into_inner() {
if token.as_rule() == Rule::block {
let inner = token.into_inner();
for value in inner {
match value.as_rule() {
Rule::genericomment | Rule::typexpr | Rule::helpexpr => {
// don't need to collect comments/types/helpers blocks.
continue;
},
Rule::promstmt => {
let mut key: &str = "";
let mut labels: Vec<(&str, &str)> = Vec::new();
let mut val: f64 = 0_f64;
for v in value.clone().into_inner() {
match &v.as_rule() {
Rule::key => {
key = v.as_span().as_str();
},
Rule::NaN | Rule::posInf | Rule::negInf => {
// noop (not used in substrate metrics)
},
Rule::number => {
val = v.as_span().as_str().parse::<f64>()?;
},
Rule::labels => {
// SAFETY: use unwrap should be safe since we are just
// walking the parser struct and if are matching a label
// should have a key/vals
for p in v.into_inner() {
let mut inner = p.into_inner();
let key = inner.next().unwrap().as_span().as_str();
let value = inner
.next()
.unwrap()
.into_inner()
.next()
.unwrap()
.as_span()
.as_str();
let root = pairs.next().ok_or(ParserError::ParseRootNodeError(pairs.as_str().to_string()))?;
for token in root.into_inner() {
if token.as_rule() == Rule::block {
let inner = token.into_inner();
for value in inner {
match value.as_rule() {
Rule::genericomment | Rule::typexpr | Rule::helpexpr => {
// don't need to collect comments/types/helpers blocks.
continue;
},
Rule::promstmt => {
let mut key: &str = "";
let mut labels: Vec<(&str, &str)> = Vec::new();
let mut val: f64 = 0_f64;
for v in value.clone().into_inner() {
match &v.as_rule() {
Rule::key => {
key = v.as_span().as_str();
},
Rule::NaN | Rule::posInf | Rule::negInf => {
// noop (not used in substrate metrics)
},
Rule::number => {
val = v.as_span().as_str().parse::<f64>()?;
},
Rule::labels => {
// SAFETY: use unwrap should be safe since we are just
// walking the parser struct and if are matching a label
// should have a key/vals
for p in v.into_inner() {
let mut inner = p.into_inner();
let key = inner.next().unwrap().as_span().as_str();
let value = inner
.next()
.unwrap()
.into_inner()
.next()
.unwrap()
.as_span()
.as_str();
labels.push((key, value));
}
},
_ => {
todo!("not implemented");
},
}
}
labels.push((key, value));
}
},
_ => {
todo!("not implemented");
},
}
}
// we should store to make it compatible with zombienet v1:
// key_without_prefix
// key_without_prefix_and_without_chain
// key_with_prefix_with_chain
// key_with_prefix_and_without_chain
let key_with_out_prefix =
key.split('_').collect::<Vec<&str>>()[1..].join("_");
let (labels_without_chain, labels_with_chain) =
labels.iter().fold((vec![], vec![]), |mut acc, item| {
if item.0.eq("chain") {
acc.1.push(format!("{}=\"{}\"", item.0, item.1));
} else {
acc.0.push(format!("{}=\"{}\"", item.0, item.1));
acc.1.push(format!("{}=\"{}\"", item.0, item.1));
}
acc
});
// we should store to make it compatible with zombienet v1:
// key_without_prefix
// key_without_prefix_and_without_chain
// key_with_prefix_with_chain
// key_with_prefix_and_without_chain
let key_with_out_prefix =
key.split('_').collect::<Vec<&str>>()[1..].join("_");
let (labels_without_chain, labels_with_chain) =
labels.iter().fold((vec![], vec![]), |mut acc, item| {
if item.0.eq("chain") {
acc.1.push(format!("{}=\"{}\"", item.0, item.1));
} else {
acc.0.push(format!("{}=\"{}\"", item.0, item.1));
acc.1.push(format!("{}=\"{}\"", item.0, item.1));
}
acc
});
let labels_with_chain_str = if labels_with_chain.is_empty() {
String::from("")
} else {
format!("{{{}}}", labels_with_chain.join(","))
};
let labels_with_chain_str = if labels_with_chain.is_empty() {
String::from("")
} else {
format!("{{{}}}", labels_with_chain.join(","))
};
let labels_without_chain_str = if labels_without_chain.is_empty() {
String::from("")
} else {
format!("{{{}}}", labels_without_chain.join(","))
};
let labels_without_chain_str = if labels_without_chain.is_empty() {
String::from("")
} else {
format!("{{{}}}", labels_without_chain.join(","))
};
metric_map.insert(format!("{key}{labels_without_chain_str}"), val);
metric_map.insert(
format!("{key_with_out_prefix}{labels_without_chain_str}"),
val,
);
metric_map.insert(format!("{key}{labels_with_chain_str}"), val);
metric_map
.insert(format!("{key_with_out_prefix}{labels_with_chain_str}"), val);
},
_ => {},
}
}
}
}
metric_map.insert(format!("{key}{labels_without_chain_str}"), val);
metric_map.insert(
format!("{key_with_out_prefix}{labels_without_chain_str}"),
val,
);
metric_map.insert(format!("{key}{labels_with_chain_str}"), val);
metric_map
.insert(format!("{key_with_out_prefix}{labels_with_chain_str}"), val);
},
_ => {},
}
}
}
}
Ok(metric_map)
Ok(metric_map)
}
#[cfg(test)]
mod tests {
use std::fs;
use std::fs;
use super::*;
use super::*;
#[test]
fn parse_metrics_works() {
let metrics_raw = fs::read_to_string("./testing/metrics.txt").unwrap();
let metrics = parse(&metrics_raw).unwrap();
#[test]
fn parse_metrics_works() {
let metrics_raw = fs::read_to_string("./testing/metrics.txt").unwrap();
let metrics = parse(&metrics_raw).unwrap();
// full key
assert_eq!(
metrics
.get("polkadot_node_is_active_validator{chain=\"rococo_local_testnet\"}")
.unwrap(),
&1_f64
);
// with prefix and no chain
assert_eq!(
metrics.get("polkadot_node_is_active_validator").unwrap(),
&1_f64
);
// no prefix with chain
assert_eq!(
metrics
.get("node_is_active_validator{chain=\"rococo_local_testnet\"}")
.unwrap(),
&1_f64
);
// no prefix without chain
assert_eq!(metrics.get("node_is_active_validator").unwrap(), &1_f64);
}
// full key
assert_eq!(
metrics
.get("polkadot_node_is_active_validator{chain=\"rococo_local_testnet\"}")
.unwrap(),
&1_f64
);
// with prefix and no chain
assert_eq!(metrics.get("polkadot_node_is_active_validator").unwrap(), &1_f64);
// no prefix with chain
assert_eq!(
metrics.get("node_is_active_validator{chain=\"rococo_local_testnet\"}").unwrap(),
&1_f64
);
// no prefix without chain
assert_eq!(metrics.get("node_is_active_validator").unwrap(), &1_f64);
}
#[test]
fn parse_invalid_metrics_str_should_fail() {
let metrics_raw = r"
#[test]
fn parse_invalid_metrics_str_should_fail() {
let metrics_raw = r"
# HELP polkadot_node_is_active_validator Tracks if the validator is in the active set. Updates at session boundary.
# TYPE polkadot_node_is_active_validator gauge
polkadot_node_is_active_validator{chain=} 1
";
let metrics = parse(metrics_raw);
assert!(metrics.is_err());
assert!(matches!(metrics, Err(ParserError::ParseError(_))));
}
let metrics = parse(metrics_raw);
assert!(metrics.is_err());
assert!(matches!(metrics, Err(ParserError::ParseError(_))));
}
}
@@ -16,581 +16,540 @@ pub type Result<T> = core::result::Result<T, Error>;
#[derive(Clone)]
pub struct DockerClient {
using_podman: bool,
using_podman: bool,
}
#[derive(Debug)]
pub struct ContainerRunOptions {
image: String,
command: Vec<String>,
env: Option<Vec<(String, String)>>,
volume_mounts: Option<HashMap<String, String>>,
name: Option<String>,
entrypoint: Option<String>,
port_mapping: HashMap<Port, Port>,
rm: bool,
detach: bool,
image: String,
command: Vec<String>,
env: Option<Vec<(String, String)>>,
volume_mounts: Option<HashMap<String, String>>,
name: Option<String>,
entrypoint: Option<String>,
port_mapping: HashMap<Port, Port>,
rm: bool,
detach: bool,
}
enum Container {
Docker(DockerContainer),
Podman(PodmanContainer),
Docker(DockerContainer),
Podman(PodmanContainer),
}
// TODO: we may don't need this
#[allow(dead_code)]
#[derive(Deserialize, Debug)]
struct DockerContainer {
#[serde(alias = "Names", deserialize_with = "deserialize_list")]
names: Vec<String>,
#[serde(alias = "Ports", deserialize_with = "deserialize_list")]
ports: Vec<String>,
#[serde(alias = "State")]
state: String,
#[serde(alias = "Names", deserialize_with = "deserialize_list")]
names: Vec<String>,
#[serde(alias = "Ports", deserialize_with = "deserialize_list")]
ports: Vec<String>,
#[serde(alias = "State")]
state: String,
}
// TODO: we may don't need this
#[allow(dead_code)]
#[derive(Deserialize, Debug)]
struct PodmanPort {
host_ip: String,
container_port: u16,
host_port: u16,
range: u16,
protocol: String,
host_ip: String,
container_port: u16,
host_port: u16,
range: u16,
protocol: String,
}
// TODO: we may don't need this
#[allow(dead_code)]
#[derive(Deserialize, Debug)]
struct PodmanContainer {
#[serde(alias = "Id")]
id: String,
#[serde(alias = "Image")]
image: String,
#[serde(alias = "Mounts")]
mounts: Vec<String>,
#[serde(alias = "Names")]
names: Vec<String>,
#[serde(alias = "Ports", deserialize_with = "deserialize_null_as_default")]
ports: Vec<PodmanPort>,
#[serde(alias = "State")]
state: String,
#[serde(alias = "Id")]
id: String,
#[serde(alias = "Image")]
image: String,
#[serde(alias = "Mounts")]
mounts: Vec<String>,
#[serde(alias = "Names")]
names: Vec<String>,
#[serde(alias = "Ports", deserialize_with = "deserialize_null_as_default")]
ports: Vec<PodmanPort>,
#[serde(alias = "State")]
state: String,
}
fn deserialize_list<'de, D>(deserializer: D) -> std::result::Result<Vec<String>, D::Error>
where
D: Deserializer<'de>,
D: Deserializer<'de>,
{
let str_sequence = String::deserialize(deserializer)?;
Ok(str_sequence
.split(',')
.filter(|item| !item.is_empty())
.map(|item| item.to_owned())
.collect())
let str_sequence = String::deserialize(deserializer)?;
Ok(str_sequence
.split(',')
.filter(|item| !item.is_empty())
.map(|item| item.to_owned())
.collect())
}
fn deserialize_null_as_default<'de, D, T>(deserializer: D) -> std::result::Result<T, D::Error>
where
T: Default + Deserialize<'de>,
D: Deserializer<'de>,
T: Default + Deserialize<'de>,
D: Deserializer<'de>,
{
let opt = Option::deserialize(deserializer)?;
Ok(opt.unwrap_or_default())
let opt = Option::deserialize(deserializer)?;
Ok(opt.unwrap_or_default())
}
impl ContainerRunOptions {
pub fn new<S>(image: &str, command: Vec<S>) -> Self
where
S: Into<String> + std::fmt::Debug + Send + Clone,
{
ContainerRunOptions {
image: image.to_string(),
command: command
.clone()
.into_iter()
.map(|s| s.into())
.collect::<Vec<_>>(),
env: None,
volume_mounts: None,
name: None,
entrypoint: None,
port_mapping: HashMap::default(),
rm: false,
detach: true, // add -d flag by default
}
}
pub fn new<S>(image: &str, command: Vec<S>) -> Self
where
S: Into<String> + std::fmt::Debug + Send + Clone,
{
ContainerRunOptions {
image: image.to_string(),
command: command.clone().into_iter().map(|s| s.into()).collect::<Vec<_>>(),
env: None,
volume_mounts: None,
name: None,
entrypoint: None,
port_mapping: HashMap::default(),
rm: false,
detach: true, // add -d flag by default
}
}
pub fn env<S>(mut self, env: Vec<(S, S)>) -> Self
where
S: Into<String> + std::fmt::Debug + Send + Clone,
{
self.env = Some(
env.into_iter()
.map(|(name, value)| (name.into(), value.into()))
.collect(),
);
self
}
pub fn env<S>(mut self, env: Vec<(S, S)>) -> Self
where
S: Into<String> + std::fmt::Debug + Send + Clone,
{
self.env = Some(env.into_iter().map(|(name, value)| (name.into(), value.into())).collect());
self
}
pub fn volume_mounts<S>(mut self, volume_mounts: HashMap<S, S>) -> Self
where
S: Into<String> + std::fmt::Debug + Send + Clone,
{
self.volume_mounts = Some(
volume_mounts
.into_iter()
.map(|(source, target)| (source.into(), target.into()))
.collect(),
);
self
}
pub fn volume_mounts<S>(mut self, volume_mounts: HashMap<S, S>) -> Self
where
S: Into<String> + std::fmt::Debug + Send + Clone,
{
self.volume_mounts = Some(
volume_mounts
.into_iter()
.map(|(source, target)| (source.into(), target.into()))
.collect(),
);
self
}
pub fn name<S>(mut self, name: S) -> Self
where
S: Into<String> + std::fmt::Debug + Send + Clone,
{
self.name = Some(name.into());
self
}
pub fn name<S>(mut self, name: S) -> Self
where
S: Into<String> + std::fmt::Debug + Send + Clone,
{
self.name = Some(name.into());
self
}
pub fn entrypoint<S>(mut self, entrypoint: S) -> Self
where
S: Into<String> + std::fmt::Debug + Send + Clone,
{
self.entrypoint = Some(entrypoint.into());
self
}
pub fn entrypoint<S>(mut self, entrypoint: S) -> Self
where
S: Into<String> + std::fmt::Debug + Send + Clone,
{
self.entrypoint = Some(entrypoint.into());
self
}
pub fn port_mapping(mut self, port_mapping: &HashMap<Port, Port>) -> Self {
self.port_mapping.clone_from(port_mapping);
self
}
pub fn port_mapping(mut self, port_mapping: &HashMap<Port, Port>) -> Self {
self.port_mapping.clone_from(port_mapping);
self
}
pub fn rm(mut self) -> Self {
self.rm = true;
self
}
pub fn rm(mut self) -> Self {
self.rm = true;
self
}
pub fn detach(mut self, choice: bool) -> Self {
self.detach = choice;
self
}
pub fn detach(mut self, choice: bool) -> Self {
self.detach = choice;
self
}
}
impl DockerClient {
pub async fn new() -> Result<Self> {
let using_podman = Self::is_using_podman().await?;
pub async fn new() -> Result<Self> {
let using_podman = Self::is_using_podman().await?;
Ok(DockerClient { using_podman })
}
Ok(DockerClient { using_podman })
}
pub fn client_binary(&self) -> String {
String::from(if self.using_podman {
"podman"
} else {
"docker"
})
}
pub fn client_binary(&self) -> String {
String::from(if self.using_podman { "podman" } else { "docker" })
}
async fn is_using_podman() -> Result<bool> {
if let Ok(output) = tokio::process::Command::new("docker")
.arg("version")
.output()
.await
{
// detect whether we're actually running podman with docker emulation
return Ok(String::from_utf8_lossy(&output.stdout)
.to_lowercase()
.contains("podman"));
}
async fn is_using_podman() -> Result<bool> {
if let Ok(output) = tokio::process::Command::new("docker").arg("version").output().await {
// detect whether we're actually running podman with docker emulation
return Ok(String::from_utf8_lossy(&output.stdout).to_lowercase().contains("podman"));
}
tokio::process::Command::new("podman")
.arg("--version")
.output()
.await
.map_err(|err| anyhow!("Failed to detect container engine: {err}"))?;
tokio::process::Command::new("podman")
.arg("--version")
.output()
.await
.map_err(|err| anyhow!("Failed to detect container engine: {err}"))?;
Ok(true)
}
Ok(true)
}
}
impl DockerClient {
fn client_command(&self) -> tokio::process::Command {
tokio::process::Command::new(self.client_binary())
}
fn client_command(&self) -> tokio::process::Command {
tokio::process::Command::new(self.client_binary())
}
pub async fn create_volume(&self, name: &str) -> Result<()> {
let result = self
.client_command()
.args(["volume", "create", name])
.output()
.await
.map_err(|err| anyhow!("Failed to create volume '{name}': {err}"))?;
pub async fn create_volume(&self, name: &str) -> Result<()> {
let result = self
.client_command()
.args(["volume", "create", name])
.output()
.await
.map_err(|err| anyhow!("Failed to create volume '{name}': {err}"))?;
if !result.status.success() {
return Err(anyhow!(
"Failed to create volume '{name}': {}",
String::from_utf8_lossy(&result.stderr)
)
.into());
}
if !result.status.success() {
return Err(anyhow!(
"Failed to create volume '{name}': {}",
String::from_utf8_lossy(&result.stderr)
)
.into());
}
Ok(())
}
Ok(())
}
pub async fn container_run(&self, options: ContainerRunOptions) -> Result<String> {
let mut cmd = self.client_command();
cmd.args(["run", "--platform", "linux/amd64"]);
pub async fn container_run(&self, options: ContainerRunOptions) -> Result<String> {
let mut cmd = self.client_command();
cmd.args(["run", "--platform", "linux/amd64"]);
if options.detach {
cmd.arg("-d");
}
if options.detach {
cmd.arg("-d");
}
Self::apply_cmd_options(&mut cmd, &options);
Self::apply_cmd_options(&mut cmd, &options);
trace!("cmd: {:?}", cmd);
trace!("cmd: {:?}", cmd);
let result = cmd.output().await.map_err(|err| {
anyhow!(
"Failed to run container with image '{image}' and command '{command}': {err}",
image = options.image,
command = options.command.join(" "),
)
})?;
let result = cmd.output().await.map_err(|err| {
anyhow!(
"Failed to run container with image '{image}' and command '{command}': {err}",
image = options.image,
command = options.command.join(" "),
)
})?;
if !result.status.success() {
return Err(anyhow!(
"Failed to run container with image '{image}' and command '{command}': {err}",
image = options.image,
command = options.command.join(" "),
err = String::from_utf8_lossy(&result.stderr)
)
.into());
}
if !result.status.success() {
return Err(anyhow!(
"Failed to run container with image '{image}' and command '{command}': {err}",
image = options.image,
command = options.command.join(" "),
err = String::from_utf8_lossy(&result.stderr)
)
.into());
}
Ok(String::from_utf8_lossy(&result.stdout).to_string())
}
Ok(String::from_utf8_lossy(&result.stdout).to_string())
}
/// Creates (but does not start) a container (`docker container create`) and
/// returns the command's stdout (the new container id).
///
/// # Errors
/// Fails if the client binary cannot be spawned or create exits non-zero.
pub async fn container_create(&self, options: ContainerRunOptions) -> Result<String> {
    let mut cmd = self.client_command();
    cmd.args(["container", "create"]);

    Self::apply_cmd_options(&mut cmd, &options);

    trace!("cmd: {:?}", cmd);

    // NOTE(review): messages below say "run" although this is `create`;
    // kept byte-identical in case callers/tests match on the text.
    let result = cmd.output().await.map_err(|err| {
        anyhow!(
            "Failed to run container with image '{image}' and command '{command}': {err}",
            image = options.image,
            command = options.command.join(" "),
        )
    })?;

    if !result.status.success() {
        return Err(anyhow!(
            "Failed to run container with image '{image}' and command '{command}': {err}",
            image = options.image,
            command = options.command.join(" "),
            err = String::from_utf8_lossy(&result.stderr)
        )
        .into());
    }

    Ok(String::from_utf8_lossy(&result.stdout).to_string())
}
/// Executes `command` inside the running container `name` (`docker exec`).
///
/// `env` pairs are forwarded with `-e`, `as_user` with `-u`.
///
/// Returns `Ok(Ok(stdout))` when the command succeeds, and
/// `Ok(Err((status, stderr)))` when the command itself exits non-zero;
/// `Err(..)` only when the client process could not be spawned.
pub async fn container_exec<S>(
    &self,
    name: &str,
    command: Vec<S>,
    env: Option<Vec<(S, S)>>,
    as_user: Option<S>,
) -> Result<ExecutionResult>
where
    S: Into<String> + std::fmt::Debug + Send + Clone,
{
    let mut cmd = self.client_command();
    cmd.arg("exec");

    if let Some(env) = env {
        for env_var in env {
            cmd.args(["-e", &format!("{}={}", env_var.0.into(), env_var.1.into())]);
        }
    }

    if let Some(user) = as_user {
        cmd.args(["-u", user.into().as_ref()]);
    }

    cmd.arg(name);
    cmd.args(command.clone().into_iter().map(|s| <S as Into<String>>::into(s)));

    trace!("cmd is : {:?}", cmd);

    let result = cmd.output().await.map_err(|err| {
        anyhow!(
            "Failed to exec '{}' on '{}': {err}",
            command
                .into_iter()
                .map(|s| <S as Into<String>>::into(s))
                .collect::<Vec<_>>()
                .join(" "),
            name,
        )
    })?;

    if !result.status.success() {
        return Ok(Err((result.status, String::from_utf8_lossy(&result.stderr).to_string())));
    }

    Ok(Ok(String::from_utf8_lossy(&result.stdout).to_string()))
}
/// Copies a local file into a container (`docker cp LOCAL NAME:REMOTE`).
///
/// # Errors
/// Fails if the client binary cannot be spawned or `cp` exits non-zero.
pub async fn container_cp(
    &self,
    name: &str,
    local_path: &Path,
    remote_path: &Path,
) -> Result<()> {
    let result = self
        .client_command()
        .args([
            "cp",
            local_path.to_string_lossy().as_ref(),
            &format!("{name}:{}", remote_path.to_string_lossy().as_ref()),
        ])
        .output()
        .await
        .map_err(|err| {
            anyhow!(
                // fixed message typo: was "Failed copy file"
                "Failed to copy file '{file}' to container '{name}': {err}",
                file = local_path.to_string_lossy(),
            )
        })?;

    if !result.status.success() {
        return Err(anyhow!(
            "Failed to copy file '{file}' to container '{name}': {err}",
            file = local_path.to_string_lossy(),
            err = String::from_utf8_lossy(&result.stderr)
        )
        .into());
    }

    Ok(())
}
/// Force-removes a container and its anonymous volumes
/// (`docker rm --force --volumes NAME`).
///
/// # Errors
/// Fails if the client binary cannot be spawned or `rm` exits non-zero.
pub async fn container_rm(&self, name: &str) -> Result<()> {
    let result = self
        .client_command()
        .args(["rm", "--force", "--volumes", name])
        .output()
        .await
        // fixed message: was "Failed do remove container '{name}: {err}"
        // (typo + unbalanced quote)
        .map_err(|err| anyhow!("Failed to remove container '{name}': {err}"))?;

    if !result.status.success() {
        return Err(anyhow!(
            "Failed to remove container '{name}': {err}",
            err = String::from_utf8_lossy(&result.stderr)
        )
        .into());
    }

    Ok(())
}
pub async fn namespaced_containers_rm(&self, namespace: &str) -> Result<()> {
let container_names: Vec<String> = self
.get_containers()
.await?
.into_iter()
.filter_map(|container| match container {
Container::Docker(container) => {
if let Some(name) = container.names.first() {
if name.starts_with(namespace) {
return Some(name.to_string());
}
}
pub async fn namespaced_containers_rm(&self, namespace: &str) -> Result<()> {
let container_names: Vec<String> = self
.get_containers()
.await?
.into_iter()
.filter_map(|container| match container {
Container::Docker(container) => {
if let Some(name) = container.names.first() {
if name.starts_with(namespace) {
return Some(name.to_string());
}
}
None
},
Container::Podman(container) => {
if let Some(name) = container.names.first() {
if name.starts_with(namespace) {
return Some(name.to_string());
}
}
None
},
Container::Podman(container) => {
if let Some(name) = container.names.first() {
if name.starts_with(namespace) {
return Some(name.to_string());
}
}
None
},
})
.collect();
None
},
})
.collect();
info!("{:?}", container_names);
let futures = container_names
.iter()
.map(|name| self.container_rm(name))
.collect::<Vec<_>>();
try_join_all(futures).await?;
info!("{:?}", container_names);
let futures =
container_names.iter().map(|name| self.container_rm(name)).collect::<Vec<_>>();
try_join_all(futures).await?;
Ok(())
}
Ok(())
}
pub async fn container_ip(&self, container_name: &str) -> Result<String> {
let ip = if self.using_podman {
"127.0.0.1".into()
} else {
let mut cmd = tokio::process::Command::new("docker");
cmd.args(vec![
"inspect",
"-f",
"{{ .NetworkSettings.IPAddress }}",
container_name,
]);
pub async fn container_ip(&self, container_name: &str) -> Result<String> {
let ip = if self.using_podman {
"127.0.0.1".into()
} else {
let mut cmd = tokio::process::Command::new("docker");
cmd.args(vec!["inspect", "-f", "{{ .NetworkSettings.IPAddress }}", container_name]);
trace!("CMD: {cmd:?}");
trace!("CMD: {cmd:?}");
let res = cmd
.output()
.await
.map_err(|err| anyhow!("Failed to get docker container ip, output: {err}"))?;
let res = cmd
.output()
.await
.map_err(|err| anyhow!("Failed to get docker container ip, output: {err}"))?;
String::from_utf8(res.stdout)
.map_err(|err| anyhow!("Failed to get docker container ip, output: {err}"))?
.trim()
.into()
};
String::from_utf8(res.stdout)
.map_err(|err| anyhow!("Failed to get docker container ip, output: {err}"))?
.trim()
.into()
};
trace!("IP: {ip}");
Ok(ip)
}
trace!("IP: {ip}");
Ok(ip)
}
/// Lists all containers from whichever engine is in use, wrapped in the
/// engine-tagged `Container` enum.
async fn get_containers(&self) -> Result<Vec<Container>> {
    let containers = if self.using_podman {
        self.get_podman_containers().await?.into_iter().map(Container::Podman).collect()
    } else {
        self.get_docker_containers().await?.into_iter().map(Container::Docker).collect()
    };

    Ok(containers)
}
/// Lists all podman containers (`podman ps --all --no-trunc --format json`).
///
/// Podman emits a single JSON array, parsed in one go.
async fn get_podman_containers(&self) -> Result<Vec<PodmanContainer>> {
    let res = tokio::process::Command::new("podman")
        .args(vec!["ps", "--all", "--no-trunc", "--format", "json"])
        .output()
        .await
        .map_err(|err| anyhow!("Failed to get podman containers output: {err}"))?;

    let stdout = String::from_utf8_lossy(&res.stdout);

    let containers = serde_json::from_str(&stdout)
        .map_err(|err| anyhow!("Failed to parse podman containers output: {err}"))?;

    Ok(containers)
}
/// Lists all docker containers (`docker ps --all --no-trunc --format json`).
///
/// Unlike podman, docker emits one JSON object per line (JSONL), so each
/// line is parsed individually.
async fn get_docker_containers(&self) -> Result<Vec<DockerContainer>> {
    let res = tokio::process::Command::new("docker")
        .args(vec!["ps", "--all", "--no-trunc", "--format", "json"])
        .output()
        .await
        // was `.unwrap()`: propagate the spawn error like the podman variant
        // instead of panicking when the docker binary is missing.
        .map_err(|err| anyhow!("Failed to get docker containers output: {err}"))?;

    let stdout = String::from_utf8_lossy(&res.stdout);

    let mut containers = vec![];
    for line in stdout.lines() {
        containers.push(
            serde_json::from_str::<DockerContainer>(line)
                .map_err(|err| anyhow!("Failed to parse docker container output: {err}"))?,
        );
    }

    Ok(containers)
}
/// Fetches the full timestamped log stream of a container.
///
/// Runs through `sh -c` so that `2>&1` folds stderr into stdout and a single
/// stream is captured.
pub(crate) async fn container_logs(&self, container_name: &str) -> Result<String> {
    let output = Command::new("sh")
        .arg("-c")
        .arg(format!("docker logs -t '{container_name}' 2>&1"))
        .stdout(Stdio::piped())
        .output()
        .await
        .map_err(|err| {
            anyhow!(
                "Failed to spawn docker logs command for container '{container_name}': {err}"
            )
        })?;

    let logs = String::from_utf8_lossy(&output.stdout).to_string();

    if !output.status.success() {
        // stderr was redirected to stdout, so logs should contain the error message if any
        return Err(anyhow!(
            "Failed to get logs for container '{name}': {logs}",
            name = container_name,
            logs = &logs
        )
        .into());
    }

    Ok(logs)
}
/// Translates `ContainerRunOptions` into the corresponding CLI flags on `cmd`.
///
/// Flag order matters to the docker/podman CLI: all options must precede the
/// image, and the image must precede the container command.
fn apply_cmd_options(cmd: &mut Command, options: &ContainerRunOptions) {
    if options.rm {
        cmd.arg("--rm");
    }

    if let Some(entrypoint) = options.entrypoint.as_ref() {
        cmd.args(["--entrypoint", entrypoint]);
    }

    if let Some(volume_mounts) = options.volume_mounts.as_ref() {
        for (source, target) in volume_mounts {
            cmd.args(["-v", &format!("{source}:{target}")]);
        }
    }

    if let Some(env) = options.env.as_ref() {
        for env_var in env {
            cmd.args(["-e", &format!("{}={}", env_var.0, env_var.1)]);
        }
    }

    // add published ports
    for (container_port, host_port) in options.port_mapping.iter() {
        cmd.args(["-p", &format!("{host_port}:{container_port}")]);
    }

    if let Some(name) = options.name.as_ref() {
        cmd.args(["--name", name]);
    }

    cmd.arg(&options.image);

    for arg in &options.command {
        cmd.arg(arg);
    }
}
}
@@ -1,8 +1,8 @@
use std::{
collections::HashMap,
path::{Path, PathBuf},
sync::{Arc, Weak},
thread,
collections::HashMap,
path::{Path, PathBuf},
sync::{Arc, Weak},
thread,
};
use async_trait::async_trait;
@@ -12,212 +12,207 @@ use tracing::{debug, trace, warn};
use uuid::Uuid;
use super::{
client::{ContainerRunOptions, DockerClient},
node::DockerNode,
DockerProvider,
client::{ContainerRunOptions, DockerClient},
node::DockerNode,
DockerProvider,
};
use crate::{
constants::NAMESPACE_PREFIX,
docker::{
node::{DeserializableDockerNodeOptions, DockerNodeOptions},
provider,
},
shared::helpers::extract_execution_result,
types::{
GenerateFileCommand, GenerateFilesOptions, ProviderCapabilities, RunCommandOptions,
SpawnNodeOptions,
},
DynNode, ProviderError, ProviderNamespace, ProviderNode,
constants::NAMESPACE_PREFIX,
docker::{
node::{DeserializableDockerNodeOptions, DockerNodeOptions},
provider,
},
shared::helpers::extract_execution_result,
types::{
GenerateFileCommand, GenerateFilesOptions, ProviderCapabilities, RunCommandOptions,
SpawnNodeOptions,
},
DynNode, ProviderError, ProviderNamespace, ProviderNode,
};
pub struct DockerNamespace<FS>
where
FS: FileSystem + Send + Sync + Clone,
FS: FileSystem + Send + Sync + Clone,
{
weak: Weak<DockerNamespace<FS>>,
#[allow(dead_code)]
provider: Weak<DockerProvider<FS>>,
name: String,
base_dir: PathBuf,
capabilities: ProviderCapabilities,
docker_client: DockerClient,
filesystem: FS,
delete_on_drop: Arc<Mutex<bool>>,
pub(super) nodes: RwLock<HashMap<String, Arc<DockerNode<FS>>>>,
weak: Weak<DockerNamespace<FS>>,
#[allow(dead_code)]
provider: Weak<DockerProvider<FS>>,
name: String,
base_dir: PathBuf,
capabilities: ProviderCapabilities,
docker_client: DockerClient,
filesystem: FS,
delete_on_drop: Arc<Mutex<bool>>,
pub(super) nodes: RwLock<HashMap<String, Arc<DockerNode<FS>>>>,
}
impl<FS> DockerNamespace<FS>
where
FS: FileSystem + Send + Sync + Clone + 'static,
FS: FileSystem + Send + Sync + Clone + 'static,
{
pub(super) async fn new(
provider: &Weak<DockerProvider<FS>>,
tmp_dir: &PathBuf,
capabilities: &ProviderCapabilities,
docker_client: &DockerClient,
filesystem: &FS,
custom_base_dir: Option<&Path>,
) -> Result<Arc<Self>, ProviderError> {
let name = format!("{}{}", NAMESPACE_PREFIX, Uuid::new_v4());
let base_dir = if let Some(custom_base_dir) = custom_base_dir {
if !filesystem.exists(custom_base_dir).await {
filesystem.create_dir(custom_base_dir).await?;
} else {
warn!(
"⚠️ Using and existing directory {} as base dir",
custom_base_dir.to_string_lossy()
);
}
PathBuf::from(custom_base_dir)
} else {
let base_dir = PathBuf::from_iter([tmp_dir, &PathBuf::from(&name)]);
filesystem.create_dir(&base_dir).await?;
base_dir
};
pub(super) async fn new(
provider: &Weak<DockerProvider<FS>>,
tmp_dir: &PathBuf,
capabilities: &ProviderCapabilities,
docker_client: &DockerClient,
filesystem: &FS,
custom_base_dir: Option<&Path>,
) -> Result<Arc<Self>, ProviderError> {
let name = format!("{}{}", NAMESPACE_PREFIX, Uuid::new_v4());
let base_dir = if let Some(custom_base_dir) = custom_base_dir {
if !filesystem.exists(custom_base_dir).await {
filesystem.create_dir(custom_base_dir).await?;
} else {
warn!(
"⚠️ Using and existing directory {} as base dir",
custom_base_dir.to_string_lossy()
);
}
PathBuf::from(custom_base_dir)
} else {
let base_dir = PathBuf::from_iter([tmp_dir, &PathBuf::from(&name)]);
filesystem.create_dir(&base_dir).await?;
base_dir
};
let namespace = Arc::new_cyclic(|weak| DockerNamespace {
weak: weak.clone(),
provider: provider.clone(),
name,
base_dir,
capabilities: capabilities.clone(),
filesystem: filesystem.clone(),
docker_client: docker_client.clone(),
nodes: RwLock::new(HashMap::new()),
delete_on_drop: Arc::new(Mutex::new(true)),
});
let namespace = Arc::new_cyclic(|weak| DockerNamespace {
weak: weak.clone(),
provider: provider.clone(),
name,
base_dir,
capabilities: capabilities.clone(),
filesystem: filesystem.clone(),
docker_client: docker_client.clone(),
nodes: RwLock::new(HashMap::new()),
delete_on_drop: Arc::new(Mutex::new(true)),
});
namespace.initialize().await?;
namespace.initialize().await?;
Ok(namespace)
}
Ok(namespace)
}
pub(super) async fn attach_to_live(
provider: &Weak<DockerProvider<FS>>,
capabilities: &ProviderCapabilities,
docker_client: &DockerClient,
filesystem: &FS,
custom_base_dir: &Path,
name: &str,
) -> Result<Arc<Self>, ProviderError> {
let base_dir = custom_base_dir.to_path_buf();
pub(super) async fn attach_to_live(
provider: &Weak<DockerProvider<FS>>,
capabilities: &ProviderCapabilities,
docker_client: &DockerClient,
filesystem: &FS,
custom_base_dir: &Path,
name: &str,
) -> Result<Arc<Self>, ProviderError> {
let base_dir = custom_base_dir.to_path_buf();
let namespace = Arc::new_cyclic(|weak| DockerNamespace {
weak: weak.clone(),
provider: provider.clone(),
name: name.to_owned(),
base_dir,
capabilities: capabilities.clone(),
filesystem: filesystem.clone(),
docker_client: docker_client.clone(),
nodes: RwLock::new(HashMap::new()),
delete_on_drop: Arc::new(Mutex::new(false)),
});
let namespace = Arc::new_cyclic(|weak| DockerNamespace {
weak: weak.clone(),
provider: provider.clone(),
name: name.to_owned(),
base_dir,
capabilities: capabilities.clone(),
filesystem: filesystem.clone(),
docker_client: docker_client.clone(),
nodes: RwLock::new(HashMap::new()),
delete_on_drop: Arc::new(Mutex::new(false)),
});
Ok(namespace)
}
Ok(namespace)
}
/// One-time namespace setup: provisions the shared script and helper-binary
/// volumes used by every node in the namespace.
async fn initialize(&self) -> Result<(), ProviderError> {
    // let ns_scripts_shared = PathBuf::from_iter([&self.base_dir, &PathBuf::from("shared-scripts")]);
    // self.filesystem.create_dir(&ns_scripts_shared).await?;
    self.initialize_zombie_scripts_volume().await?;
    self.initialize_helper_binaries_volume().await?;

    Ok(())
}
async fn initialize_zombie_scripts_volume(&self) -> Result<(), ProviderError> {
let local_zombie_wrapper_path =
PathBuf::from_iter([&self.base_dir, &PathBuf::from("zombie-wrapper.sh")]);
async fn initialize_zombie_scripts_volume(&self) -> Result<(), ProviderError> {
let local_zombie_wrapper_path =
PathBuf::from_iter([&self.base_dir, &PathBuf::from("zombie-wrapper.sh")]);
self.filesystem
.write(
&local_zombie_wrapper_path,
include_str!("../shared/scripts/zombie-wrapper.sh"),
)
.await?;
self.filesystem
.write(&local_zombie_wrapper_path, include_str!("../shared/scripts/zombie-wrapper.sh"))
.await?;
let local_helper_binaries_downloader_path = PathBuf::from_iter([
&self.base_dir,
&PathBuf::from("helper-binaries-downloader.sh"),
]);
let local_helper_binaries_downloader_path =
PathBuf::from_iter([&self.base_dir, &PathBuf::from("helper-binaries-downloader.sh")]);
self.filesystem
.write(
&local_helper_binaries_downloader_path,
include_str!("../shared/scripts/helper-binaries-downloader.sh"),
)
.await?;
self.filesystem
.write(
&local_helper_binaries_downloader_path,
include_str!("../shared/scripts/helper-binaries-downloader.sh"),
)
.await?;
let zombie_wrapper_volume_name = format!("{}-zombie-wrapper", self.name);
let zombie_wrapper_container_name = format!("{}-scripts", self.name);
let zombie_wrapper_volume_name = format!("{}-zombie-wrapper", self.name);
let zombie_wrapper_container_name = format!("{}-scripts", self.name);
self.docker_client
.create_volume(&zombie_wrapper_volume_name)
.await
.map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?;
self.docker_client
.create_volume(&zombie_wrapper_volume_name)
.await
.map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?;
self.docker_client
.container_create(
ContainerRunOptions::new("alpine:latest", vec!["tail", "-f", "/dev/null"])
.volume_mounts(HashMap::from([(
zombie_wrapper_volume_name.as_str(),
"/scripts",
)]))
.name(&zombie_wrapper_container_name)
.rm(),
)
.await
.map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?;
self.docker_client
.container_create(
ContainerRunOptions::new("alpine:latest", vec!["tail", "-f", "/dev/null"])
.volume_mounts(HashMap::from([(
zombie_wrapper_volume_name.as_str(),
"/scripts",
)]))
.name(&zombie_wrapper_container_name)
.rm(),
)
.await
.map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?;
// copy the scripts
self.docker_client
.container_cp(
&zombie_wrapper_container_name,
&local_zombie_wrapper_path,
&PathBuf::from("/scripts/zombie-wrapper.sh"),
)
.await
.map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?;
// copy the scripts
self.docker_client
.container_cp(
&zombie_wrapper_container_name,
&local_zombie_wrapper_path,
&PathBuf::from("/scripts/zombie-wrapper.sh"),
)
.await
.map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?;
self.docker_client
.container_cp(
&zombie_wrapper_container_name,
&local_helper_binaries_downloader_path,
&PathBuf::from("/scripts/helper-binaries-downloader.sh"),
)
.await
.map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?;
self.docker_client
.container_cp(
&zombie_wrapper_container_name,
&local_helper_binaries_downloader_path,
&PathBuf::from("/scripts/helper-binaries-downloader.sh"),
)
.await
.map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?;
// set permissions for rwx on whole volume recursively
self.docker_client
.container_run(
ContainerRunOptions::new("alpine:latest", vec!["chmod", "-R", "777", "/scripts"])
.volume_mounts(HashMap::from([(
zombie_wrapper_volume_name.as_ref(),
"/scripts",
)]))
.rm(),
)
.await
.map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?;
// set permissions for rwx on whole volume recursively
self.docker_client
.container_run(
ContainerRunOptions::new("alpine:latest", vec!["chmod", "-R", "777", "/scripts"])
.volume_mounts(HashMap::from([(
zombie_wrapper_volume_name.as_ref(),
"/scripts",
)]))
.rm(),
)
.await
.map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?;
Ok(())
}
Ok(())
}
// NOTE(review): this region is a flattened diff — each statement appears
// twice (old/new formatting) and the `@@ … @@` hunk header below marks a
// point where the diff ELIDED source lines. The `.volume_mounts(...)` calls
// mounting the scripts and helper volumes into the downloader container are
// missing from both copies; the function cannot be reconstructed from this
// view alone. Restore it from the upstream zombienet-sdk sources — do not
// hand-edit. Intended behavior (from the surviving lines): create the
// `<ns>-helper-binaries` volume, run the downloader script in an alpine
// container to populate it, then chmod the volume 777.
async fn initialize_helper_binaries_volume(&self) -> Result<(), ProviderError> {
let helper_binaries_volume_name = format!("{}-helper-binaries", self.name);
let zombie_wrapper_volume_name = format!("{}-zombie-wrapper", self.name);
async fn initialize_helper_binaries_volume(&self) -> Result<(), ProviderError> {
let helper_binaries_volume_name = format!("{}-helper-binaries", self.name);
let zombie_wrapper_volume_name = format!("{}-zombie-wrapper", self.name);
self.docker_client
.create_volume(&helper_binaries_volume_name)
.await
.map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?;
self.docker_client
.create_volume(&helper_binaries_volume_name)
.await
.map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?;
// download binaries to volume
self.docker_client
.container_run(
ContainerRunOptions::new(
// download binaries to volume
self.docker_client
.container_run(
ContainerRunOptions::new(
"alpine:latest",
vec!["ash", "/scripts/helper-binaries-downloader.sh"],
)
// NOTE(review): hunk boundary — source lines elided here.
@@ -234,261 +229,242 @@ where
// wait until complete
.detach(false)
.rm(),
)
.await
.map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?;
)
.await
.map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?;
// set permissions for rwx on whole volume recursively
self.docker_client
.container_run(
ContainerRunOptions::new("alpine:latest", vec!["chmod", "-R", "777", "/helpers"])
.volume_mounts(HashMap::from([(
helper_binaries_volume_name.as_ref(),
"/helpers",
)]))
.rm(),
)
.await
.map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?;
// set permissions for rwx on whole volume recursively
self.docker_client
.container_run(
ContainerRunOptions::new("alpine:latest", vec!["chmod", "-R", "777", "/helpers"])
.volume_mounts(HashMap::from([(
helper_binaries_volume_name.as_ref(),
"/helpers",
)]))
.rm(),
)
.await
.map_err(|err| ProviderError::CreateNamespaceFailed(self.name.clone(), err.into()))?;
Ok(())
}
Ok(())
}
pub async fn set_delete_on_drop(&self, delete_on_drop: bool) {
*self.delete_on_drop.lock().await = delete_on_drop;
}
pub async fn set_delete_on_drop(&self, delete_on_drop: bool) {
*self.delete_on_drop.lock().await = delete_on_drop;
}
pub async fn delete_on_drop(&self) -> bool {
if let Ok(delete_on_drop) = self.delete_on_drop.try_lock() {
*delete_on_drop
} else {
// if we can't lock just remove the ns
true
}
}
pub async fn delete_on_drop(&self) -> bool {
if let Ok(delete_on_drop) = self.delete_on_drop.try_lock() {
*delete_on_drop
} else {
// if we can't lock just remove the ns
true
}
}
}
#[async_trait]
impl<FS> ProviderNamespace for DockerNamespace<FS>
where
FS: FileSystem + Send + Sync + Clone + 'static,
FS: FileSystem + Send + Sync + Clone + 'static,
{
fn name(&self) -> &str {
&self.name
}
fn name(&self) -> &str {
&self.name
}
fn base_dir(&self) -> &PathBuf {
&self.base_dir
}
fn base_dir(&self) -> &PathBuf {
&self.base_dir
}
fn capabilities(&self) -> &ProviderCapabilities {
&self.capabilities
}
fn capabilities(&self) -> &ProviderCapabilities {
&self.capabilities
}
fn provider_name(&self) -> &str {
provider::PROVIDER_NAME
}
fn provider_name(&self) -> &str {
provider::PROVIDER_NAME
}
async fn detach(&self) {
self.set_delete_on_drop(false).await;
}
async fn detach(&self) {
self.set_delete_on_drop(false).await;
}
async fn is_detached(&self) -> bool {
self.delete_on_drop().await
}
async fn is_detached(&self) -> bool {
self.delete_on_drop().await
}
async fn nodes(&self) -> HashMap<String, DynNode> {
self.nodes
.read()
.await
.iter()
.map(|(name, node)| (name.clone(), node.clone() as DynNode))
.collect()
}
async fn nodes(&self) -> HashMap<String, DynNode> {
self.nodes
.read()
.await
.iter()
.map(|(name, node)| (name.clone(), node.clone() as DynNode))
.collect()
}
/// Discovers the CLI arguments a node binary supports by spawning a
/// throwaway container for `image` and running `<command> --help` in it.
/// The temporary container is destroyed before returning.
async fn get_node_available_args(
    &self,
    (command, image): (String, Option<String>),
) -> Result<String, ProviderError> {
    let node_image = image.expect(&format!("image should be present when getting node available args with docker provider {THIS_IS_A_BUG}"));

    // `cat` with no input blocks forever, keeping the container alive so we
    // can exec into it.
    let temp_node = self
        .spawn_node(
            &SpawnNodeOptions::new(format!("temp-{}", Uuid::new_v4()), "cat".to_string())
                .image(node_image.clone()),
        )
        .await?;

    let available_args_output = temp_node
        .run_command(RunCommandOptions::new(command.clone()).args(vec!["--help"]))
        .await?
        .map_err(|(_exit, status)| {
            ProviderError::NodeAvailableArgsError(node_image, command, status)
        })?;

    temp_node.destroy().await?;

    Ok(available_args_output)
}
/// Spawns a node container from `options` and registers it in the namespace.
async fn spawn_node(&self, options: &SpawnNodeOptions) -> Result<DynNode, ProviderError> {
    debug!("spawn option {:?}", options);

    let node = DockerNode::new(DockerNodeOptions {
        namespace: &self.weak,
        namespace_base_dir: &self.base_dir,
        name: &options.name,
        image: options.image.as_ref(),
        program: &options.program,
        args: &options.args,
        env: &options.env,
        startup_files: &options.injected_files,
        db_snapshot: options.db_snapshot.as_ref(),
        docker_client: &self.docker_client,
        // Container names are namespaced to allow bulk cleanup by prefix.
        container_name: format!("{}-{}", self.name, options.name),
        filesystem: &self.filesystem,
        port_mapping: options.port_mapping.as_ref().unwrap_or(&HashMap::default()),
    })
    .await?;

    self.nodes.write().await.insert(node.name().to_string(), node.clone());

    Ok(node)
}
/// Re-attaches to an already-running node described by a serialized
/// `DeserializableDockerNodeOptions` JSON value and registers it.
async fn spawn_node_from_json(
    &self,
    json_value: &serde_json::Value,
) -> Result<DynNode, ProviderError> {
    let deserializable: DeserializableDockerNodeOptions =
        serde_json::from_value(json_value.clone())?;
    let options = DockerNodeOptions::from_deserializable(
        &deserializable,
        &self.weak,
        &self.base_dir,
        &self.docker_client,
        &self.filesystem,
    );

    let node = DockerNode::attach_to_live(options).await?;

    self.nodes.write().await.insert(node.name().to_string(), node.clone());

    Ok(node)
}
/// Generates files by running each command inside a temporary container and
/// writing the captured output under the namespace base dir.
/// The temporary container is destroyed at the end (also on success path via
/// the returned future).
async fn generate_files(&self, options: GenerateFilesOptions) -> Result<(), ProviderError> {
    debug!("generate files options {options:#?}");

    let node_name = options.temp_name.unwrap_or_else(|| format!("temp-{}", Uuid::new_v4()));
    let node_image = options.image.expect(&format!(
        "image should be present when generating files with docker provider {THIS_IS_A_BUG}"
    ));

    // run dummy command in a new container
    let temp_node = self
        .spawn_node(
            &SpawnNodeOptions::new(node_name, "cat".to_string())
                .injected_files(options.injected_files)
                .image(node_image),
        )
        .await?;

    for GenerateFileCommand { program, args, env, local_output_path } in options.commands {
        // Join base_dir and the (possibly relative) output path manually,
        // inserting a separator only when needed.
        let local_output_full_path = format!(
            "{}{}{}",
            self.base_dir.to_string_lossy(),
            if local_output_path.starts_with("/") { "" } else { "/" },
            local_output_path.to_string_lossy()
        );

        let contents = extract_execution_result(
            &temp_node,
            RunCommandOptions { program, args, env },
            options.expected_path.as_ref(),
        )
        .await?;

        self.filesystem
            .write(local_output_full_path, contents)
            .await
            .map_err(|err| ProviderError::FileGenerationFailed(err.into()))?;
    }

    temp_node.destroy().await
}
async fn static_setup(&self) -> Result<(), ProviderError> {
todo!()
}
async fn static_setup(&self) -> Result<(), ProviderError> {
todo!()
}
async fn destroy(&self) -> Result<(), ProviderError> {
let _ = self
.docker_client
.namespaced_containers_rm(&self.name)
.await
.map_err(|err| ProviderError::DeleteNamespaceFailed(self.name.clone(), err.into()))?;
async fn destroy(&self) -> Result<(), ProviderError> {
let _ =
self.docker_client.namespaced_containers_rm(&self.name).await.map_err(|err| {
ProviderError::DeleteNamespaceFailed(self.name.clone(), err.into())
})?;
if let Some(provider) = self.provider.upgrade() {
provider.namespaces.write().await.remove(&self.name);
}
if let Some(provider) = self.provider.upgrade() {
provider.namespaces.write().await.remove(&self.name);
}
Ok(())
}
Ok(())
}
}
impl<FS> Drop for DockerNamespace<FS>
where
FS: FileSystem + Send + Sync + Clone,
FS: FileSystem + Send + Sync + Clone,
{
fn drop(&mut self) {
let ns_name = self.name.clone();
if let Ok(delete_on_drop) = self.delete_on_drop.try_lock() {
if *delete_on_drop {
let client = self.docker_client.clone();
let provider = self.provider.upgrade();
fn drop(&mut self) {
let ns_name = self.name.clone();
if let Ok(delete_on_drop) = self.delete_on_drop.try_lock() {
if *delete_on_drop {
let client = self.docker_client.clone();
let provider = self.provider.upgrade();
let handler = thread::spawn(move || {
let rt = tokio::runtime::Runtime::new().unwrap();
rt.block_on(async move {
trace!("🧟 deleting ns {ns_name} from cluster");
let _ = client.namespaced_containers_rm(&ns_name).await;
trace!("✅ deleted");
});
});
let handler = thread::spawn(move || {
let rt = tokio::runtime::Runtime::new().unwrap();
rt.block_on(async move {
trace!("🧟 deleting ns {ns_name} from cluster");
let _ = client.namespaced_containers_rm(&ns_name).await;
trace!("✅ deleted");
});
});
if handler.join().is_ok() {
if let Some(provider) = provider {
if let Ok(mut p) = provider.namespaces.try_write() {
p.remove(&self.name);
} else {
warn!(
"⚠️ Can not acquire write lock to the provider, ns {} not removed",
self.name
);
}
}
}
} else {
trace!("⚠️ leaking ns {ns_name} in cluster");
}
};
}
if handler.join().is_ok() {
if let Some(provider) = provider {
if let Ok(mut p) = provider.namespaces.try_write() {
p.remove(&self.name);
} else {
warn!(
"⚠️ Can not acquire write lock to the provider, ns {} not removed",
self.name
);
}
}
}
} else {
trace!("⚠️ leaking ns {ns_name} in cluster");
}
};
}
}
File diff suppressed because it is too large Load Diff
@@ -1,7 +1,7 @@
use std::{
collections::HashMap,
path::{Path, PathBuf},
sync::{Arc, Weak},
collections::HashMap,
path::{Path, PathBuf},
sync::{Arc, Weak},
};
use async_trait::async_trait;
@@ -10,152 +10,143 @@ use tokio::sync::RwLock;
use super::{client::DockerClient, namespace::DockerNamespace};
use crate::{
shared::helpers::extract_namespace_info, types::ProviderCapabilities, DynNamespace, Provider,
ProviderError, ProviderNamespace,
shared::helpers::extract_namespace_info, types::ProviderCapabilities, DynNamespace, Provider,
ProviderError, ProviderNamespace,
};
pub const PROVIDER_NAME: &str = "docker";
pub struct DockerProvider<FS>
where
FS: FileSystem + Send + Sync + Clone,
FS: FileSystem + Send + Sync + Clone,
{
weak: Weak<DockerProvider<FS>>,
capabilities: ProviderCapabilities,
tmp_dir: PathBuf,
docker_client: DockerClient,
filesystem: FS,
pub(super) namespaces: RwLock<HashMap<String, Arc<DockerNamespace<FS>>>>,
weak: Weak<DockerProvider<FS>>,
capabilities: ProviderCapabilities,
tmp_dir: PathBuf,
docker_client: DockerClient,
filesystem: FS,
pub(super) namespaces: RwLock<HashMap<String, Arc<DockerNamespace<FS>>>>,
}
impl<FS> DockerProvider<FS>
where
FS: FileSystem + Send + Sync + Clone + 'static,
FS: FileSystem + Send + Sync + Clone + 'static,
{
pub async fn new(filesystem: FS) -> Arc<Self> {
let docker_client = DockerClient::new().await.unwrap();
pub async fn new(filesystem: FS) -> Arc<Self> {
let docker_client = DockerClient::new().await.unwrap();
let provider = Arc::new_cyclic(|weak| DockerProvider {
weak: weak.clone(),
capabilities: ProviderCapabilities {
requires_image: true,
has_resources: false,
prefix_with_full_path: false,
use_default_ports_in_cmd: true,
},
tmp_dir: std::env::temp_dir(),
docker_client,
filesystem,
namespaces: RwLock::new(HashMap::new()),
});
let provider = Arc::new_cyclic(|weak| DockerProvider {
weak: weak.clone(),
capabilities: ProviderCapabilities {
requires_image: true,
has_resources: false,
prefix_with_full_path: false,
use_default_ports_in_cmd: true,
},
tmp_dir: std::env::temp_dir(),
docker_client,
filesystem,
namespaces: RwLock::new(HashMap::new()),
});
let cloned_provider = provider.clone();
tokio::spawn(async move {
tokio::signal::ctrl_c().await.unwrap();
for (_, ns) in cloned_provider.namespaces().await {
if ns.is_detached().await {
// best effort
let _ = ns.destroy().await;
}
}
let cloned_provider = provider.clone();
tokio::spawn(async move {
tokio::signal::ctrl_c().await.unwrap();
for (_, ns) in cloned_provider.namespaces().await {
if ns.is_detached().await {
// best effort
let _ = ns.destroy().await;
}
}
// exit the process (130, SIGINT)
std::process::exit(130)
});
// exit the process (130, SIGINT)
std::process::exit(130)
});
provider
}
provider
}
pub fn tmp_dir(mut self, tmp_dir: impl Into<PathBuf>) -> Self {
self.tmp_dir = tmp_dir.into();
self
}
pub fn tmp_dir(mut self, tmp_dir: impl Into<PathBuf>) -> Self {
self.tmp_dir = tmp_dir.into();
self
}
}
#[async_trait]
impl<FS> Provider for DockerProvider<FS>
where
FS: FileSystem + Send + Sync + Clone + 'static,
FS: FileSystem + Send + Sync + Clone + 'static,
{
fn name(&self) -> &str {
PROVIDER_NAME
}
fn name(&self) -> &str {
PROVIDER_NAME
}
fn capabilities(&self) -> &ProviderCapabilities {
&self.capabilities
}
fn capabilities(&self) -> &ProviderCapabilities {
&self.capabilities
}
async fn namespaces(&self) -> HashMap<String, DynNamespace> {
self.namespaces
.read()
.await
.iter()
.map(|(name, namespace)| (name.clone(), namespace.clone() as DynNamespace))
.collect()
}
async fn namespaces(&self) -> HashMap<String, DynNamespace> {
self.namespaces
.read()
.await
.iter()
.map(|(name, namespace)| (name.clone(), namespace.clone() as DynNamespace))
.collect()
}
async fn create_namespace(&self) -> Result<DynNamespace, ProviderError> {
let namespace = DockerNamespace::new(
&self.weak,
&self.tmp_dir,
&self.capabilities,
&self.docker_client,
&self.filesystem,
None,
)
.await?;
async fn create_namespace(&self) -> Result<DynNamespace, ProviderError> {
let namespace = DockerNamespace::new(
&self.weak,
&self.tmp_dir,
&self.capabilities,
&self.docker_client,
&self.filesystem,
None,
)
.await?;
self.namespaces
.write()
.await
.insert(namespace.name().to_string(), namespace.clone());
self.namespaces.write().await.insert(namespace.name().to_string(), namespace.clone());
Ok(namespace)
}
Ok(namespace)
}
async fn create_namespace_with_base_dir(
&self,
base_dir: &Path,
) -> Result<DynNamespace, ProviderError> {
let namespace = DockerNamespace::new(
&self.weak,
&self.tmp_dir,
&self.capabilities,
&self.docker_client,
&self.filesystem,
Some(base_dir),
)
.await?;
async fn create_namespace_with_base_dir(
&self,
base_dir: &Path,
) -> Result<DynNamespace, ProviderError> {
let namespace = DockerNamespace::new(
&self.weak,
&self.tmp_dir,
&self.capabilities,
&self.docker_client,
&self.filesystem,
Some(base_dir),
)
.await?;
self.namespaces
.write()
.await
.insert(namespace.name().to_string(), namespace.clone());
self.namespaces.write().await.insert(namespace.name().to_string(), namespace.clone());
Ok(namespace)
}
Ok(namespace)
}
async fn create_namespace_from_json(
&self,
json_value: &serde_json::Value,
) -> Result<DynNamespace, ProviderError> {
let (base_dir, name) = extract_namespace_info(json_value)?;
async fn create_namespace_from_json(
&self,
json_value: &serde_json::Value,
) -> Result<DynNamespace, ProviderError> {
let (base_dir, name) = extract_namespace_info(json_value)?;
let namespace = DockerNamespace::attach_to_live(
&self.weak,
&self.capabilities,
&self.docker_client,
&self.filesystem,
&base_dir,
&name,
)
.await?;
let namespace = DockerNamespace::attach_to_live(
&self.weak,
&self.capabilities,
&self.docker_client,
&self.filesystem,
&base_dir,
&name,
)
.await?;
self.namespaces
.write()
.await
.insert(namespace.name().to_string(), namespace.clone());
self.namespaces.write().await.insert(namespace.name().to_string(), namespace.clone());
Ok(namespace)
}
Ok(namespace)
}
}
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
@@ -2,76 +2,73 @@ use std::collections::BTreeMap;
use configuration::shared::resources::{ResourceQuantity, Resources};
use k8s_openapi::{
api::core::v1::{
ConfigMapVolumeSource, Container, EnvVar, PodSpec, ResourceRequirements, Volume,
VolumeMount,
},
apimachinery::pkg::api::resource::Quantity,
api::core::v1::{
ConfigMapVolumeSource, Container, EnvVar, PodSpec, ResourceRequirements, Volume,
VolumeMount,
},
apimachinery::pkg::api::resource::Quantity,
};
pub(super) struct PodSpecBuilder;
impl PodSpecBuilder {
pub(super) fn build(
name: &str,
image: &str,
resources: Option<&Resources>,
program: &str,
args: &[String],
env: &[(String, String)],
) -> PodSpec {
PodSpec {
hostname: Some(name.to_string()),
init_containers: Some(vec![Self::build_helper_binaries_setup_container()]),
containers: vec![Self::build_main_container(
name, image, resources, program, args, env,
)],
volumes: Some(Self::build_volumes()),
..Default::default()
}
}
pub(super) fn build(
name: &str,
image: &str,
resources: Option<&Resources>,
program: &str,
args: &[String],
env: &[(String, String)],
) -> PodSpec {
PodSpec {
hostname: Some(name.to_string()),
init_containers: Some(vec![Self::build_helper_binaries_setup_container()]),
containers: vec![Self::build_main_container(
name, image, resources, program, args, env,
)],
volumes: Some(Self::build_volumes()),
..Default::default()
}
}
fn build_main_container(
name: &str,
image: &str,
resources: Option<&Resources>,
program: &str,
args: &[String],
env: &[(String, String)],
) -> Container {
Container {
name: name.to_string(),
image: Some(image.to_string()),
image_pull_policy: Some("Always".to_string()),
command: Some(
[
vec!["/zombie-wrapper.sh".to_string(), program.to_string()],
args.to_vec(),
]
.concat(),
),
env: Some(
env.iter()
.map(|(name, value)| EnvVar {
name: name.clone(),
value: Some(value.clone()),
value_from: None,
})
.collect(),
),
volume_mounts: Some(Self::build_volume_mounts(vec![VolumeMount {
name: "zombie-wrapper-volume".to_string(),
mount_path: "/zombie-wrapper.sh".to_string(),
sub_path: Some("zombie-wrapper.sh".to_string()),
..Default::default()
}])),
resources: Self::build_resources_requirements(resources),
..Default::default()
}
}
fn build_main_container(
name: &str,
image: &str,
resources: Option<&Resources>,
program: &str,
args: &[String],
env: &[(String, String)],
) -> Container {
Container {
name: name.to_string(),
image: Some(image.to_string()),
image_pull_policy: Some("Always".to_string()),
command: Some(
[vec!["/zombie-wrapper.sh".to_string(), program.to_string()], args.to_vec()]
.concat(),
),
env: Some(
env.iter()
.map(|(name, value)| EnvVar {
name: name.clone(),
value: Some(value.clone()),
value_from: None,
})
.collect(),
),
volume_mounts: Some(Self::build_volume_mounts(vec![VolumeMount {
name: "zombie-wrapper-volume".to_string(),
mount_path: "/zombie-wrapper.sh".to_string(),
sub_path: Some("zombie-wrapper.sh".to_string()),
..Default::default()
}])),
resources: Self::build_resources_requirements(resources),
..Default::default()
}
}
fn build_helper_binaries_setup_container() -> Container {
Container {
fn build_helper_binaries_setup_container() -> Container {
Container {
name: "helper-binaries-setup".to_string(),
image: Some("europe-west3-docker.pkg.dev/parity-zombienet/zombienet-public-images/alpine:latest".to_string()),
image_pull_policy: Some("IfNotPresent".to_string()),
@@ -87,102 +84,93 @@ impl PodSpecBuilder {
]),
..Default::default()
}
}
}
fn build_volumes() -> Vec<Volume> {
vec![
Volume {
name: "cfg".to_string(),
..Default::default()
},
Volume {
name: "data".to_string(),
..Default::default()
},
Volume {
name: "relay-data".to_string(),
..Default::default()
},
Volume {
name: "zombie-wrapper-volume".to_string(),
config_map: Some(ConfigMapVolumeSource {
name: Some("zombie-wrapper".to_string()),
default_mode: Some(0o755),
..Default::default()
}),
..Default::default()
},
Volume {
name: "helper-binaries-downloader-volume".to_string(),
config_map: Some(ConfigMapVolumeSource {
name: Some("helper-binaries-downloader".to_string()),
default_mode: Some(0o755),
..Default::default()
}),
..Default::default()
},
]
}
fn build_volumes() -> Vec<Volume> {
vec![
Volume { name: "cfg".to_string(), ..Default::default() },
Volume { name: "data".to_string(), ..Default::default() },
Volume { name: "relay-data".to_string(), ..Default::default() },
Volume {
name: "zombie-wrapper-volume".to_string(),
config_map: Some(ConfigMapVolumeSource {
name: Some("zombie-wrapper".to_string()),
default_mode: Some(0o755),
..Default::default()
}),
..Default::default()
},
Volume {
name: "helper-binaries-downloader-volume".to_string(),
config_map: Some(ConfigMapVolumeSource {
name: Some("helper-binaries-downloader".to_string()),
default_mode: Some(0o755),
..Default::default()
}),
..Default::default()
},
]
}
fn build_volume_mounts(non_default_mounts: Vec<VolumeMount>) -> Vec<VolumeMount> {
[
vec![
VolumeMount {
name: "cfg".to_string(),
mount_path: "/cfg".to_string(),
read_only: Some(false),
..Default::default()
},
VolumeMount {
name: "data".to_string(),
mount_path: "/data".to_string(),
read_only: Some(false),
..Default::default()
},
VolumeMount {
name: "relay-data".to_string(),
mount_path: "/relay-data".to_string(),
read_only: Some(false),
..Default::default()
},
],
non_default_mounts,
]
.concat()
}
fn build_volume_mounts(non_default_mounts: Vec<VolumeMount>) -> Vec<VolumeMount> {
[
vec![
VolumeMount {
name: "cfg".to_string(),
mount_path: "/cfg".to_string(),
read_only: Some(false),
..Default::default()
},
VolumeMount {
name: "data".to_string(),
mount_path: "/data".to_string(),
read_only: Some(false),
..Default::default()
},
VolumeMount {
name: "relay-data".to_string(),
mount_path: "/relay-data".to_string(),
read_only: Some(false),
..Default::default()
},
],
non_default_mounts,
]
.concat()
}
fn build_resources_requirements(resources: Option<&Resources>) -> Option<ResourceRequirements> {
resources.map(|resources| ResourceRequirements {
limits: Self::build_resources_requirements_quantities(
resources.limit_cpu(),
resources.limit_memory(),
),
requests: Self::build_resources_requirements_quantities(
resources.request_cpu(),
resources.request_memory(),
),
..Default::default()
})
}
fn build_resources_requirements(resources: Option<&Resources>) -> Option<ResourceRequirements> {
resources.map(|resources| ResourceRequirements {
limits: Self::build_resources_requirements_quantities(
resources.limit_cpu(),
resources.limit_memory(),
),
requests: Self::build_resources_requirements_quantities(
resources.request_cpu(),
resources.request_memory(),
),
..Default::default()
})
}
fn build_resources_requirements_quantities(
cpu: Option<&ResourceQuantity>,
memory: Option<&ResourceQuantity>,
) -> Option<BTreeMap<String, Quantity>> {
let mut quantities = BTreeMap::new();
fn build_resources_requirements_quantities(
cpu: Option<&ResourceQuantity>,
memory: Option<&ResourceQuantity>,
) -> Option<BTreeMap<String, Quantity>> {
let mut quantities = BTreeMap::new();
if let Some(cpu) = cpu {
quantities.insert("cpu".to_string(), Quantity(cpu.as_str().to_string()));
}
if let Some(cpu) = cpu {
quantities.insert("cpu".to_string(), Quantity(cpu.as_str().to_string()));
}
if let Some(memory) = memory {
quantities.insert("memory".to_string(), Quantity(memory.as_str().to_string()));
}
if let Some(memory) = memory {
quantities.insert("memory".to_string(), Quantity(memory.as_str().to_string()));
}
if !quantities.is_empty() {
Some(quantities)
} else {
None
}
}
if !quantities.is_empty() {
Some(quantities)
} else {
None
}
}
}
@@ -1,7 +1,7 @@
use std::{
collections::HashMap,
path::{Path, PathBuf},
sync::{Arc, Weak},
collections::HashMap,
path::{Path, PathBuf},
sync::{Arc, Weak},
};
use async_trait::async_trait;
@@ -10,136 +10,127 @@ use tokio::sync::RwLock;
use super::{client::KubernetesClient, namespace::KubernetesNamespace};
use crate::{
shared::helpers::extract_namespace_info, types::ProviderCapabilities, DynNamespace, Provider,
ProviderError, ProviderNamespace,
shared::helpers::extract_namespace_info, types::ProviderCapabilities, DynNamespace, Provider,
ProviderError, ProviderNamespace,
};
pub const PROVIDER_NAME: &str = "k8s";
pub struct KubernetesProvider<FS>
where
FS: FileSystem + Send + Sync + Clone,
FS: FileSystem + Send + Sync + Clone,
{
weak: Weak<KubernetesProvider<FS>>,
capabilities: ProviderCapabilities,
tmp_dir: PathBuf,
k8s_client: KubernetesClient,
filesystem: FS,
pub(super) namespaces: RwLock<HashMap<String, Arc<KubernetesNamespace<FS>>>>,
weak: Weak<KubernetesProvider<FS>>,
capabilities: ProviderCapabilities,
tmp_dir: PathBuf,
k8s_client: KubernetesClient,
filesystem: FS,
pub(super) namespaces: RwLock<HashMap<String, Arc<KubernetesNamespace<FS>>>>,
}
impl<FS> KubernetesProvider<FS>
where
FS: FileSystem + Send + Sync + Clone,
FS: FileSystem + Send + Sync + Clone,
{
pub async fn new(filesystem: FS) -> Arc<Self> {
let k8s_client = KubernetesClient::new().await.unwrap();
pub async fn new(filesystem: FS) -> Arc<Self> {
let k8s_client = KubernetesClient::new().await.unwrap();
Arc::new_cyclic(|weak| KubernetesProvider {
weak: weak.clone(),
capabilities: ProviderCapabilities {
requires_image: true,
has_resources: true,
prefix_with_full_path: false,
use_default_ports_in_cmd: true,
},
tmp_dir: std::env::temp_dir(),
k8s_client,
filesystem,
namespaces: RwLock::new(HashMap::new()),
})
}
Arc::new_cyclic(|weak| KubernetesProvider {
weak: weak.clone(),
capabilities: ProviderCapabilities {
requires_image: true,
has_resources: true,
prefix_with_full_path: false,
use_default_ports_in_cmd: true,
},
tmp_dir: std::env::temp_dir(),
k8s_client,
filesystem,
namespaces: RwLock::new(HashMap::new()),
})
}
pub fn tmp_dir(mut self, tmp_dir: impl Into<PathBuf>) -> Self {
self.tmp_dir = tmp_dir.into();
self
}
pub fn tmp_dir(mut self, tmp_dir: impl Into<PathBuf>) -> Self {
self.tmp_dir = tmp_dir.into();
self
}
}
#[async_trait]
impl<FS> Provider for KubernetesProvider<FS>
where
FS: FileSystem + Send + Sync + Clone + 'static,
FS: FileSystem + Send + Sync + Clone + 'static,
{
fn name(&self) -> &str {
PROVIDER_NAME
}
fn name(&self) -> &str {
PROVIDER_NAME
}
fn capabilities(&self) -> &ProviderCapabilities {
&self.capabilities
}
fn capabilities(&self) -> &ProviderCapabilities {
&self.capabilities
}
async fn namespaces(&self) -> HashMap<String, DynNamespace> {
self.namespaces
.read()
.await
.iter()
.map(|(name, namespace)| (name.clone(), namespace.clone() as DynNamespace))
.collect()
}
async fn namespaces(&self) -> HashMap<String, DynNamespace> {
self.namespaces
.read()
.await
.iter()
.map(|(name, namespace)| (name.clone(), namespace.clone() as DynNamespace))
.collect()
}
async fn create_namespace(&self) -> Result<DynNamespace, ProviderError> {
let namespace = KubernetesNamespace::new(
&self.weak,
&self.tmp_dir,
&self.capabilities,
&self.k8s_client,
&self.filesystem,
None,
)
.await?;
async fn create_namespace(&self) -> Result<DynNamespace, ProviderError> {
let namespace = KubernetesNamespace::new(
&self.weak,
&self.tmp_dir,
&self.capabilities,
&self.k8s_client,
&self.filesystem,
None,
)
.await?;
self.namespaces
.write()
.await
.insert(namespace.name().to_string(), namespace.clone());
self.namespaces.write().await.insert(namespace.name().to_string(), namespace.clone());
Ok(namespace)
}
Ok(namespace)
}
async fn create_namespace_with_base_dir(
&self,
base_dir: &Path,
) -> Result<DynNamespace, ProviderError> {
let namespace = KubernetesNamespace::new(
&self.weak,
&self.tmp_dir,
&self.capabilities,
&self.k8s_client,
&self.filesystem,
Some(base_dir),
)
.await?;
async fn create_namespace_with_base_dir(
&self,
base_dir: &Path,
) -> Result<DynNamespace, ProviderError> {
let namespace = KubernetesNamespace::new(
&self.weak,
&self.tmp_dir,
&self.capabilities,
&self.k8s_client,
&self.filesystem,
Some(base_dir),
)
.await?;
self.namespaces
.write()
.await
.insert(namespace.name().to_string(), namespace.clone());
self.namespaces.write().await.insert(namespace.name().to_string(), namespace.clone());
Ok(namespace)
}
Ok(namespace)
}
async fn create_namespace_from_json(
&self,
json_value: &serde_json::Value,
) -> Result<DynNamespace, ProviderError> {
let (base_dir, name) = extract_namespace_info(json_value)?;
async fn create_namespace_from_json(
&self,
json_value: &serde_json::Value,
) -> Result<DynNamespace, ProviderError> {
let (base_dir, name) = extract_namespace_info(json_value)?;
let namespace = KubernetesNamespace::attach_to_live(
&self.weak,
&self.capabilities,
&self.k8s_client,
&self.filesystem,
&base_dir,
&name,
)
.await?;
let namespace = KubernetesNamespace::attach_to_live(
&self.weak,
&self.capabilities,
&self.k8s_client,
&self.filesystem,
&base_dir,
&name,
)
.await?;
self.namespaces
.write()
.await
.insert(namespace.name().to_string(), namespace.clone());
self.namespaces.write().await.insert(namespace.name().to_string(), namespace.clone());
Ok(namespace)
}
Ok(namespace)
}
}
+154 -154
View File
@@ -5,253 +5,253 @@ mod native;
pub mod shared;
use std::{
collections::HashMap,
net::IpAddr,
path::{Path, PathBuf},
sync::Arc,
time::Duration,
collections::HashMap,
net::IpAddr,
path::{Path, PathBuf},
sync::Arc,
time::Duration,
};
use async_trait::async_trait;
use shared::{
constants::LOCALHOST,
types::{
ExecutionResult, GenerateFilesOptions, ProviderCapabilities, RunCommandOptions,
RunScriptOptions, SpawnNodeOptions,
},
constants::LOCALHOST,
types::{
ExecutionResult, GenerateFilesOptions, ProviderCapabilities, RunCommandOptions,
RunScriptOptions, SpawnNodeOptions,
},
};
use support::fs::FileSystemError;
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
pub enum ProviderError {
#[error("Failed to create client '{0}': {1}")]
CreateClientFailed(String, anyhow::Error),
#[error("Failed to create client '{0}': {1}")]
CreateClientFailed(String, anyhow::Error),
#[error("Failed to create namespace '{0}': {1}")]
CreateNamespaceFailed(String, anyhow::Error),
#[error("Failed to create namespace '{0}': {1}")]
CreateNamespaceFailed(String, anyhow::Error),
#[error("Failed to spawn node '{0}': {1}")]
NodeSpawningFailed(String, anyhow::Error),
#[error("Failed to spawn node '{0}': {1}")]
NodeSpawningFailed(String, anyhow::Error),
#[error("Error running command '{0}' {1}: {2}")]
RunCommandError(String, String, anyhow::Error),
#[error("Error running command '{0}' {1}: {2}")]
RunCommandError(String, String, anyhow::Error),
#[error("Error running script'{0}': {1}")]
RunScriptError(String, anyhow::Error),
#[error("Error running script'{0}': {1}")]
RunScriptError(String, anyhow::Error),
#[error("Invalid network configuration field {0}")]
InvalidConfig(String),
#[error("Invalid network configuration field {0}")]
InvalidConfig(String),
#[error("Failed to retrieve node available args using image {0} and command {1}: {2}")]
NodeAvailableArgsError(String, String, String),
#[error("Failed to retrieve node available args using image {0} and command {1}: {2}")]
NodeAvailableArgsError(String, String, String),
#[error("Can not recover node: {0}")]
MissingNode(String),
#[error("Can not recover node: {0}")]
MissingNode(String),
#[error("Can not recover node: {0} info, field: {1}")]
MissingNodeInfo(String, String),
#[error("Can not recover node: {0} info, field: {1}")]
MissingNodeInfo(String, String),
#[error("File generation failed: {0}")]
FileGenerationFailed(anyhow::Error),
#[error("File generation failed: {0}")]
FileGenerationFailed(anyhow::Error),
#[error(transparent)]
FileSystemError(#[from] FileSystemError),
#[error(transparent)]
FileSystemError(#[from] FileSystemError),
#[error("Invalid script path for {0}")]
InvalidScriptPath(anyhow::Error),
#[error("Invalid script path for {0}")]
InvalidScriptPath(anyhow::Error),
#[error("Script with path {0} not found")]
ScriptNotFound(PathBuf),
#[error("Script with path {0} not found")]
ScriptNotFound(PathBuf),
#[error("Failed to retrieve process ID for node '{0}'")]
ProcessIdRetrievalFailed(String),
#[error("Failed to retrieve process ID for node '{0}'")]
ProcessIdRetrievalFailed(String),
#[error("Failed to pause node '{0}': {1}")]
PauseNodeFailed(String, anyhow::Error),
#[error("Failed to pause node '{0}': {1}")]
PauseNodeFailed(String, anyhow::Error),
#[error("Failed to resume node '{0}': {1}")]
ResumeNodeFailed(String, anyhow::Error),
#[error("Failed to resume node '{0}': {1}")]
ResumeNodeFailed(String, anyhow::Error),
#[error("Failed to kill node '{0}': {1}")]
KillNodeFailed(String, anyhow::Error),
#[error("Failed to kill node '{0}': {1}")]
KillNodeFailed(String, anyhow::Error),
#[error("Failed to restart node '{0}': {1}")]
RestartNodeFailed(String, anyhow::Error),
#[error("Failed to restart node '{0}': {1}")]
RestartNodeFailed(String, anyhow::Error),
#[error("Failed to destroy node '{0}': {1}")]
DestroyNodeFailed(String, anyhow::Error),
#[error("Failed to destroy node '{0}': {1}")]
DestroyNodeFailed(String, anyhow::Error),
#[error("Failed to get logs for node '{0}': {1}")]
GetLogsFailed(String, anyhow::Error),
#[error("Failed to get logs for node '{0}': {1}")]
GetLogsFailed(String, anyhow::Error),
#[error("Failed to dump logs for node '{0}': {1}")]
DumpLogsFailed(String, anyhow::Error),
#[error("Failed to dump logs for node '{0}': {1}")]
DumpLogsFailed(String, anyhow::Error),
#[error("Failed to copy file from node '{0}': {1}")]
CopyFileFromNodeError(String, anyhow::Error),
#[error("Failed to copy file from node '{0}': {1}")]
CopyFileFromNodeError(String, anyhow::Error),
#[error("Failed to setup fileserver: {0}")]
FileServerSetupError(anyhow::Error),
#[error("Failed to setup fileserver: {0}")]
FileServerSetupError(anyhow::Error),
#[error("Error uploading file: '{0}': {1}")]
UploadFile(String, anyhow::Error),
#[error("Error uploading file: '{0}': {1}")]
UploadFile(String, anyhow::Error),
#[error("Error downloading file: '{0}': {1}")]
DownloadFile(String, anyhow::Error),
#[error("Error downloading file: '{0}': {1}")]
DownloadFile(String, anyhow::Error),
#[error("Error sending file '{0}' to {1}: {2}")]
SendFile(String, String, anyhow::Error),
#[error("Error sending file '{0}' to {1}: {2}")]
SendFile(String, String, anyhow::Error),
#[error("Error creating port-forward '{0}:{1}': {2}")]
PortForwardError(u16, u16, anyhow::Error),
#[error("Error creating port-forward '{0}:{1}': {2}")]
PortForwardError(u16, u16, anyhow::Error),
#[error("Failed to delete namespace '{0}': {1}")]
DeleteNamespaceFailed(String, anyhow::Error),
#[error("Failed to delete namespace '{0}': {1}")]
DeleteNamespaceFailed(String, anyhow::Error),
#[error("Serialization error")]
SerializationError(#[from] serde_json::Error),
#[error("Serialization error")]
SerializationError(#[from] serde_json::Error),
#[error("Failed to acquire lock: {0}")]
FailedToAcquireLock(String),
#[error("Failed to acquire lock: {0}")]
FailedToAcquireLock(String),
}
#[async_trait]
pub trait Provider {
fn name(&self) -> &str;
fn name(&self) -> &str;
fn capabilities(&self) -> &ProviderCapabilities;
fn capabilities(&self) -> &ProviderCapabilities;
async fn namespaces(&self) -> HashMap<String, DynNamespace>;
async fn namespaces(&self) -> HashMap<String, DynNamespace>;
async fn create_namespace(&self) -> Result<DynNamespace, ProviderError>;
async fn create_namespace(&self) -> Result<DynNamespace, ProviderError>;
async fn create_namespace_with_base_dir(
&self,
base_dir: &Path,
) -> Result<DynNamespace, ProviderError>;
async fn create_namespace_with_base_dir(
&self,
base_dir: &Path,
) -> Result<DynNamespace, ProviderError>;
async fn create_namespace_from_json(
&self,
json_value: &serde_json::Value,
) -> Result<DynNamespace, ProviderError>;
async fn create_namespace_from_json(
&self,
json_value: &serde_json::Value,
) -> Result<DynNamespace, ProviderError>;
}
pub type DynProvider = Arc<dyn Provider + Send + Sync>;
#[async_trait]
pub trait ProviderNamespace {
fn name(&self) -> &str;
fn name(&self) -> &str;
fn base_dir(&self) -> &PathBuf;
fn base_dir(&self) -> &PathBuf;
fn capabilities(&self) -> &ProviderCapabilities;
fn capabilities(&self) -> &ProviderCapabilities;
fn provider_name(&self) -> &str;
fn provider_name(&self) -> &str;
async fn detach(&self) {
// noop by default
warn!("Detach is not implemented for {}", self.name());
}
async fn detach(&self) {
// noop by default
warn!("Detach is not implemented for {}", self.name());
}
async fn is_detached(&self) -> bool {
// false by default
false
}
async fn is_detached(&self) -> bool {
// false by default
false
}
async fn nodes(&self) -> HashMap<String, DynNode>;
async fn nodes(&self) -> HashMap<String, DynNode>;
async fn get_node_available_args(
&self,
options: (String, Option<String>),
) -> Result<String, ProviderError>;
async fn get_node_available_args(
&self,
options: (String, Option<String>),
) -> Result<String, ProviderError>;
async fn spawn_node(&self, options: &SpawnNodeOptions) -> Result<DynNode, ProviderError>;
async fn spawn_node(&self, options: &SpawnNodeOptions) -> Result<DynNode, ProviderError>;
async fn spawn_node_from_json(
&self,
json_value: &serde_json::Value,
) -> Result<DynNode, ProviderError>;
async fn spawn_node_from_json(
&self,
json_value: &serde_json::Value,
) -> Result<DynNode, ProviderError>;
async fn generate_files(&self, options: GenerateFilesOptions) -> Result<(), ProviderError>;
async fn generate_files(&self, options: GenerateFilesOptions) -> Result<(), ProviderError>;
async fn destroy(&self) -> Result<(), ProviderError>;
async fn destroy(&self) -> Result<(), ProviderError>;
async fn static_setup(&self) -> Result<(), ProviderError>;
async fn static_setup(&self) -> Result<(), ProviderError>;
}
pub type DynNamespace = Arc<dyn ProviderNamespace + Send + Sync>;
#[async_trait]
pub trait ProviderNode: erased_serde::Serialize {
fn name(&self) -> &str;
fn name(&self) -> &str;
fn args(&self) -> Vec<&str>;
fn args(&self) -> Vec<&str>;
fn base_dir(&self) -> &PathBuf;
fn base_dir(&self) -> &PathBuf;
fn config_dir(&self) -> &PathBuf;
fn config_dir(&self) -> &PathBuf;
fn data_dir(&self) -> &PathBuf;
fn data_dir(&self) -> &PathBuf;
fn relay_data_dir(&self) -> &PathBuf;
fn relay_data_dir(&self) -> &PathBuf;
fn scripts_dir(&self) -> &PathBuf;
fn scripts_dir(&self) -> &PathBuf;
fn log_path(&self) -> &PathBuf;
fn log_path(&self) -> &PathBuf;
fn log_cmd(&self) -> String;
fn log_cmd(&self) -> String;
// Return the absolute path to the file in the `node` perspective
// TODO: purpose?
fn path_in_node(&self, file: &Path) -> PathBuf;
// Return the absolute path to the file in the `node` perspective
// TODO: purpose?
fn path_in_node(&self, file: &Path) -> PathBuf;
async fn logs(&self) -> Result<String, ProviderError>;
async fn logs(&self) -> Result<String, ProviderError>;
async fn dump_logs(&self, local_dest: PathBuf) -> Result<(), ProviderError>;
async fn dump_logs(&self, local_dest: PathBuf) -> Result<(), ProviderError>;
// By default return localhost, should be overrided for k8s
async fn ip(&self) -> Result<IpAddr, ProviderError> {
Ok(LOCALHOST)
}
// By default return localhost, should be overrided for k8s
async fn ip(&self) -> Result<IpAddr, ProviderError> {
Ok(LOCALHOST)
}
// Noop by default (native/docker provider)
async fn create_port_forward(
&self,
_local_port: u16,
_remote_port: u16,
) -> Result<Option<u16>, ProviderError> {
Ok(None)
}
// Noop by default (native/docker provider)
async fn create_port_forward(
&self,
_local_port: u16,
_remote_port: u16,
) -> Result<Option<u16>, ProviderError> {
Ok(None)
}
async fn run_command(
&self,
options: RunCommandOptions,
) -> Result<ExecutionResult, ProviderError>;
async fn run_command(
&self,
options: RunCommandOptions,
) -> Result<ExecutionResult, ProviderError>;
async fn run_script(&self, options: RunScriptOptions)
-> Result<ExecutionResult, ProviderError>;
async fn run_script(&self, options: RunScriptOptions)
-> Result<ExecutionResult, ProviderError>;
async fn send_file(
&self,
local_file_path: &Path,
remote_file_path: &Path,
mode: &str,
) -> Result<(), ProviderError>;
async fn send_file(
&self,
local_file_path: &Path,
remote_file_path: &Path,
mode: &str,
) -> Result<(), ProviderError>;
async fn receive_file(
&self,
remote_file_path: &Path,
local_file_path: &Path,
) -> Result<(), ProviderError>;
async fn receive_file(
&self,
remote_file_path: &Path,
local_file_path: &Path,
) -> Result<(), ProviderError>;
async fn pause(&self) -> Result<(), ProviderError>;
async fn pause(&self) -> Result<(), ProviderError>;
async fn resume(&self) -> Result<(), ProviderError>;
async fn resume(&self) -> Result<(), ProviderError>;
async fn restart(&self, after: Option<Duration>) -> Result<(), ProviderError>;
async fn restart(&self, after: Option<Duration>) -> Result<(), ProviderError>;
async fn destroy(&self) -> Result<(), ProviderError>;
async fn destroy(&self) -> Result<(), ProviderError>;
}
pub type DynNode = Arc<dyn ProviderNode + Send + Sync>;
@@ -1,7 +1,7 @@
use std::{
collections::HashMap,
path::{Path, PathBuf},
sync::{Arc, Weak},
collections::HashMap,
path::{Path, PathBuf},
sync::{Arc, Weak},
};
use async_trait::async_trait;
@@ -12,363 +12,337 @@ use uuid::Uuid;
use super::node::{NativeNode, NativeNodeOptions};
use crate::{
constants::NAMESPACE_PREFIX,
native::{node::DeserializableNativeNodeOptions, provider},
shared::helpers::extract_execution_result,
types::{
GenerateFileCommand, GenerateFilesOptions, ProviderCapabilities, RunCommandOptions,
SpawnNodeOptions,
},
DynNode, NativeProvider, ProviderError, ProviderNamespace, ProviderNode,
constants::NAMESPACE_PREFIX,
native::{node::DeserializableNativeNodeOptions, provider},
shared::helpers::extract_execution_result,
types::{
GenerateFileCommand, GenerateFilesOptions, ProviderCapabilities, RunCommandOptions,
SpawnNodeOptions,
},
DynNode, NativeProvider, ProviderError, ProviderNamespace, ProviderNode,
};
pub(super) struct NativeNamespace<FS>
where
FS: FileSystem + Send + Sync + Clone,
FS: FileSystem + Send + Sync + Clone,
{
weak: Weak<NativeNamespace<FS>>,
name: String,
provider: Weak<NativeProvider<FS>>,
base_dir: PathBuf,
capabilities: ProviderCapabilities,
filesystem: FS,
pub(super) nodes: RwLock<HashMap<String, Arc<NativeNode<FS>>>>,
weak: Weak<NativeNamespace<FS>>,
name: String,
provider: Weak<NativeProvider<FS>>,
base_dir: PathBuf,
capabilities: ProviderCapabilities,
filesystem: FS,
pub(super) nodes: RwLock<HashMap<String, Arc<NativeNode<FS>>>>,
}
impl<FS> NativeNamespace<FS>
where
FS: FileSystem + Send + Sync + Clone + 'static,
FS: FileSystem + Send + Sync + Clone + 'static,
{
pub(super) async fn new(
provider: &Weak<NativeProvider<FS>>,
tmp_dir: &PathBuf,
capabilities: &ProviderCapabilities,
filesystem: &FS,
custom_base_dir: Option<&Path>,
) -> Result<Arc<Self>, ProviderError> {
let name = format!("{}{}", NAMESPACE_PREFIX, Uuid::new_v4());
let base_dir = if let Some(custom_base_dir) = custom_base_dir {
if !filesystem.exists(custom_base_dir).await {
filesystem.create_dir_all(custom_base_dir).await?;
} else {
warn!(
"⚠️ Using and existing directory {} as base dir",
custom_base_dir.to_string_lossy()
);
}
PathBuf::from(custom_base_dir)
} else {
let base_dir = PathBuf::from_iter([tmp_dir, &PathBuf::from(&name)]);
filesystem.create_dir(&base_dir).await?;
base_dir
};
pub(super) async fn new(
provider: &Weak<NativeProvider<FS>>,
tmp_dir: &PathBuf,
capabilities: &ProviderCapabilities,
filesystem: &FS,
custom_base_dir: Option<&Path>,
) -> Result<Arc<Self>, ProviderError> {
let name = format!("{}{}", NAMESPACE_PREFIX, Uuid::new_v4());
let base_dir = if let Some(custom_base_dir) = custom_base_dir {
if !filesystem.exists(custom_base_dir).await {
filesystem.create_dir_all(custom_base_dir).await?;
} else {
warn!(
"⚠️ Using and existing directory {} as base dir",
custom_base_dir.to_string_lossy()
);
}
PathBuf::from(custom_base_dir)
} else {
let base_dir = PathBuf::from_iter([tmp_dir, &PathBuf::from(&name)]);
filesystem.create_dir(&base_dir).await?;
base_dir
};
Ok(Arc::new_cyclic(|weak| NativeNamespace {
weak: weak.clone(),
provider: provider.clone(),
name,
base_dir,
capabilities: capabilities.clone(),
filesystem: filesystem.clone(),
nodes: RwLock::new(HashMap::new()),
}))
}
Ok(Arc::new_cyclic(|weak| NativeNamespace {
weak: weak.clone(),
provider: provider.clone(),
name,
base_dir,
capabilities: capabilities.clone(),
filesystem: filesystem.clone(),
nodes: RwLock::new(HashMap::new()),
}))
}
pub(super) async fn attach_to_live(
provider: &Weak<NativeProvider<FS>>,
capabilities: &ProviderCapabilities,
filesystem: &FS,
custom_base_dir: &Path,
name: &str,
) -> Result<Arc<Self>, ProviderError> {
let base_dir = custom_base_dir.to_path_buf();
pub(super) async fn attach_to_live(
provider: &Weak<NativeProvider<FS>>,
capabilities: &ProviderCapabilities,
filesystem: &FS,
custom_base_dir: &Path,
name: &str,
) -> Result<Arc<Self>, ProviderError> {
let base_dir = custom_base_dir.to_path_buf();
Ok(Arc::new_cyclic(|weak| NativeNamespace {
weak: weak.clone(),
provider: provider.clone(),
name: name.to_string(),
base_dir,
capabilities: capabilities.clone(),
filesystem: filesystem.clone(),
nodes: RwLock::new(HashMap::new()),
}))
}
Ok(Arc::new_cyclic(|weak| NativeNamespace {
weak: weak.clone(),
provider: provider.clone(),
name: name.to_string(),
base_dir,
capabilities: capabilities.clone(),
filesystem: filesystem.clone(),
nodes: RwLock::new(HashMap::new()),
}))
}
}
#[async_trait]
impl<FS> ProviderNamespace for NativeNamespace<FS>
where
FS: FileSystem + Send + Sync + Clone + 'static,
FS: FileSystem + Send + Sync + Clone + 'static,
{
fn name(&self) -> &str {
&self.name
}
fn name(&self) -> &str {
&self.name
}
fn base_dir(&self) -> &PathBuf {
&self.base_dir
}
fn base_dir(&self) -> &PathBuf {
&self.base_dir
}
fn capabilities(&self) -> &ProviderCapabilities {
&self.capabilities
}
fn capabilities(&self) -> &ProviderCapabilities {
&self.capabilities
}
fn provider_name(&self) -> &str {
provider::PROVIDER_NAME
}
fn provider_name(&self) -> &str {
provider::PROVIDER_NAME
}
async fn nodes(&self) -> HashMap<String, DynNode> {
self.nodes
.read()
.await
.iter()
.map(|(name, node)| (name.clone(), node.clone() as DynNode))
.collect()
}
async fn nodes(&self) -> HashMap<String, DynNode> {
self.nodes
.read()
.await
.iter()
.map(|(name, node)| (name.clone(), node.clone() as DynNode))
.collect()
}
async fn get_node_available_args(
&self,
(command, _image): (String, Option<String>),
) -> Result<String, ProviderError> {
let temp_node = self
.spawn_node(
&SpawnNodeOptions::new(format!("temp-{}", Uuid::new_v4()), "bash".to_string())
.args(vec!["-c", "while :; do sleep 1; done"]),
)
.await?;
async fn get_node_available_args(
&self,
(command, _image): (String, Option<String>),
) -> Result<String, ProviderError> {
let temp_node = self
.spawn_node(
&SpawnNodeOptions::new(format!("temp-{}", Uuid::new_v4()), "bash".to_string())
.args(vec!["-c", "while :; do sleep 1; done"]),
)
.await?;
let available_args_output = temp_node
.run_command(RunCommandOptions::new(command.clone()).args(vec!["--help"]))
.await?
.map_err(|(_exit, status)| {
ProviderError::NodeAvailableArgsError("".to_string(), command, status)
})?;
let available_args_output = temp_node
.run_command(RunCommandOptions::new(command.clone()).args(vec!["--help"]))
.await?
.map_err(|(_exit, status)| {
ProviderError::NodeAvailableArgsError("".to_string(), command, status)
})?;
temp_node.destroy().await?;
temp_node.destroy().await?;
Ok(available_args_output)
}
Ok(available_args_output)
}
async fn spawn_node(&self, options: &SpawnNodeOptions) -> Result<DynNode, ProviderError> {
trace!("spawn node options {options:?}");
async fn spawn_node(&self, options: &SpawnNodeOptions) -> Result<DynNode, ProviderError> {
trace!("spawn node options {options:?}");
let node = NativeNode::new(NativeNodeOptions {
namespace: &self.weak,
namespace_base_dir: &self.base_dir,
name: &options.name,
program: &options.program,
args: &options.args,
env: &options.env,
startup_files: &options.injected_files,
created_paths: &options.created_paths,
db_snapshot: options.db_snapshot.as_ref(),
filesystem: &self.filesystem,
node_log_path: options.node_log_path.as_ref(),
})
.await?;
let node = NativeNode::new(NativeNodeOptions {
namespace: &self.weak,
namespace_base_dir: &self.base_dir,
name: &options.name,
program: &options.program,
args: &options.args,
env: &options.env,
startup_files: &options.injected_files,
created_paths: &options.created_paths,
db_snapshot: options.db_snapshot.as_ref(),
filesystem: &self.filesystem,
node_log_path: options.node_log_path.as_ref(),
})
.await?;
self.nodes
.write()
.await
.insert(options.name.clone(), node.clone());
self.nodes.write().await.insert(options.name.clone(), node.clone());
Ok(node)
}
Ok(node)
}
async fn spawn_node_from_json(
&self,
json_value: &serde_json::Value,
) -> Result<DynNode, ProviderError> {
let deserializable: DeserializableNativeNodeOptions =
serde_json::from_value(json_value.clone())?;
let options = NativeNodeOptions::from_deserializable(
&deserializable,
&self.weak,
&self.base_dir,
&self.filesystem,
);
async fn spawn_node_from_json(
&self,
json_value: &serde_json::Value,
) -> Result<DynNode, ProviderError> {
let deserializable: DeserializableNativeNodeOptions =
serde_json::from_value(json_value.clone())?;
let options = NativeNodeOptions::from_deserializable(
&deserializable,
&self.weak,
&self.base_dir,
&self.filesystem,
);
let pid = json_value
.get("process_handle")
.and_then(|v| v.as_i64())
.ok_or_else(|| ProviderError::InvalidConfig("Missing pid field".to_string()))?
as i32;
let node = NativeNode::attach_to_live(options, pid).await?;
let pid = json_value
.get("process_handle")
.and_then(|v| v.as_i64())
.ok_or_else(|| ProviderError::InvalidConfig("Missing pid field".to_string()))?
as i32;
let node = NativeNode::attach_to_live(options, pid).await?;
self.nodes
.write()
.await
.insert(node.name().to_string(), node.clone());
self.nodes.write().await.insert(node.name().to_string(), node.clone());
Ok(node)
}
Ok(node)
}
async fn generate_files(&self, options: GenerateFilesOptions) -> Result<(), ProviderError> {
let node_name = if let Some(name) = options.temp_name {
name
} else {
format!("temp-{}", Uuid::new_v4())
};
async fn generate_files(&self, options: GenerateFilesOptions) -> Result<(), ProviderError> {
let node_name = if let Some(name) = options.temp_name {
name
} else {
format!("temp-{}", Uuid::new_v4())
};
// we spawn a node doing nothing but looping so we can execute our commands
let temp_node = self
.spawn_node(
&SpawnNodeOptions::new(node_name, "bash".to_string())
.args(vec!["-c", "while :; do sleep 1; done"])
.injected_files(options.injected_files),
)
.await?;
// we spawn a node doing nothing but looping so we can execute our commands
let temp_node = self
.spawn_node(
&SpawnNodeOptions::new(node_name, "bash".to_string())
.args(vec!["-c", "while :; do sleep 1; done"])
.injected_files(options.injected_files),
)
.await?;
for GenerateFileCommand {
program,
args,
env,
local_output_path,
} in options.commands
{
trace!(
"🏗 building file {:?} in path {} with command {} {}",
local_output_path.as_os_str(),
self.base_dir.to_string_lossy(),
program,
args.join(" ")
);
let local_output_full_path = format!(
"{}{}{}",
self.base_dir.to_string_lossy(),
if local_output_path.starts_with("/") {
""
} else {
"/"
},
local_output_path.to_string_lossy()
);
for GenerateFileCommand { program, args, env, local_output_path } in options.commands {
trace!(
"🏗 building file {:?} in path {} with command {} {}",
local_output_path.as_os_str(),
self.base_dir.to_string_lossy(),
program,
args.join(" ")
);
let local_output_full_path = format!(
"{}{}{}",
self.base_dir.to_string_lossy(),
if local_output_path.starts_with("/") { "" } else { "/" },
local_output_path.to_string_lossy()
);
let contents = extract_execution_result(
&temp_node,
RunCommandOptions { program, args, env },
options.expected_path.as_ref(),
)
.await?;
self.filesystem
.write(local_output_full_path, contents)
.await
.map_err(|err| ProviderError::FileGenerationFailed(err.into()))?;
}
let contents = extract_execution_result(
&temp_node,
RunCommandOptions { program, args, env },
options.expected_path.as_ref(),
)
.await?;
self.filesystem
.write(local_output_full_path, contents)
.await
.map_err(|err| ProviderError::FileGenerationFailed(err.into()))?;
}
temp_node.destroy().await
}
temp_node.destroy().await
}
async fn static_setup(&self) -> Result<(), ProviderError> {
// no static setup exists for native provider
todo!()
}
async fn static_setup(&self) -> Result<(), ProviderError> {
// no static setup exists for native provider
todo!()
}
async fn destroy(&self) -> Result<(), ProviderError> {
let mut names = vec![];
async fn destroy(&self) -> Result<(), ProviderError> {
let mut names = vec![];
for node in self.nodes.read().await.values() {
node.abort()
.await
.map_err(|err| ProviderError::DestroyNodeFailed(node.name().to_string(), err))?;
names.push(node.name().to_string());
}
for node in self.nodes.read().await.values() {
node.abort()
.await
.map_err(|err| ProviderError::DestroyNodeFailed(node.name().to_string(), err))?;
names.push(node.name().to_string());
}
let mut nodes = self.nodes.write().await;
for name in names {
nodes.remove(&name);
}
let mut nodes = self.nodes.write().await;
for name in names {
nodes.remove(&name);
}
if let Some(provider) = self.provider.upgrade() {
provider.namespaces.write().await.remove(&self.name);
}
if let Some(provider) = self.provider.upgrade() {
provider.namespaces.write().await.remove(&self.name);
}
Ok(())
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use support::fs::local::LocalFileSystem;
use support::fs::local::LocalFileSystem;
use super::*;
use crate::{
types::{GenerateFileCommand, GenerateFilesOptions},
NativeProvider, Provider,
};
use super::*;
use crate::{
types::{GenerateFileCommand, GenerateFilesOptions},
NativeProvider, Provider,
};
fn unique_temp_dir() -> PathBuf {
let mut base = std::env::temp_dir();
base.push(format!("znet_native_ns_test_{}", uuid::Uuid::new_v4()));
base
}
fn unique_temp_dir() -> PathBuf {
let mut base = std::env::temp_dir();
base.push(format!("znet_native_ns_test_{}", uuid::Uuid::new_v4()));
base
}
#[tokio::test]
async fn generate_files_uses_expected_path_when_provided() {
let fs = LocalFileSystem;
let provider = NativeProvider::new(fs.clone());
let base_dir = unique_temp_dir();
// Namespace builder will create directory if needed
let ns = provider
.create_namespace_with_base_dir(&base_dir)
.await
.expect("namespace should be created");
#[tokio::test]
async fn generate_files_uses_expected_path_when_provided() {
let fs = LocalFileSystem;
let provider = NativeProvider::new(fs.clone());
let base_dir = unique_temp_dir();
// Namespace builder will create directory if needed
let ns = provider
.create_namespace_with_base_dir(&base_dir)
.await
.expect("namespace should be created");
// Create a unique on-host path that the native node will write to
let expected_path =
std::env::temp_dir().join(format!("znet_expected_{}.json", uuid::Uuid::new_v4()));
// Create a unique on-host path that the native node will write to
let expected_path =
std::env::temp_dir().join(format!("znet_expected_{}.json", uuid::Uuid::new_v4()));
// Command will write JSON into expected_path; stdout will be something else to ensure we don't read it
let program = "bash".to_string();
let script = format!(
"echo -n '{{\"hello\":\"world\"}}' > {} && echo should_not_be_used",
expected_path.to_string_lossy()
);
let args: Vec<String> = vec!["-lc".into(), script];
// Command will write JSON into expected_path; stdout will be something else to ensure we don't read it
let program = "bash".to_string();
let script = format!(
"echo -n '{{\"hello\":\"world\"}}' > {} && echo should_not_be_used",
expected_path.to_string_lossy()
);
let args: Vec<String> = vec!["-lc".into(), script];
let out_name = PathBuf::from("result_expected.json");
let cmd = GenerateFileCommand::new(program, out_name.clone()).args(args);
let options = GenerateFilesOptions::new(vec![cmd], None, Some(expected_path.clone()));
let out_name = PathBuf::from("result_expected.json");
let cmd = GenerateFileCommand::new(program, out_name.clone()).args(args);
let options = GenerateFilesOptions::new(vec![cmd], None, Some(expected_path.clone()));
ns.generate_files(options)
.await
.expect("generation should succeed");
ns.generate_files(options).await.expect("generation should succeed");
// Read produced file from namespace base_dir
let produced_path = base_dir.join(out_name);
let produced = fs
.read_to_string(&produced_path)
.await
.expect("should read produced file");
assert_eq!(produced, "{\"hello\":\"world\"}");
}
// Read produced file from namespace base_dir
let produced_path = base_dir.join(out_name);
let produced = fs.read_to_string(&produced_path).await.expect("should read produced file");
assert_eq!(produced, "{\"hello\":\"world\"}");
}
#[tokio::test]
async fn generate_files_uses_stdout_when_expected_path_absent() {
let fs = LocalFileSystem;
let provider = NativeProvider::new(fs.clone());
let base_dir = unique_temp_dir();
let ns = provider
.create_namespace_with_base_dir(&base_dir)
.await
.expect("namespace should be created");
#[tokio::test]
async fn generate_files_uses_stdout_when_expected_path_absent() {
let fs = LocalFileSystem;
let provider = NativeProvider::new(fs.clone());
let base_dir = unique_temp_dir();
let ns = provider
.create_namespace_with_base_dir(&base_dir)
.await
.expect("namespace should be created");
// Command prints to stdout only
let program = "bash".to_string();
let args: Vec<String> = vec!["-lc".into(), "echo -n 42".into()];
// Command prints to stdout only
let program = "bash".to_string();
let args: Vec<String> = vec!["-lc".into(), "echo -n 42".into()];
let out_name = PathBuf::from("result_stdout.txt");
let cmd = GenerateFileCommand::new(program, out_name.clone()).args(args);
let options = GenerateFilesOptions::new(vec![cmd], None, None);
let out_name = PathBuf::from("result_stdout.txt");
let cmd = GenerateFileCommand::new(program, out_name.clone()).args(args);
let options = GenerateFilesOptions::new(vec![cmd], None, None);
ns.generate_files(options)
.await
.expect("generation should succeed");
ns.generate_files(options).await.expect("generation should succeed");
let produced_path = base_dir.join(out_name);
let produced = fs
.read_to_string(&produced_path)
.await
.expect("should read produced file");
assert_eq!(produced, "42");
}
let produced_path = base_dir.join(out_name);
let produced = fs.read_to_string(&produced_path).await.expect("should read produced file");
assert_eq!(produced, "42");
}
}
File diff suppressed because it is too large Load Diff
@@ -1,7 +1,7 @@
use std::{
collections::HashMap,
path::{Path, PathBuf},
sync::{Arc, Weak},
collections::HashMap,
path::{Path, PathBuf},
sync::{Arc, Weak},
};
use async_trait::async_trait;
@@ -10,133 +10,124 @@ use tokio::sync::RwLock;
use super::namespace::NativeNamespace;
use crate::{
shared::helpers::extract_namespace_info, types::ProviderCapabilities, DynNamespace, Provider,
ProviderError, ProviderNamespace,
shared::helpers::extract_namespace_info, types::ProviderCapabilities, DynNamespace, Provider,
ProviderError, ProviderNamespace,
};
pub const PROVIDER_NAME: &str = "native";
pub struct NativeProvider<FS>
where
FS: FileSystem + Send + Sync + Clone,
FS: FileSystem + Send + Sync + Clone,
{
weak: Weak<NativeProvider<FS>>,
capabilities: ProviderCapabilities,
tmp_dir: PathBuf,
filesystem: FS,
pub(super) namespaces: RwLock<HashMap<String, Arc<NativeNamespace<FS>>>>,
weak: Weak<NativeProvider<FS>>,
capabilities: ProviderCapabilities,
tmp_dir: PathBuf,
filesystem: FS,
pub(super) namespaces: RwLock<HashMap<String, Arc<NativeNamespace<FS>>>>,
}
impl<FS> NativeProvider<FS>
where
FS: FileSystem + Send + Sync + Clone,
FS: FileSystem + Send + Sync + Clone,
{
pub fn new(filesystem: FS) -> Arc<Self> {
Arc::new_cyclic(|weak| NativeProvider {
weak: weak.clone(),
capabilities: ProviderCapabilities {
has_resources: false,
requires_image: false,
prefix_with_full_path: true,
use_default_ports_in_cmd: false,
},
// NOTE: temp_dir in linux return `/tmp` but on mac something like
// `/var/folders/rz/1cyx7hfj31qgb98d8_cg7jwh0000gn/T/`, having
// one `trailing slash` and the other no can cause issues if
// you try to build a fullpath by concatenate. Use Pathbuf to prevent the issue.
tmp_dir: std::env::temp_dir(),
filesystem,
namespaces: RwLock::new(HashMap::new()),
})
}
pub fn new(filesystem: FS) -> Arc<Self> {
Arc::new_cyclic(|weak| NativeProvider {
weak: weak.clone(),
capabilities: ProviderCapabilities {
has_resources: false,
requires_image: false,
prefix_with_full_path: true,
use_default_ports_in_cmd: false,
},
// NOTE: temp_dir in linux return `/tmp` but on mac something like
// `/var/folders/rz/1cyx7hfj31qgb98d8_cg7jwh0000gn/T/`, having
// one `trailing slash` and the other no can cause issues if
// you try to build a fullpath by concatenate. Use Pathbuf to prevent the issue.
tmp_dir: std::env::temp_dir(),
filesystem,
namespaces: RwLock::new(HashMap::new()),
})
}
pub fn tmp_dir(mut self, tmp_dir: impl Into<PathBuf>) -> Self {
self.tmp_dir = tmp_dir.into();
self
}
pub fn tmp_dir(mut self, tmp_dir: impl Into<PathBuf>) -> Self {
self.tmp_dir = tmp_dir.into();
self
}
}
#[async_trait]
impl<FS> Provider for NativeProvider<FS>
where
FS: FileSystem + Send + Sync + Clone + 'static,
FS: FileSystem + Send + Sync + Clone + 'static,
{
fn name(&self) -> &str {
PROVIDER_NAME
}
fn name(&self) -> &str {
PROVIDER_NAME
}
fn capabilities(&self) -> &ProviderCapabilities {
&self.capabilities
}
fn capabilities(&self) -> &ProviderCapabilities {
&self.capabilities
}
async fn namespaces(&self) -> HashMap<String, DynNamespace> {
self.namespaces
.read()
.await
.iter()
.map(|(name, namespace)| (name.clone(), namespace.clone() as DynNamespace))
.collect()
}
async fn namespaces(&self) -> HashMap<String, DynNamespace> {
self.namespaces
.read()
.await
.iter()
.map(|(name, namespace)| (name.clone(), namespace.clone() as DynNamespace))
.collect()
}
async fn create_namespace(&self) -> Result<DynNamespace, ProviderError> {
let namespace = NativeNamespace::new(
&self.weak,
&self.tmp_dir,
&self.capabilities,
&self.filesystem,
None,
)
.await?;
async fn create_namespace(&self) -> Result<DynNamespace, ProviderError> {
let namespace = NativeNamespace::new(
&self.weak,
&self.tmp_dir,
&self.capabilities,
&self.filesystem,
None,
)
.await?;
self.namespaces
.write()
.await
.insert(namespace.name().to_string(), namespace.clone());
self.namespaces.write().await.insert(namespace.name().to_string(), namespace.clone());
Ok(namespace)
}
Ok(namespace)
}
async fn create_namespace_with_base_dir(
&self,
base_dir: &Path,
) -> Result<DynNamespace, ProviderError> {
let namespace = NativeNamespace::new(
&self.weak,
&self.tmp_dir,
&self.capabilities,
&self.filesystem,
Some(base_dir),
)
.await?;
async fn create_namespace_with_base_dir(
&self,
base_dir: &Path,
) -> Result<DynNamespace, ProviderError> {
let namespace = NativeNamespace::new(
&self.weak,
&self.tmp_dir,
&self.capabilities,
&self.filesystem,
Some(base_dir),
)
.await?;
self.namespaces
.write()
.await
.insert(namespace.name().to_string(), namespace.clone());
self.namespaces.write().await.insert(namespace.name().to_string(), namespace.clone());
Ok(namespace)
}
Ok(namespace)
}
async fn create_namespace_from_json(
&self,
json_value: &serde_json::Value,
) -> Result<DynNamespace, ProviderError> {
let (base_dir, name) = extract_namespace_info(json_value)?;
async fn create_namespace_from_json(
&self,
json_value: &serde_json::Value,
) -> Result<DynNamespace, ProviderError> {
let (base_dir, name) = extract_namespace_info(json_value)?;
let namespace = NativeNamespace::attach_to_live(
&self.weak,
&self.capabilities,
&self.filesystem,
&base_dir,
&name,
)
.await?;
let namespace = NativeNamespace::attach_to_live(
&self.weak,
&self.capabilities,
&self.filesystem,
&base_dir,
&name,
)
.await?;
self.namespaces
.write()
.await
.insert(namespace.name().to_string(), namespace.clone());
self.namespaces.write().await.insert(namespace.name().to_string(), namespace.clone());
Ok(namespace)
}
Ok(namespace)
}
}
@@ -6,74 +6,69 @@ use crate::{types::RunCommandOptions, DynNode, ProviderError};
/// Check if we are running in `CI` by checking the 'RUN_IN_CI' env var
pub fn running_in_ci() -> bool {
env::var("RUN_IN_CI").unwrap_or_default() == "1"
env::var("RUN_IN_CI").unwrap_or_default() == "1"
}
/// Executes a command on a temporary node and extracts the execution result either from the
/// standard output or a file.
pub async fn extract_execution_result(
temp_node: &DynNode,
options: RunCommandOptions,
expected_path: Option<&PathBuf>,
temp_node: &DynNode,
options: RunCommandOptions,
expected_path: Option<&PathBuf>,
) -> Result<String, ProviderError> {
let output_contents = temp_node
.run_command(options)
.await?
.map_err(|(_, msg)| ProviderError::FileGenerationFailed(anyhow!("{msg}")))?;
let output_contents = temp_node
.run_command(options)
.await?
.map_err(|(_, msg)| ProviderError::FileGenerationFailed(anyhow!("{msg}")))?;
// If an expected_path is provided, read the file contents from inside the container
if let Some(expected_path) = expected_path.as_ref() {
Ok(temp_node
.run_command(
RunCommandOptions::new("cat")
.args(vec![expected_path.to_string_lossy().to_string()]),
)
.await?
.map_err(|(_, msg)| {
ProviderError::FileGenerationFailed(anyhow!(format!(
"failed reading expected_path {}: {}",
expected_path.display(),
msg
)))
})?)
} else {
Ok(output_contents)
}
// If an expected_path is provided, read the file contents from inside the container
if let Some(expected_path) = expected_path.as_ref() {
Ok(temp_node
.run_command(
RunCommandOptions::new("cat")
.args(vec![expected_path.to_string_lossy().to_string()]),
)
.await?
.map_err(|(_, msg)| {
ProviderError::FileGenerationFailed(anyhow!(format!(
"failed reading expected_path {}: {}",
expected_path.display(),
msg
)))
})?)
} else {
Ok(output_contents)
}
}
pub fn extract_namespace_info(
json_value: &serde_json::Value,
json_value: &serde_json::Value,
) -> Result<(PathBuf, String), ProviderError> {
let base_dir = json_value
.get("local_base_dir")
.and_then(|v| v.as_str())
.map(PathBuf::from)
.ok_or(ProviderError::InvalidConfig(
"`field local_base_dir` is missing from zombie.json".to_string(),
))?;
let base_dir =
json_value.get("local_base_dir").and_then(|v| v.as_str()).map(PathBuf::from).ok_or(
ProviderError::InvalidConfig(
"`field local_base_dir` is missing from zombie.json".to_string(),
),
)?;
let name =
json_value
.get("ns")
.and_then(|v| v.as_str())
.ok_or(ProviderError::InvalidConfig(
"field `ns` is missing from zombie.json".to_string(),
))?;
let name = json_value.get("ns").and_then(|v| v.as_str()).ok_or(
ProviderError::InvalidConfig("field `ns` is missing from zombie.json".to_string()),
)?;
Ok((base_dir, name.to_string()))
Ok((base_dir, name.to_string()))
}
#[cfg(test)]
mod tests {
use super::*;
use super::*;
#[test]
fn check_runing_in_ci_env_var() {
assert!(!running_in_ci());
// now set the env var
env::set_var("RUN_IN_CI", "1");
assert!(running_in_ci());
// reset
env::set_var("RUN_IN_CI", "");
}
#[test]
fn check_runing_in_ci_env_var() {
assert!(!running_in_ci());
// now set the env var
env::set_var("RUN_IN_CI", "1");
assert!(running_in_ci());
// reset
env::set_var("RUN_IN_CI", "");
}
}
+286 -297
View File
@@ -1,7 +1,7 @@
use std::{
collections::HashMap,
path::{Path, PathBuf},
process::ExitStatus,
collections::HashMap,
path::{Path, PathBuf},
process::ExitStatus,
};
use configuration::{shared::resources::Resources, types::AssetLocation};
@@ -13,363 +13,352 @@ pub type ExecutionResult = Result<String, (ExitStatus, String)>;
#[derive(Debug, Clone, PartialEq)]
pub struct ProviderCapabilities {
// default ports internal
/// Ensure that we have an image for each node (k8s/podman/docker)
pub requires_image: bool,
/// Allow to customize the resources through manifest (k8s).
pub has_resources: bool,
/// Used in native to prefix filepath with fullpath
pub prefix_with_full_path: bool,
/// Use default ports in node cmd/args.
/// NOTE: generally used in k8s/dockers since the images expose those ports.
pub use_default_ports_in_cmd: bool,
// default ports internal
/// Ensure that we have an image for each node (k8s/podman/docker)
pub requires_image: bool,
/// Allow to customize the resources through manifest (k8s).
pub has_resources: bool,
/// Used in native to prefix filepath with fullpath
pub prefix_with_full_path: bool,
/// Use default ports in node cmd/args.
/// NOTE: generally used in k8s/dockers since the images expose those ports.
pub use_default_ports_in_cmd: bool,
}
#[derive(Debug, Clone)]
pub struct SpawnNodeOptions {
/// Name of the node
pub name: String,
/// Image of the node (IFF is supported by the provider)
pub image: Option<String>,
/// Resources to apply to the node (IFF is supported by the provider)
pub resources: Option<Resources>,
/// Main command to execute
pub program: String,
/// Arguments to pass to the main command
pub args: Vec<String>,
/// Environment to set when running the `program`
pub env: Vec<(String, String)>,
// TODO: rename startup_files
/// Files to inject at startup
pub injected_files: Vec<TransferedFile>,
/// Paths to create before start the node (e.g keystore)
/// should be created with `create_dir_all` in order
/// to create the full path even when we have missing parts
pub created_paths: Vec<PathBuf>,
/// Database snapshot to be injected (should be a tgz file)
/// Could be a local or remote asset
pub db_snapshot: Option<AssetLocation>,
pub port_mapping: Option<HashMap<Port, Port>>,
/// Optionally specify a log path for the node
pub node_log_path: Option<PathBuf>,
/// Name of the node
pub name: String,
/// Image of the node (IFF is supported by the provider)
pub image: Option<String>,
/// Resources to apply to the node (IFF is supported by the provider)
pub resources: Option<Resources>,
/// Main command to execute
pub program: String,
/// Arguments to pass to the main command
pub args: Vec<String>,
/// Environment to set when running the `program`
pub env: Vec<(String, String)>,
// TODO: rename startup_files
/// Files to inject at startup
pub injected_files: Vec<TransferedFile>,
/// Paths to create before start the node (e.g keystore)
/// should be created with `create_dir_all` in order
/// to create the full path even when we have missing parts
pub created_paths: Vec<PathBuf>,
/// Database snapshot to be injected (should be a tgz file)
/// Could be a local or remote asset
pub db_snapshot: Option<AssetLocation>,
pub port_mapping: Option<HashMap<Port, Port>>,
/// Optionally specify a log path for the node
pub node_log_path: Option<PathBuf>,
}
impl SpawnNodeOptions {
pub fn new<S>(name: S, program: S) -> Self
where
S: AsRef<str>,
{
Self {
name: name.as_ref().to_string(),
image: None,
resources: None,
program: program.as_ref().to_string(),
args: vec![],
env: vec![],
injected_files: vec![],
created_paths: vec![],
db_snapshot: None,
port_mapping: None,
node_log_path: None,
}
}
pub fn new<S>(name: S, program: S) -> Self
where
S: AsRef<str>,
{
Self {
name: name.as_ref().to_string(),
image: None,
resources: None,
program: program.as_ref().to_string(),
args: vec![],
env: vec![],
injected_files: vec![],
created_paths: vec![],
db_snapshot: None,
port_mapping: None,
node_log_path: None,
}
}
pub fn image<S>(mut self, image: S) -> Self
where
S: AsRef<str>,
{
self.image = Some(image.as_ref().to_string());
self
}
pub fn image<S>(mut self, image: S) -> Self
where
S: AsRef<str>,
{
self.image = Some(image.as_ref().to_string());
self
}
pub fn resources(mut self, resources: Resources) -> Self {
self.resources = Some(resources);
self
}
pub fn resources(mut self, resources: Resources) -> Self {
self.resources = Some(resources);
self
}
pub fn db_snapshot(mut self, db_snap: Option<AssetLocation>) -> Self {
self.db_snapshot = db_snap;
self
}
pub fn db_snapshot(mut self, db_snap: Option<AssetLocation>) -> Self {
self.db_snapshot = db_snap;
self
}
pub fn args<S, I>(mut self, args: I) -> Self
where
S: AsRef<str>,
I: IntoIterator<Item = S>,
{
self.args = args.into_iter().map(|s| s.as_ref().to_string()).collect();
self
}
pub fn args<S, I>(mut self, args: I) -> Self
where
S: AsRef<str>,
I: IntoIterator<Item = S>,
{
self.args = args.into_iter().map(|s| s.as_ref().to_string()).collect();
self
}
pub fn env<S, I>(mut self, env: I) -> Self
where
S: AsRef<str>,
I: IntoIterator<Item = (S, S)>,
{
self.env = env
.into_iter()
.map(|(name, value)| (name.as_ref().to_string(), value.as_ref().to_string()))
.collect();
self
}
pub fn env<S, I>(mut self, env: I) -> Self
where
S: AsRef<str>,
I: IntoIterator<Item = (S, S)>,
{
self.env = env
.into_iter()
.map(|(name, value)| (name.as_ref().to_string(), value.as_ref().to_string()))
.collect();
self
}
pub fn injected_files<I>(mut self, injected_files: I) -> Self
where
I: IntoIterator<Item = TransferedFile>,
{
self.injected_files = injected_files.into_iter().collect();
self
}
pub fn injected_files<I>(mut self, injected_files: I) -> Self
where
I: IntoIterator<Item = TransferedFile>,
{
self.injected_files = injected_files.into_iter().collect();
self
}
pub fn created_paths<P, I>(mut self, created_paths: I) -> Self
where
P: AsRef<Path>,
I: IntoIterator<Item = P>,
{
self.created_paths = created_paths
.into_iter()
.map(|path| path.as_ref().into())
.collect();
self
}
pub fn created_paths<P, I>(mut self, created_paths: I) -> Self
where
P: AsRef<Path>,
I: IntoIterator<Item = P>,
{
self.created_paths = created_paths.into_iter().map(|path| path.as_ref().into()).collect();
self
}
pub fn port_mapping(mut self, ports: HashMap<Port, Port>) -> Self {
self.port_mapping = Some(ports);
self
}
pub fn port_mapping(mut self, ports: HashMap<Port, Port>) -> Self {
self.port_mapping = Some(ports);
self
}
pub fn node_log_path(mut self, path: Option<PathBuf>) -> Self {
self.node_log_path = path;
self
}
pub fn node_log_path(mut self, path: Option<PathBuf>) -> Self {
self.node_log_path = path;
self
}
}
#[derive(Debug)]
pub struct GenerateFileCommand {
pub program: String,
pub args: Vec<String>,
pub env: Vec<(String, String)>,
pub local_output_path: PathBuf,
pub program: String,
pub args: Vec<String>,
pub env: Vec<(String, String)>,
pub local_output_path: PathBuf,
}
impl GenerateFileCommand {
pub fn new<S, P>(program: S, local_output_path: P) -> Self
where
S: AsRef<str>,
P: AsRef<Path>,
{
Self {
program: program.as_ref().to_string(),
args: vec![],
env: vec![],
local_output_path: local_output_path.as_ref().into(),
}
}
pub fn new<S, P>(program: S, local_output_path: P) -> Self
where
S: AsRef<str>,
P: AsRef<Path>,
{
Self {
program: program.as_ref().to_string(),
args: vec![],
env: vec![],
local_output_path: local_output_path.as_ref().into(),
}
}
pub fn args<S, I>(mut self, args: I) -> Self
where
S: AsRef<str>,
I: IntoIterator<Item = S>,
{
self.args = args.into_iter().map(|s| s.as_ref().to_string()).collect();
self
}
pub fn args<S, I>(mut self, args: I) -> Self
where
S: AsRef<str>,
I: IntoIterator<Item = S>,
{
self.args = args.into_iter().map(|s| s.as_ref().to_string()).collect();
self
}
pub fn env<S, I>(mut self, env: I) -> Self
where
S: AsRef<str>,
I: IntoIterator<Item = (S, S)>,
{
self.env = env
.into_iter()
.map(|(name, value)| (name.as_ref().to_string(), value.as_ref().to_string()))
.collect();
self
}
pub fn env<S, I>(mut self, env: I) -> Self
where
S: AsRef<str>,
I: IntoIterator<Item = (S, S)>,
{
self.env = env
.into_iter()
.map(|(name, value)| (name.as_ref().to_string(), value.as_ref().to_string()))
.collect();
self
}
}
#[derive(Debug)]
pub struct GenerateFilesOptions {
pub commands: Vec<GenerateFileCommand>,
pub image: Option<String>,
pub injected_files: Vec<TransferedFile>,
// Allow to control the name of the node used to create the files.
pub temp_name: Option<String>,
pub expected_path: Option<PathBuf>,
pub commands: Vec<GenerateFileCommand>,
pub image: Option<String>,
pub injected_files: Vec<TransferedFile>,
// Allow to control the name of the node used to create the files.
pub temp_name: Option<String>,
pub expected_path: Option<PathBuf>,
}
impl GenerateFilesOptions {
pub fn new<I>(commands: I, image: Option<String>, expected_path: Option<PathBuf>) -> Self
where
I: IntoIterator<Item = GenerateFileCommand>,
{
Self {
commands: commands.into_iter().collect(),
injected_files: vec![],
image,
temp_name: None,
expected_path,
}
}
pub fn new<I>(commands: I, image: Option<String>, expected_path: Option<PathBuf>) -> Self
where
I: IntoIterator<Item = GenerateFileCommand>,
{
Self {
commands: commands.into_iter().collect(),
injected_files: vec![],
image,
temp_name: None,
expected_path,
}
}
pub fn with_files<I>(
commands: I,
image: Option<String>,
injected_files: &[TransferedFile],
expected_path: Option<PathBuf>,
) -> Self
where
I: IntoIterator<Item = GenerateFileCommand>,
{
Self {
commands: commands.into_iter().collect(),
injected_files: injected_files.into(),
image,
temp_name: None,
expected_path,
}
}
pub fn with_files<I>(
commands: I,
image: Option<String>,
injected_files: &[TransferedFile],
expected_path: Option<PathBuf>,
) -> Self
where
I: IntoIterator<Item = GenerateFileCommand>,
{
Self {
commands: commands.into_iter().collect(),
injected_files: injected_files.into(),
image,
temp_name: None,
expected_path,
}
}
pub fn image<S>(mut self, image: S) -> Self
where
S: AsRef<str>,
{
self.image = Some(image.as_ref().to_string());
self
}
pub fn image<S>(mut self, image: S) -> Self
where
S: AsRef<str>,
{
self.image = Some(image.as_ref().to_string());
self
}
pub fn injected_files<I>(mut self, injected_files: I) -> Self
where
I: IntoIterator<Item = TransferedFile>,
{
self.injected_files = injected_files.into_iter().collect();
self
}
pub fn injected_files<I>(mut self, injected_files: I) -> Self
where
I: IntoIterator<Item = TransferedFile>,
{
self.injected_files = injected_files.into_iter().collect();
self
}
pub fn temp_name(mut self, name: impl Into<String>) -> Self {
self.temp_name = Some(name.into());
self
}
pub fn temp_name(mut self, name: impl Into<String>) -> Self {
self.temp_name = Some(name.into());
self
}
}
#[derive(Debug)]
pub struct RunCommandOptions {
pub program: String,
pub args: Vec<String>,
pub env: Vec<(String, String)>,
pub program: String,
pub args: Vec<String>,
pub env: Vec<(String, String)>,
}
impl RunCommandOptions {
pub fn new<S>(program: S) -> Self
where
S: AsRef<str>,
{
Self {
program: program.as_ref().to_string(),
args: vec![],
env: vec![],
}
}
pub fn new<S>(program: S) -> Self
where
S: AsRef<str>,
{
Self { program: program.as_ref().to_string(), args: vec![], env: vec![] }
}
pub fn args<S, I>(mut self, args: I) -> Self
where
S: AsRef<str>,
I: IntoIterator<Item = S>,
{
self.args = args.into_iter().map(|s| s.as_ref().to_string()).collect();
self
}
pub fn args<S, I>(mut self, args: I) -> Self
where
S: AsRef<str>,
I: IntoIterator<Item = S>,
{
self.args = args.into_iter().map(|s| s.as_ref().to_string()).collect();
self
}
pub fn env<S, I>(mut self, env: I) -> Self
where
S: AsRef<str>,
I: IntoIterator<Item = (S, S)>,
{
self.env = env
.into_iter()
.map(|(name, value)| (name.as_ref().to_string(), value.as_ref().to_string()))
.collect();
self
}
pub fn env<S, I>(mut self, env: I) -> Self
where
S: AsRef<str>,
I: IntoIterator<Item = (S, S)>,
{
self.env = env
.into_iter()
.map(|(name, value)| (name.as_ref().to_string(), value.as_ref().to_string()))
.collect();
self
}
}
pub struct RunScriptOptions {
pub local_script_path: PathBuf,
pub args: Vec<String>,
pub env: Vec<(String, String)>,
pub local_script_path: PathBuf,
pub args: Vec<String>,
pub env: Vec<(String, String)>,
}
impl RunScriptOptions {
pub fn new<P>(local_script_path: P) -> Self
where
P: AsRef<Path>,
{
Self {
local_script_path: local_script_path.as_ref().into(),
args: vec![],
env: vec![],
}
}
pub fn new<P>(local_script_path: P) -> Self
where
P: AsRef<Path>,
{
Self { local_script_path: local_script_path.as_ref().into(), args: vec![], env: vec![] }
}
pub fn args<S, I>(mut self, args: I) -> Self
where
S: AsRef<str>,
I: IntoIterator<Item = S>,
{
self.args = args.into_iter().map(|s| s.as_ref().to_string()).collect();
self
}
pub fn args<S, I>(mut self, args: I) -> Self
where
S: AsRef<str>,
I: IntoIterator<Item = S>,
{
self.args = args.into_iter().map(|s| s.as_ref().to_string()).collect();
self
}
pub fn env<S, I>(mut self, env: I) -> Self
where
S: AsRef<str>,
I: IntoIterator<Item = (S, S)>,
{
self.env = env
.into_iter()
.map(|(name, value)| (name.as_ref().to_string(), value.as_ref().to_string()))
.collect();
self
}
pub fn env<S, I>(mut self, env: I) -> Self
where
S: AsRef<str>,
I: IntoIterator<Item = (S, S)>,
{
self.env = env
.into_iter()
.map(|(name, value)| (name.as_ref().to_string(), value.as_ref().to_string()))
.collect();
self
}
}
// TODO(team): I think we can rename it to FileMap?
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TransferedFile {
pub local_path: PathBuf,
pub remote_path: PathBuf,
// TODO: Can be narrowed to have strict typing on this?
pub mode: String,
pub local_path: PathBuf,
pub remote_path: PathBuf,
// TODO: Can be narrowed to have strict typing on this?
pub mode: String,
}
impl TransferedFile {
pub fn new<P>(local_path: P, remote_path: P) -> Self
where
P: AsRef<Path>,
{
Self {
local_path: local_path.as_ref().into(),
remote_path: remote_path.as_ref().into(),
mode: "0644".to_string(), // default to rw-r--r--
}
}
pub fn new<P>(local_path: P, remote_path: P) -> Self
where
P: AsRef<Path>,
{
Self {
local_path: local_path.as_ref().into(),
remote_path: remote_path.as_ref().into(),
mode: "0644".to_string(), // default to rw-r--r--
}
}
pub fn mode<S>(mut self, mode: S) -> Self
where
S: AsRef<str>,
{
self.mode = mode.as_ref().to_string();
self
}
pub fn mode<S>(mut self, mode: S) -> Self
where
S: AsRef<str>,
{
self.mode = mode.as_ref().to_string();
self
}
}
impl std::fmt::Display for TransferedFile {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"File to transfer (local: {}, remote: {})",
self.local_path.display(),
self.remote_path.display()
)
}
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"File to transfer (local: {}, remote: {})",
self.local_path.display(),
self.remote_path.display()
)
}
}
+47 -47
View File
@@ -2,8 +2,8 @@
use std::{env, future::Future, path::PathBuf, pin::Pin};
use crate::{
AttachToLive, AttachToLiveNetwork, LocalFileSystem, Network, NetworkConfig, NetworkConfigExt,
OrchestratorError,
AttachToLive, AttachToLiveNetwork, LocalFileSystem, Network, NetworkConfig, NetworkConfigExt,
OrchestratorError,
};
const DEFAULT_POLKADOT_IMAGE: &str = "docker.io/parity/polkadot:latest";
@@ -11,80 +11,80 @@ const DEFAULT_CUMULUS_IMAGE: &str = "docker.io/parity/polkadot-parachain:latest"
#[derive(Debug, Default)]
pub struct Images {
pub polkadot: String,
pub cumulus: String,
pub polkadot: String,
pub cumulus: String,
}
impl Images {
/// Alias for polkadot field - returns reference to pezkuwi/polkadot image
pub fn pezkuwi(&self) -> &str {
&self.polkadot
}
/// Alias for polkadot field - returns reference to pezkuwi/polkadot image
pub fn pezkuwi(&self) -> &str {
&self.polkadot
}
/// Alias for cumulus field - returns reference to pezcumulus/cumulus image
pub fn pezcumulus(&self) -> &str {
&self.cumulus
}
/// Alias for cumulus field - returns reference to pezcumulus/cumulus image
pub fn pezcumulus(&self) -> &str {
&self.cumulus
}
}
pub enum Provider {
Native,
K8s,
Docker,
Native,
K8s,
Docker,
}
impl Provider {
pub fn get_spawn_fn(
&self,
) -> fn(NetworkConfig) -> Pin<Box<dyn Future<Output = SpawnResult> + Send>> {
match self {
Provider::Native => NetworkConfigExt::spawn_native,
Provider::K8s => NetworkConfigExt::spawn_k8s,
Provider::Docker => NetworkConfigExt::spawn_docker,
}
}
pub fn get_spawn_fn(
&self,
) -> fn(NetworkConfig) -> Pin<Box<dyn Future<Output = SpawnResult> + Send>> {
match self {
Provider::Native => NetworkConfigExt::spawn_native,
Provider::K8s => NetworkConfigExt::spawn_k8s,
Provider::Docker => NetworkConfigExt::spawn_docker,
}
}
}
// Use `docker` as default provider
impl From<String> for Provider {
fn from(value: String) -> Self {
match value.to_ascii_lowercase().as_ref() {
"native" => Provider::Native,
"k8s" => Provider::K8s,
_ => Provider::Docker, // default provider
}
}
fn from(value: String) -> Self {
match value.to_ascii_lowercase().as_ref() {
"native" => Provider::Native,
"k8s" => Provider::K8s,
_ => Provider::Docker, // default provider
}
}
}
pub fn get_images_from_env() -> Images {
let polkadot = env::var("POLKADOT_IMAGE").unwrap_or(DEFAULT_POLKADOT_IMAGE.into());
let cumulus = env::var("CUMULUS_IMAGE").unwrap_or(DEFAULT_CUMULUS_IMAGE.into());
Images { polkadot, cumulus }
let polkadot = env::var("POLKADOT_IMAGE").unwrap_or(DEFAULT_POLKADOT_IMAGE.into());
let cumulus = env::var("CUMULUS_IMAGE").unwrap_or(DEFAULT_CUMULUS_IMAGE.into());
Images { polkadot, cumulus }
}
pub fn get_provider_from_env() -> Provider {
env::var("ZOMBIE_PROVIDER").unwrap_or_default().into()
env::var("ZOMBIE_PROVIDER").unwrap_or_default().into()
}
pub type SpawnResult = Result<Network<LocalFileSystem>, OrchestratorError>;
pub fn get_spawn_fn() -> fn(NetworkConfig) -> Pin<Box<dyn Future<Output = SpawnResult> + Send>> {
let provider = get_provider_from_env();
let provider = get_provider_from_env();
match provider {
Provider::Native => NetworkConfigExt::spawn_native,
Provider::K8s => NetworkConfigExt::spawn_k8s,
Provider::Docker => NetworkConfigExt::spawn_docker,
}
match provider {
Provider::Native => NetworkConfigExt::spawn_native,
Provider::K8s => NetworkConfigExt::spawn_k8s,
Provider::Docker => NetworkConfigExt::spawn_docker,
}
}
pub type AttachResult = Result<Network<LocalFileSystem>, OrchestratorError>;
pub fn get_attach_fn() -> fn(PathBuf) -> Pin<Box<dyn Future<Output = AttachResult> + Send>> {
let provider = get_provider_from_env();
let provider = get_provider_from_env();
match provider {
Provider::Native => AttachToLiveNetwork::attach_native,
Provider::K8s => AttachToLiveNetwork::attach_k8s,
Provider::Docker => AttachToLiveNetwork::attach_docker,
}
match provider {
Provider::Native => AttachToLiveNetwork::attach_native,
Provider::K8s => AttachToLiveNetwork::attach_k8s,
Provider::Docker => AttachToLiveNetwork::attach_docker,
}
}
+86 -86
View File
@@ -2,20 +2,20 @@ use std::path::PathBuf;
use async_trait::async_trait;
pub use configuration::{
GlobalSettings, GlobalSettingsBuilder, NetworkConfig, NetworkConfigBuilder,
RegistrationStrategy, WithRelaychain,
GlobalSettings, GlobalSettingsBuilder, NetworkConfig, NetworkConfigBuilder,
RegistrationStrategy, WithRelaychain,
};
pub use orchestrator::{
errors::OrchestratorError,
network::{node::NetworkNode, Network},
pezsc_chain_spec, AddCollatorOptions, AddNodeOptions, Orchestrator,
errors::OrchestratorError,
network::{node::NetworkNode, Network},
pezsc_chain_spec, AddCollatorOptions, AddNodeOptions, Orchestrator,
};
// Helpers used for interact with the network
pub mod tx_helper {
pub use orchestrator::{
network::chain_upgrade::ChainUpgrade, shared::types::RuntimeUpgradeOptions,
};
pub use orchestrator::{
network::chain_upgrade::ChainUpgrade, shared::types::RuntimeUpgradeOptions,
};
}
use provider::{DockerProvider, KubernetesProvider, NativeProvider};
@@ -32,100 +32,100 @@ pub use pezkuwi_subxt_signer as subxt_signer;
#[async_trait]
pub trait NetworkConfigExt {
/// Spawns a network using the native or k8s provider.
///
/// # Example:
/// ```rust
/// # use zombienet_sdk::{NetworkConfig, NetworkConfigExt};
/// # async fn example() -> Result<(), zombienet_sdk::OrchestratorError> {
/// let network = NetworkConfig::load_from_toml("config.toml")?
/// .spawn_native()
/// .await?;
/// # Ok(())
/// # }
/// ```
async fn spawn_native(self) -> Result<Network<LocalFileSystem>, OrchestratorError>;
async fn spawn_k8s(self) -> Result<Network<LocalFileSystem>, OrchestratorError>;
async fn spawn_docker(self) -> Result<Network<LocalFileSystem>, OrchestratorError>;
/// Spawns a network using the native or k8s provider.
///
/// # Example:
/// ```rust
/// # use zombienet_sdk::{NetworkConfig, NetworkConfigExt};
/// # async fn example() -> Result<(), zombienet_sdk::OrchestratorError> {
/// let network = NetworkConfig::load_from_toml("config.toml")?
/// .spawn_native()
/// .await?;
/// # Ok(())
/// # }
/// ```
async fn spawn_native(self) -> Result<Network<LocalFileSystem>, OrchestratorError>;
async fn spawn_k8s(self) -> Result<Network<LocalFileSystem>, OrchestratorError>;
async fn spawn_docker(self) -> Result<Network<LocalFileSystem>, OrchestratorError>;
}
#[async_trait]
pub trait AttachToLive {
/// Attaches to a running live network using the native, docker or k8s provider.
///
/// # Example:
/// ```rust
/// # use zombienet_sdk::{AttachToLive, AttachToLiveNetwork};
/// # use std::path::PathBuf;
/// # async fn example() -> Result<(), zombienet_sdk::OrchestratorError> {
/// let zombie_json_path = PathBuf::from("some/path/zombie.json");
/// let network = AttachToLiveNetwork::attach_native(zombie_json_path).await?;
/// # Ok(())
/// # }
/// ```
async fn attach_native(
zombie_json_path: PathBuf,
) -> Result<Network<LocalFileSystem>, OrchestratorError>;
async fn attach_k8s(
zombie_json_path: PathBuf,
) -> Result<Network<LocalFileSystem>, OrchestratorError>;
async fn attach_docker(
zombie_json_path: PathBuf,
) -> Result<Network<LocalFileSystem>, OrchestratorError>;
/// Attaches to a running live network using the native, docker or k8s provider.
///
/// # Example:
/// ```rust
/// # use zombienet_sdk::{AttachToLive, AttachToLiveNetwork};
/// # use std::path::PathBuf;
/// # async fn example() -> Result<(), zombienet_sdk::OrchestratorError> {
/// let zombie_json_path = PathBuf::from("some/path/zombie.json");
/// let network = AttachToLiveNetwork::attach_native(zombie_json_path).await?;
/// # Ok(())
/// # }
/// ```
async fn attach_native(
zombie_json_path: PathBuf,
) -> Result<Network<LocalFileSystem>, OrchestratorError>;
async fn attach_k8s(
zombie_json_path: PathBuf,
) -> Result<Network<LocalFileSystem>, OrchestratorError>;
async fn attach_docker(
zombie_json_path: PathBuf,
) -> Result<Network<LocalFileSystem>, OrchestratorError>;
}
#[async_trait]
impl NetworkConfigExt for NetworkConfig {
async fn spawn_native(self) -> Result<Network<LocalFileSystem>, OrchestratorError> {
let filesystem = LocalFileSystem;
let provider = NativeProvider::new(filesystem.clone());
let orchestrator = Orchestrator::new(filesystem, provider);
orchestrator.spawn(self).await
}
async fn spawn_native(self) -> Result<Network<LocalFileSystem>, OrchestratorError> {
let filesystem = LocalFileSystem;
let provider = NativeProvider::new(filesystem.clone());
let orchestrator = Orchestrator::new(filesystem, provider);
orchestrator.spawn(self).await
}
async fn spawn_k8s(self) -> Result<Network<LocalFileSystem>, OrchestratorError> {
let filesystem = LocalFileSystem;
let provider = KubernetesProvider::new(filesystem.clone()).await;
let orchestrator = Orchestrator::new(filesystem, provider);
orchestrator.spawn(self).await
}
async fn spawn_k8s(self) -> Result<Network<LocalFileSystem>, OrchestratorError> {
let filesystem = LocalFileSystem;
let provider = KubernetesProvider::new(filesystem.clone()).await;
let orchestrator = Orchestrator::new(filesystem, provider);
orchestrator.spawn(self).await
}
async fn spawn_docker(self) -> Result<Network<LocalFileSystem>, OrchestratorError> {
let filesystem = LocalFileSystem;
let provider = DockerProvider::new(filesystem.clone()).await;
let orchestrator = Orchestrator::new(filesystem, provider);
orchestrator.spawn(self).await
}
async fn spawn_docker(self) -> Result<Network<LocalFileSystem>, OrchestratorError> {
let filesystem = LocalFileSystem;
let provider = DockerProvider::new(filesystem.clone()).await;
let orchestrator = Orchestrator::new(filesystem, provider);
orchestrator.spawn(self).await
}
}
pub struct AttachToLiveNetwork;
#[async_trait]
impl AttachToLive for AttachToLiveNetwork {
async fn attach_native(
zombie_json_path: PathBuf,
) -> Result<Network<LocalFileSystem>, OrchestratorError> {
let filesystem = LocalFileSystem;
let provider = NativeProvider::new(filesystem.clone());
let orchestrator = Orchestrator::new(filesystem, provider);
orchestrator.attach_to_live(zombie_json_path.as_ref()).await
}
async fn attach_native(
zombie_json_path: PathBuf,
) -> Result<Network<LocalFileSystem>, OrchestratorError> {
let filesystem = LocalFileSystem;
let provider = NativeProvider::new(filesystem.clone());
let orchestrator = Orchestrator::new(filesystem, provider);
orchestrator.attach_to_live(zombie_json_path.as_ref()).await
}
async fn attach_k8s(
zombie_json_path: PathBuf,
) -> Result<Network<LocalFileSystem>, OrchestratorError> {
let filesystem = LocalFileSystem;
let provider = KubernetesProvider::new(filesystem.clone()).await;
let orchestrator = Orchestrator::new(filesystem, provider);
orchestrator.attach_to_live(zombie_json_path.as_ref()).await
}
async fn attach_k8s(
zombie_json_path: PathBuf,
) -> Result<Network<LocalFileSystem>, OrchestratorError> {
let filesystem = LocalFileSystem;
let provider = KubernetesProvider::new(filesystem.clone()).await;
let orchestrator = Orchestrator::new(filesystem, provider);
orchestrator.attach_to_live(zombie_json_path.as_ref()).await
}
async fn attach_docker(
zombie_json_path: PathBuf,
) -> Result<Network<LocalFileSystem>, OrchestratorError> {
let filesystem = LocalFileSystem;
let provider = DockerProvider::new(filesystem.clone()).await;
let orchestrator = Orchestrator::new(filesystem, provider);
orchestrator.attach_to_live(zombie_json_path.as_ref()).await
}
async fn attach_docker(
zombie_json_path: PathBuf,
) -> Result<Network<LocalFileSystem>, OrchestratorError> {
let filesystem = LocalFileSystem;
let provider = DockerProvider::new(filesystem.clone()).await;
let orchestrator = Orchestrator::new(filesystem, provider);
orchestrator.attach_to_live(zombie_json_path.as_ref()).await
}
}
@@ -5,9 +5,9 @@ const BEST_BLOCK_METRIC: &str = "block_height{status=\"best\"}";
#[tokio::test(flavor = "multi_thread")]
async fn rococo_local_with_omni_node_and_wasm_runtime() {
let _ = tracing_subscriber::fmt::try_init();
let _ = tracing_subscriber::fmt::try_init();
let config = NetworkConfigBuilder::new()
let config = NetworkConfigBuilder::new()
.with_relaychain(|relaychain| {
relaychain
.with_chain("rococo-local")
@@ -29,47 +29,32 @@ async fn rococo_local_with_omni_node_and_wasm_runtime() {
.build()
.unwrap();
let spawn_fn = get_spawn_fn();
let network = spawn_fn(config).await.unwrap();
let spawn_fn = get_spawn_fn();
let network = spawn_fn(config).await.unwrap();
println!("🚀🚀🚀🚀 network deployed");
println!("🚀🚀🚀🚀 network deployed");
// wait 2 blocks
let alice = network.get_node("alice").unwrap();
assert!(alice
.wait_metric(BEST_BLOCK_METRIC, |b| b > 2_f64)
.await
.is_ok());
// wait 2 blocks
let alice = network.get_node("alice").unwrap();
assert!(alice.wait_metric(BEST_BLOCK_METRIC, |b| b > 2_f64).await.is_ok());
// omni-collator-1
let collator = network.get_node("omni-collator-1").unwrap();
let client = collator
.wait_client::<subxt::PolkadotConfig>()
.await
.unwrap();
// omni-collator-1
let collator = network.get_node("omni-collator-1").unwrap();
let client = collator.wait_client::<subxt::PolkadotConfig>().await.unwrap();
// wait 1 blocks
let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(1);
while let Some(block) = blocks.next().await {
println!(
"Block (omni-collator-1) #{}",
block.unwrap().header().number
);
}
// wait 1 blocks
let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(1);
while let Some(block) = blocks.next().await {
println!("Block (omni-collator-1) #{}", block.unwrap().header().number);
}
// omni-collator-2
let collator = network.get_node("omni-collator-2").unwrap();
let client = collator
.wait_client::<subxt::PolkadotConfig>()
.await
.unwrap();
// omni-collator-2
let collator = network.get_node("omni-collator-2").unwrap();
let client = collator.wait_client::<subxt::PolkadotConfig>().await.unwrap();
// wait 1 blocks
let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(1);
while let Some(block) = blocks.next().await {
println!(
"Block (omni-collator-2) #{}",
block.unwrap().header().number
);
}
// wait 1 blocks
let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(1);
while let Some(block) = blocks.next().await {
println!("Block (omni-collator-2) #{}", block.unwrap().header().number);
}
}
@@ -5,9 +5,9 @@ const BEST_BLOCK_METRIC: &str = "block_height{status=\"best\"}";
#[tokio::test(flavor = "multi_thread")]
async fn polkadot_local_with_chain_spec_runtime() {
let _ = tracing_subscriber::fmt::try_init();
let _ = tracing_subscriber::fmt::try_init();
let config = NetworkConfigBuilder::new()
let config = NetworkConfigBuilder::new()
.with_relaychain(|relaychain| {
relaychain
.with_chain("polkadot-local")
@@ -30,47 +30,32 @@ async fn polkadot_local_with_chain_spec_runtime() {
.build()
.unwrap();
let spawn_fn = get_spawn_fn();
let network = spawn_fn(config).await.unwrap();
let spawn_fn = get_spawn_fn();
let network = spawn_fn(config).await.unwrap();
println!("🚀🚀🚀🚀 network deployed");
println!("🚀🚀🚀🚀 network deployed");
// wait 2 blocks
let alice = network.get_node("alice").unwrap();
assert!(alice
.wait_metric(BEST_BLOCK_METRIC, |b| b > 2_f64)
.await
.is_ok());
// wait 2 blocks
let alice = network.get_node("alice").unwrap();
assert!(alice.wait_metric(BEST_BLOCK_METRIC, |b| b > 2_f64).await.is_ok());
// asset-hub-collator-1
let collator = network.get_node("asset-hub-collator-1").unwrap();
let client = collator
.wait_client::<subxt::PolkadotConfig>()
.await
.unwrap();
// asset-hub-collator-1
let collator = network.get_node("asset-hub-collator-1").unwrap();
let client = collator.wait_client::<subxt::PolkadotConfig>().await.unwrap();
// wait 1 blocks
let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(1);
while let Some(block) = blocks.next().await {
println!(
"Block (asset-hub-collator-1) #{}",
block.unwrap().header().number
);
}
// wait 1 blocks
let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(1);
while let Some(block) = blocks.next().await {
println!("Block (asset-hub-collator-1) #{}", block.unwrap().header().number);
}
// asset-hub-collator-2
let collator = network.get_node("asset-hub-collator-2").unwrap();
let client = collator
.wait_client::<subxt::PolkadotConfig>()
.await
.unwrap();
// asset-hub-collator-2
let collator = network.get_node("asset-hub-collator-2").unwrap();
let client = collator.wait_client::<subxt::PolkadotConfig>().await.unwrap();
// wait 1 blocks
let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(1);
while let Some(block) = blocks.next().await {
println!(
"Block (asset-hub-collator-2) #{}",
block.unwrap().header().number
);
}
// wait 1 blocks
let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(1);
while let Some(block) = blocks.next().await {
println!("Block (asset-hub-collator-2) #{}", block.unwrap().header().number);
}
}
+16 -19
View File
@@ -4,7 +4,7 @@ use configuration::{NetworkConfig, NetworkConfigBuilder};
use zombienet_sdk::environment::get_spawn_fn;
fn small_network() -> NetworkConfig {
NetworkConfigBuilder::new()
NetworkConfigBuilder::new()
.with_relaychain(|r| {
r.with_chain("rococo-local")
.with_default_command("polkadot")
@@ -31,27 +31,24 @@ fn small_network() -> NetworkConfig {
#[tokio::test(flavor = "multi_thread")]
async fn ci_native_smoke_should_works() {
tracing_subscriber::fmt::init();
const BEST_BLOCK_METRIC: &str = "block_height{status=\"best\"}";
let now = Instant::now();
let config = small_network();
let spawn_fn = get_spawn_fn();
tracing_subscriber::fmt::init();
const BEST_BLOCK_METRIC: &str = "block_height{status=\"best\"}";
let now = Instant::now();
let config = small_network();
let spawn_fn = get_spawn_fn();
let network = spawn_fn(config).await.unwrap();
let network = spawn_fn(config).await.unwrap();
let elapsed = now.elapsed();
println!("🚀🚀🚀🚀 network deployed in {elapsed:.2?}");
let elapsed = now.elapsed();
println!("🚀🚀🚀🚀 network deployed in {elapsed:.2?}");
network.wait_until_is_up(20).await.unwrap();
network.wait_until_is_up(20).await.unwrap();
let elapsed = now.elapsed();
println!("✅✅✅✅ network is up in {elapsed:.2?}");
let elapsed = now.elapsed();
println!("✅✅✅✅ network is up in {elapsed:.2?}");
// Get a ref to the node
let alice = network.get_node("alice").unwrap();
// wait 10 blocks
alice
.wait_metric(BEST_BLOCK_METRIC, |x| x > 9_f64)
.await
.unwrap();
// Get a ref to the node
let alice = network.get_node("alice").unwrap();
// wait 10 blocks
alice.wait_metric(BEST_BLOCK_METRIC, |x| x > 9_f64).await.unwrap();
}
+109 -137
View File
@@ -6,174 +6,146 @@ use orchestrator::{AddCollatorOptions, AddNodeOptions};
use zombienet_sdk::environment::{get_attach_fn, get_spawn_fn};
fn small_network() -> NetworkConfig {
NetworkConfigBuilder::new()
.with_relaychain(|r| {
r.with_chain("rococo-local")
.with_default_command("polkadot")
.with_default_image("docker.io/parity/polkadot:v1.20.2")
.with_validator(|node| node.with_name("alice"))
.with_validator(|node| node.with_name("bob"))
})
.with_parachain(|p| {
p.with_id(2000).cumulus_based(true).with_collator(|n| {
n.with_name("collator")
.with_command("polkadot-parachain")
.with_image("docker.io/parity/polkadot-parachain:1.7.0")
})
})
.with_parachain(|p| {
p.with_id(3000).cumulus_based(true).with_collator(|n| {
n.with_name("collator-new")
.with_command("polkadot-parachain")
.with_image("docker.io/parity/polkadot-parachain:v1.20.2")
})
})
.with_global_settings(|g| {
g.with_base_dir(PathBuf::from("/tmp/zombie-1"))
.with_tear_down_on_failure(false)
})
.build()
.unwrap()
NetworkConfigBuilder::new()
.with_relaychain(|r| {
r.with_chain("rococo-local")
.with_default_command("polkadot")
.with_default_image("docker.io/parity/polkadot:v1.20.2")
.with_validator(|node| node.with_name("alice"))
.with_validator(|node| node.with_name("bob"))
})
.with_parachain(|p| {
p.with_id(2000).cumulus_based(true).with_collator(|n| {
n.with_name("collator")
.with_command("polkadot-parachain")
.with_image("docker.io/parity/polkadot-parachain:1.7.0")
})
})
.with_parachain(|p| {
p.with_id(3000).cumulus_based(true).with_collator(|n| {
n.with_name("collator-new")
.with_command("polkadot-parachain")
.with_image("docker.io/parity/polkadot-parachain:v1.20.2")
})
})
.with_global_settings(|g| {
g.with_base_dir(PathBuf::from("/tmp/zombie-1")).with_tear_down_on_failure(false)
})
.build()
.unwrap()
}
#[tokio::test(flavor = "multi_thread")]
async fn ci_k8s_basic_functionalities_should_works() {
let _ = tracing_subscriber::fmt::try_init();
let _ = tracing_subscriber::fmt::try_init();
const BEST_BLOCK_METRIC: &str = "block_height{status=\"best\"}";
let now = Instant::now();
const BEST_BLOCK_METRIC: &str = "block_height{status=\"best\"}";
let now = Instant::now();
let config = small_network();
let spawn_fn = get_spawn_fn();
let config = small_network();
let spawn_fn = get_spawn_fn();
let network = spawn_fn(config).await.unwrap();
let network = spawn_fn(config).await.unwrap();
let elapsed = now.elapsed();
println!("🚀🚀🚀🚀 network deployed in {elapsed:.2?}");
let elapsed = now.elapsed();
println!("🚀🚀🚀🚀 network deployed in {elapsed:.2?}");
// detach and attach to running
network.detach().await;
drop(network);
let attach_fn = get_attach_fn();
let zombie_path = PathBuf::from("/tmp/zombie-1/zombie.json");
let mut network = attach_fn(zombie_path).await.unwrap();
// detach and attach to running
network.detach().await;
drop(network);
let attach_fn = get_attach_fn();
let zombie_path = PathBuf::from("/tmp/zombie-1/zombie.json");
let mut network = attach_fn(zombie_path).await.unwrap();
// Get a ref to the node
let alice = network.get_node("alice").unwrap();
// Get a ref to the node
let alice = network.get_node("alice").unwrap();
let (_best_block_pass, client) = try_join!(
alice.wait_metric(BEST_BLOCK_METRIC, |x| x > 5_f64),
alice.wait_client::<subxt::PolkadotConfig>()
)
.unwrap();
let (_best_block_pass, client) = try_join!(
alice.wait_metric(BEST_BLOCK_METRIC, |x| x > 5_f64),
alice.wait_client::<subxt::PolkadotConfig>()
)
.unwrap();
alice
.wait_log_line_count("*rted #1*", true, 10)
.await
.unwrap();
alice.wait_log_line_count("*rted #1*", true, 10).await.unwrap();
// check best block through metrics with timeout
assert!(alice
.wait_metric_with_timeout(BEST_BLOCK_METRIC, |x| x > 10_f64, 45_u32)
.await
.is_ok());
// check best block through metrics with timeout
assert!(alice
.wait_metric_with_timeout(BEST_BLOCK_METRIC, |x| x > 10_f64, 45_u32)
.await
.is_ok());
// ensure timeout error
let best_block = alice.reports(BEST_BLOCK_METRIC).await.unwrap();
let res = alice
.wait_metric_with_timeout(BEST_BLOCK_METRIC, |x| x > (best_block * 2_f64), 10_u32)
.await;
// ensure timeout error
let best_block = alice.reports(BEST_BLOCK_METRIC).await.unwrap();
let res = alice
.wait_metric_with_timeout(BEST_BLOCK_METRIC, |x| x > (best_block * 2_f64), 10_u32)
.await;
assert!(res.is_err());
assert!(res.is_err());
// get single metric
let role = alice.reports("node_roles").await.unwrap();
println!("Role is {role}");
assert_eq!(role, 4.0);
// get single metric
let role = alice.reports("node_roles").await.unwrap();
println!("Role is {role}");
assert_eq!(role, 4.0);
// subxt
// wait 3 blocks
let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(3);
while let Some(block) = blocks.next().await {
println!("Block #{}", block.unwrap().header().number);
}
// subxt
// wait 3 blocks
let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(3);
while let Some(block) = blocks.next().await {
println!("Block #{}", block.unwrap().header().number);
}
// drop the client
drop(client);
// drop the client
drop(client);
// check best block through metrics
let best_block = alice
.reports("block_height{status=\"best\"}")
.await
.unwrap();
// check best block through metrics
let best_block = alice.reports("block_height{status=\"best\"}").await.unwrap();
assert!(best_block >= 2.0, "Current best {best_block}");
assert!(best_block >= 2.0, "Current best {best_block}");
// collator
let collator = network.get_node("collator").unwrap();
let client = collator
.wait_client::<subxt::PolkadotConfig>()
.await
.unwrap();
// collator
let collator = network.get_node("collator").unwrap();
let client = collator.wait_client::<subxt::PolkadotConfig>().await.unwrap();
// wait 3 blocks
let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(3);
while let Some(block) = blocks.next().await {
println!("Block (para) #{}", block.unwrap().header().number);
}
// wait 3 blocks
let mut blocks = client.blocks().subscribe_finalized().await.unwrap().take(3);
while let Some(block) = blocks.next().await {
println!("Block (para) #{}", block.unwrap().header().number);
}
// add node
let opts = AddNodeOptions {
rpc_port: Some(9444),
is_validator: true,
..Default::default()
};
// add node
let opts = AddNodeOptions { rpc_port: Some(9444), is_validator: true, ..Default::default() };
network.add_node("new1", opts).await.unwrap();
network.add_node("new1", opts).await.unwrap();
// add collator
let col_opts = AddCollatorOptions {
command: Some("polkadot-parachain".try_into().unwrap()),
image: Some(
"docker.io/parity/polkadot-parachain:1.7.0"
.try_into()
.unwrap(),
),
..Default::default()
};
// add collator
let col_opts = AddCollatorOptions {
command: Some("polkadot-parachain".try_into().unwrap()),
image: Some("docker.io/parity/polkadot-parachain:1.7.0".try_into().unwrap()),
..Default::default()
};
network
.add_collator("new-col-1", col_opts, 2000)
.await
.unwrap();
network.add_collator("new-col-1", col_opts, 2000).await.unwrap();
// pause / resume
let alice = network.get_node("alice").unwrap();
alice.pause().await.unwrap();
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
// pause / resume
let alice = network.get_node("alice").unwrap();
alice.pause().await.unwrap();
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
let res_err = alice
.wait_metric_with_timeout(BEST_BLOCK_METRIC, |x| x > 5_f64, 5_u32)
.await;
let res_err = alice.wait_metric_with_timeout(BEST_BLOCK_METRIC, |x| x > 5_f64, 5_u32).await;
assert!(res_err.is_err());
assert!(res_err.is_err());
alice.resume().await.unwrap();
alice
.wait_metric_with_timeout(BEST_BLOCK_METRIC, |x| x > 5_f64, 5_u32)
.await
.unwrap();
alice.resume().await.unwrap();
alice.wait_metric_with_timeout(BEST_BLOCK_METRIC, |x| x > 5_f64, 5_u32).await.unwrap();
// timeout connecting ws
let collator = network.get_node("collator").unwrap();
collator.pause().await.unwrap();
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
// timeout connecting ws
let collator = network.get_node("collator").unwrap();
collator.pause().await.unwrap();
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
let r = collator
.wait_client_with_timeout::<subxt::PolkadotConfig>(1_u32)
.await;
assert!(r.is_err());
let r = collator.wait_client_with_timeout::<subxt::PolkadotConfig>(1_u32).await;
assert!(r.is_err());
// tear down (optional if you don't detach the network)
network.destroy().await.unwrap();
// tear down (optional if you don't detach the network)
network.destroy().await.unwrap();
}
@@ -2,43 +2,43 @@ use zombienet_sdk::{environment::get_spawn_fn, NetworkConfigBuilder};
#[tokio::test(flavor = "multi_thread")]
async fn two_paras_same_id() {
tracing_subscriber::fmt::init();
let spawn_fn = get_spawn_fn();
let config = NetworkConfigBuilder::new()
.with_relaychain(|r| {
r.with_chain("rococo-local")
.with_default_command("polkadot")
.with_default_image("docker.io/parity/polkadot:v1.7.0")
.with_validator(|node| node.with_name("alice"))
.with_validator(|node| node.with_name("bob"))
})
.with_parachain(|p| {
p.with_id(2000)
.with_default_command("polkadot-parachain")
.with_default_image("docker.io/parity/polkadot-parachain:1.7.0")
.with_collator(|n| n.with_name("collator"))
})
.with_parachain(|p| {
p.with_id(2000)
.with_default_command("polkadot-parachain")
.with_default_image("docker.io/parity/polkadot-parachain:1.7.0")
.with_registration_strategy(zombienet_sdk::RegistrationStrategy::Manual)
.with_collator(|n| n.with_name("collator1"))
})
.build()
.unwrap();
tracing_subscriber::fmt::init();
let spawn_fn = get_spawn_fn();
let config = NetworkConfigBuilder::new()
.with_relaychain(|r| {
r.with_chain("rococo-local")
.with_default_command("polkadot")
.with_default_image("docker.io/parity/polkadot:v1.7.0")
.with_validator(|node| node.with_name("alice"))
.with_validator(|node| node.with_name("bob"))
})
.with_parachain(|p| {
p.with_id(2000)
.with_default_command("polkadot-parachain")
.with_default_image("docker.io/parity/polkadot-parachain:1.7.0")
.with_collator(|n| n.with_name("collator"))
})
.with_parachain(|p| {
p.with_id(2000)
.with_default_command("polkadot-parachain")
.with_default_image("docker.io/parity/polkadot-parachain:1.7.0")
.with_registration_strategy(zombienet_sdk::RegistrationStrategy::Manual)
.with_collator(|n| n.with_name("collator1"))
})
.build()
.unwrap();
let network = spawn_fn(config).await.unwrap();
let network = spawn_fn(config).await.unwrap();
assert!(network.get_node("collator").is_ok());
assert!(network.get_node("collator1").is_ok());
assert!(network.get_node("collator").is_ok());
assert!(network.get_node("collator1").is_ok());
// First parachain (out of two) is fetched
assert_eq!(network.parachain(2000).unwrap().unique_id(), "2000");
// First parachain (out of two) is fetched
assert_eq!(network.parachain(2000).unwrap().unique_id(), "2000");
// First and second parachain hav the same para_id
assert_eq!(
network.parachain_by_unique_id("2000").unwrap().para_id(),
network.parachain_by_unique_id("2000-1").unwrap().para_id(),
);
// First and second parachain hav the same para_id
assert_eq!(
network.parachain_by_unique_id("2000").unwrap().para_id(),
network.parachain_by_unique_id("2000-1").unwrap().para_id(),
);
}
@@ -11,14 +11,14 @@ pub const VALIDATION_CHECK: &str = "validation failed ";
pub const PREFIX_CANT_BE_NONE: &str = "name prefix can't be None if a value exists ";
pub const GRAPH_CONTAINS_NAME: &str =
"graph contains node name; we initialize it with all node names";
"graph contains node name; we initialize it with all node names";
pub const GRAPH_CONTAINS_DEP: &str = "graph contains dep_name; we filter out deps not contained in by_name and populate the graph with all nodes";
pub const INDEGREE_CONTAINS_NAME: &str =
"indegree contains node name; we initialize it with all node names";
"indegree contains node name; we initialize it with all node names";
pub const QUEUE_NOT_EMPTY: &str = "queue is not empty; we're looping over its length";
pub const THIS_IS_A_BUG: &str =
"- this is a bug please report it: https://github.com/paritytech/zombienet-sdk/issues";
"- this is a bug please report it: https://github.com/paritytech/zombienet-sdk/issues";
/// environment variable which can be used to override node spawn timeout
pub const ZOMBIE_NODE_SPAWN_TIMEOUT_SECONDS: &str = "ZOMBIE_NODE_SPAWN_TIMEOUT_SECONDS";
+33 -33
View File
@@ -10,51 +10,51 @@ pub mod local;
pub struct FileSystemError(#[from] anyhow::Error);
impl From<std::io::Error> for FileSystemError {
fn from(error: std::io::Error) -> Self {
Self(error.into())
}
fn from(error: std::io::Error) -> Self {
Self(error.into())
}
}
pub type FileSystemResult<T> = Result<T, FileSystemError>;
#[async_trait]
pub trait FileSystem {
async fn create_dir<P>(&self, path: P) -> FileSystemResult<()>
where
P: AsRef<Path> + Send;
async fn create_dir<P>(&self, path: P) -> FileSystemResult<()>
where
P: AsRef<Path> + Send;
async fn create_dir_all<P>(&self, path: P) -> FileSystemResult<()>
where
P: AsRef<Path> + Send;
async fn create_dir_all<P>(&self, path: P) -> FileSystemResult<()>
where
P: AsRef<Path> + Send;
async fn read<P>(&self, path: P) -> FileSystemResult<Vec<u8>>
where
P: AsRef<Path> + Send;
async fn read<P>(&self, path: P) -> FileSystemResult<Vec<u8>>
where
P: AsRef<Path> + Send;
async fn read_to_string<P>(&self, path: P) -> FileSystemResult<String>
where
P: AsRef<Path> + Send;
async fn read_to_string<P>(&self, path: P) -> FileSystemResult<String>
where
P: AsRef<Path> + Send;
async fn write<P, C>(&self, path: P, contents: C) -> FileSystemResult<()>
where
P: AsRef<Path> + Send,
C: AsRef<[u8]> + Send;
async fn write<P, C>(&self, path: P, contents: C) -> FileSystemResult<()>
where
P: AsRef<Path> + Send,
C: AsRef<[u8]> + Send;
async fn append<P, C>(&self, path: P, contents: C) -> FileSystemResult<()>
where
P: AsRef<Path> + Send,
C: AsRef<[u8]> + Send;
async fn append<P, C>(&self, path: P, contents: C) -> FileSystemResult<()>
where
P: AsRef<Path> + Send,
C: AsRef<[u8]> + Send;
async fn copy<P1, P2>(&self, from: P1, to: P2) -> FileSystemResult<()>
where
P1: AsRef<Path> + Send,
P2: AsRef<Path> + Send;
async fn copy<P1, P2>(&self, from: P1, to: P2) -> FileSystemResult<()>
where
P1: AsRef<Path> + Send,
P2: AsRef<Path> + Send;
async fn set_mode<P>(&self, path: P, perm: u32) -> FileSystemResult<()>
where
P: AsRef<Path> + Send;
async fn set_mode<P>(&self, path: P, perm: u32) -> FileSystemResult<()>
where
P: AsRef<Path> + Send;
async fn exists<P>(&self, path: P) -> bool
where
P: AsRef<Path> + Send;
async fn exists<P>(&self, path: P) -> bool
where
P: AsRef<Path> + Send;
}
File diff suppressed because it is too large Load Diff
+286 -299
View File
@@ -10,381 +10,368 @@ pub struct LocalFileSystem;
#[async_trait]
impl FileSystem for LocalFileSystem {
async fn create_dir<P>(&self, path: P) -> FileSystemResult<()>
where
P: AsRef<Path> + Send,
{
tokio::fs::create_dir(path).await.map_err(Into::into)
}
async fn create_dir<P>(&self, path: P) -> FileSystemResult<()>
where
P: AsRef<Path> + Send,
{
tokio::fs::create_dir(path).await.map_err(Into::into)
}
async fn create_dir_all<P>(&self, path: P) -> FileSystemResult<()>
where
P: AsRef<Path> + Send,
{
tokio::fs::create_dir_all(path).await.map_err(Into::into)
}
async fn create_dir_all<P>(&self, path: P) -> FileSystemResult<()>
where
P: AsRef<Path> + Send,
{
tokio::fs::create_dir_all(path).await.map_err(Into::into)
}
async fn read<P>(&self, path: P) -> FileSystemResult<Vec<u8>>
where
P: AsRef<Path> + Send,
{
tokio::fs::read(path).await.map_err(Into::into)
}
async fn read<P>(&self, path: P) -> FileSystemResult<Vec<u8>>
where
P: AsRef<Path> + Send,
{
tokio::fs::read(path).await.map_err(Into::into)
}
async fn read_to_string<P>(&self, path: P) -> FileSystemResult<String>
where
P: AsRef<Path> + Send,
{
tokio::fs::read_to_string(path).await.map_err(Into::into)
}
async fn read_to_string<P>(&self, path: P) -> FileSystemResult<String>
where
P: AsRef<Path> + Send,
{
tokio::fs::read_to_string(path).await.map_err(Into::into)
}
async fn write<P, C>(&self, path: P, contents: C) -> FileSystemResult<()>
where
P: AsRef<Path> + Send,
C: AsRef<[u8]> + Send,
{
tokio::fs::write(path, contents).await.map_err(Into::into)
}
async fn write<P, C>(&self, path: P, contents: C) -> FileSystemResult<()>
where
P: AsRef<Path> + Send,
C: AsRef<[u8]> + Send,
{
tokio::fs::write(path, contents).await.map_err(Into::into)
}
async fn append<P, C>(&self, path: P, contents: C) -> FileSystemResult<()>
where
P: AsRef<Path> + Send,
C: AsRef<[u8]> + Send,
{
let contents = contents.as_ref();
let mut file = tokio::fs::OpenOptions::new()
.create(true)
.append(true)
.open(path)
.await
.map_err(Into::<FileSystemError>::into)?;
async fn append<P, C>(&self, path: P, contents: C) -> FileSystemResult<()>
where
P: AsRef<Path> + Send,
C: AsRef<[u8]> + Send,
{
let contents = contents.as_ref();
let mut file = tokio::fs::OpenOptions::new()
.create(true)
.append(true)
.open(path)
.await
.map_err(Into::<FileSystemError>::into)?;
file.write_all(contents)
.await
.map_err(Into::<FileSystemError>::into)?;
file.write_all(contents).await.map_err(Into::<FileSystemError>::into)?;
file.flush().await.and(Ok(())).map_err(Into::into)
}
file.flush().await.and(Ok(())).map_err(Into::into)
}
async fn copy<P1, P2>(&self, from: P1, to: P2) -> FileSystemResult<()>
where
P1: AsRef<Path> + Send,
P2: AsRef<Path> + Send,
{
tokio::fs::copy(from, to)
.await
.and(Ok(()))
.map_err(Into::into)
}
async fn copy<P1, P2>(&self, from: P1, to: P2) -> FileSystemResult<()>
where
P1: AsRef<Path> + Send,
P2: AsRef<Path> + Send,
{
tokio::fs::copy(from, to).await.and(Ok(())).map_err(Into::into)
}
async fn set_mode<P>(&self, path: P, mode: u32) -> FileSystemResult<()>
where
P: AsRef<Path> + Send,
{
tokio::fs::set_permissions(path, Permissions::from_mode(mode))
.await
.map_err(Into::into)
}
async fn set_mode<P>(&self, path: P, mode: u32) -> FileSystemResult<()>
where
P: AsRef<Path> + Send,
{
tokio::fs::set_permissions(path, Permissions::from_mode(mode)).await.map_err(Into::into)
}
async fn exists<P>(&self, path: P) -> bool
where
P: AsRef<Path> + Send,
{
path.as_ref().exists()
}
async fn exists<P>(&self, path: P) -> bool
where
P: AsRef<Path> + Send,
{
path.as_ref().exists()
}
}
#[cfg(test)]
mod tests {
use uuid::Uuid;
use uuid::Uuid;
use super::*;
use super::*;
const FILE_BITS: u32 = 0o100000;
const DIR_BITS: u32 = 0o40000;
const FILE_BITS: u32 = 0o100000;
const DIR_BITS: u32 = 0o40000;
fn setup() -> String {
let test_dir = format!("/tmp/unit_test_{}", Uuid::new_v4());
std::fs::create_dir(&test_dir).unwrap();
test_dir
}
fn setup() -> String {
let test_dir = format!("/tmp/unit_test_{}", Uuid::new_v4());
std::fs::create_dir(&test_dir).unwrap();
test_dir
}
fn teardown(test_dir: String) {
std::fs::remove_dir_all(test_dir).unwrap();
}
fn teardown(test_dir: String) {
std::fs::remove_dir_all(test_dir).unwrap();
}
#[tokio::test]
async fn create_dir_should_create_a_new_directory_at_path() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn create_dir_should_create_a_new_directory_at_path() {
let test_dir = setup();
let fs = LocalFileSystem;
let new_dir = format!("{test_dir}/mynewdir");
fs.create_dir(&new_dir).await.unwrap();
let new_dir = format!("{test_dir}/mynewdir");
fs.create_dir(&new_dir).await.unwrap();
let new_dir_path = Path::new(&new_dir);
assert!(new_dir_path.exists() && new_dir_path.is_dir());
teardown(test_dir);
}
let new_dir_path = Path::new(&new_dir);
assert!(new_dir_path.exists() && new_dir_path.is_dir());
teardown(test_dir);
}
#[tokio::test]
async fn create_dir_should_bubble_up_error_if_some_happens() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn create_dir_should_bubble_up_error_if_some_happens() {
let test_dir = setup();
let fs = LocalFileSystem;
let new_dir = format!("{test_dir}/mynewdir");
// intentionally create new dir before calling function to force error
std::fs::create_dir(&new_dir).unwrap();
let err = fs.create_dir(&new_dir).await.unwrap_err();
let new_dir = format!("{test_dir}/mynewdir");
// intentionally create new dir before calling function to force error
std::fs::create_dir(&new_dir).unwrap();
let err = fs.create_dir(&new_dir).await.unwrap_err();
assert_eq!(err.to_string(), "File exists (os error 17)");
teardown(test_dir);
}
assert_eq!(err.to_string(), "File exists (os error 17)");
teardown(test_dir);
}
#[tokio::test]
async fn create_dir_all_should_create_a_new_directory_and_all_of_it_ancestors_at_path() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn create_dir_all_should_create_a_new_directory_and_all_of_it_ancestors_at_path() {
let test_dir = setup();
let fs = LocalFileSystem;
let new_dir = format!("{test_dir}/the/path/to/mynewdir");
fs.create_dir_all(&new_dir).await.unwrap();
let new_dir = format!("{test_dir}/the/path/to/mynewdir");
fs.create_dir_all(&new_dir).await.unwrap();
let new_dir_path = Path::new(&new_dir);
assert!(new_dir_path.exists() && new_dir_path.is_dir());
teardown(test_dir);
}
let new_dir_path = Path::new(&new_dir);
assert!(new_dir_path.exists() && new_dir_path.is_dir());
teardown(test_dir);
}
#[tokio::test]
async fn create_dir_all_should_bubble_up_error_if_some_happens() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn create_dir_all_should_bubble_up_error_if_some_happens() {
let test_dir = setup();
let fs = LocalFileSystem;
let new_dir = format!("{test_dir}/the/path/to/mynewdir");
// intentionally create new file as ancestor before calling function to force error
std::fs::write(format!("{test_dir}/the"), b"test").unwrap();
let err = fs.create_dir_all(&new_dir).await.unwrap_err();
let new_dir = format!("{test_dir}/the/path/to/mynewdir");
// intentionally create new file as ancestor before calling function to force error
std::fs::write(format!("{test_dir}/the"), b"test").unwrap();
let err = fs.create_dir_all(&new_dir).await.unwrap_err();
assert_eq!(err.to_string(), "Not a directory (os error 20)");
teardown(test_dir);
}
assert_eq!(err.to_string(), "Not a directory (os error 20)");
teardown(test_dir);
}
#[tokio::test]
async fn read_should_return_the_contents_of_the_file_at_path() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn read_should_return_the_contents_of_the_file_at_path() {
let test_dir = setup();
let fs = LocalFileSystem;
let file_path = format!("{test_dir}/myfile");
std::fs::write(&file_path, b"Test").unwrap();
let contents = fs.read(file_path).await.unwrap();
let file_path = format!("{test_dir}/myfile");
std::fs::write(&file_path, b"Test").unwrap();
let contents = fs.read(file_path).await.unwrap();
assert_eq!(contents, b"Test");
teardown(test_dir);
}
assert_eq!(contents, b"Test");
teardown(test_dir);
}
#[tokio::test]
async fn read_should_bubble_up_error_if_some_happens() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn read_should_bubble_up_error_if_some_happens() {
let test_dir = setup();
let fs = LocalFileSystem;
let file_path = format!("{test_dir}/myfile");
// intentionally forget to create file to force error
let err = fs.read(file_path).await.unwrap_err();
let file_path = format!("{test_dir}/myfile");
// intentionally forget to create file to force error
let err = fs.read(file_path).await.unwrap_err();
assert_eq!(err.to_string(), "No such file or directory (os error 2)");
teardown(test_dir);
}
assert_eq!(err.to_string(), "No such file or directory (os error 2)");
teardown(test_dir);
}
#[tokio::test]
async fn read_to_string_should_return_the_contents_of_the_file_at_path_as_string() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn read_to_string_should_return_the_contents_of_the_file_at_path_as_string() {
let test_dir = setup();
let fs = LocalFileSystem;
let file_path = format!("{test_dir}/myfile");
std::fs::write(&file_path, b"Test").unwrap();
let contents = fs.read_to_string(file_path).await.unwrap();
let file_path = format!("{test_dir}/myfile");
std::fs::write(&file_path, b"Test").unwrap();
let contents = fs.read_to_string(file_path).await.unwrap();
assert_eq!(contents, "Test");
teardown(test_dir);
}
assert_eq!(contents, "Test");
teardown(test_dir);
}
#[tokio::test]
async fn read_to_string_should_bubble_up_error_if_some_happens() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn read_to_string_should_bubble_up_error_if_some_happens() {
let test_dir = setup();
let fs = LocalFileSystem;
let file_path = format!("{test_dir}/myfile");
// intentionally forget to create file to force error
let err = fs.read_to_string(file_path).await.unwrap_err();
let file_path = format!("{test_dir}/myfile");
// intentionally forget to create file to force error
let err = fs.read_to_string(file_path).await.unwrap_err();
assert_eq!(err.to_string(), "No such file or directory (os error 2)");
teardown(test_dir);
}
assert_eq!(err.to_string(), "No such file or directory (os error 2)");
teardown(test_dir);
}
#[tokio::test]
async fn write_should_create_a_new_file_at_path_with_contents() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn write_should_create_a_new_file_at_path_with_contents() {
let test_dir = setup();
let fs = LocalFileSystem;
let file_path = format!("{test_dir}/myfile");
fs.write(&file_path, "Test").await.unwrap();
let file_path = format!("{test_dir}/myfile");
fs.write(&file_path, "Test").await.unwrap();
assert_eq!(std::fs::read_to_string(file_path).unwrap(), "Test");
teardown(test_dir);
}
assert_eq!(std::fs::read_to_string(file_path).unwrap(), "Test");
teardown(test_dir);
}
#[tokio::test]
async fn write_should_overwrite_an_existing_file_with_contents() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn write_should_overwrite_an_existing_file_with_contents() {
let test_dir = setup();
let fs = LocalFileSystem;
let file_path = format!("{test_dir}/myfile");
std::fs::write(&file_path, "Test").unwrap();
assert_eq!(std::fs::read_to_string(&file_path).unwrap(), "Test");
fs.write(&file_path, "Test updated").await.unwrap();
let file_path = format!("{test_dir}/myfile");
std::fs::write(&file_path, "Test").unwrap();
assert_eq!(std::fs::read_to_string(&file_path).unwrap(), "Test");
fs.write(&file_path, "Test updated").await.unwrap();
assert_eq!(std::fs::read_to_string(file_path).unwrap(), "Test updated");
teardown(test_dir);
}
assert_eq!(std::fs::read_to_string(file_path).unwrap(), "Test updated");
teardown(test_dir);
}
#[tokio::test]
async fn write_should_bubble_up_error_if_some_happens() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn write_should_bubble_up_error_if_some_happens() {
let test_dir = setup();
let fs = LocalFileSystem;
let file_path = format!("{test_dir}/myfile");
// intentionally create directory instead of file to force error
std::fs::create_dir(&file_path).unwrap();
let err = fs.write(&file_path, "Test").await.unwrap_err();
let file_path = format!("{test_dir}/myfile");
// intentionally create directory instead of file to force error
std::fs::create_dir(&file_path).unwrap();
let err = fs.write(&file_path, "Test").await.unwrap_err();
assert_eq!(err.to_string(), "Is a directory (os error 21)");
teardown(test_dir);
}
assert_eq!(err.to_string(), "Is a directory (os error 21)");
teardown(test_dir);
}
#[tokio::test]
async fn append_should_create_a_new_file_at_path_with_contents() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn append_should_create_a_new_file_at_path_with_contents() {
let test_dir = setup();
let fs = LocalFileSystem;
let file_path = format!("{test_dir}/myfile");
fs.append(&file_path, "Test").await.unwrap();
let file_path = format!("{test_dir}/myfile");
fs.append(&file_path, "Test").await.unwrap();
assert_eq!(std::fs::read_to_string(file_path).unwrap(), "Test");
teardown(test_dir);
}
assert_eq!(std::fs::read_to_string(file_path).unwrap(), "Test");
teardown(test_dir);
}
#[tokio::test]
async fn append_should_updates_an_existing_file_by_appending_contents() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn append_should_updates_an_existing_file_by_appending_contents() {
let test_dir = setup();
let fs = LocalFileSystem;
let file_path = format!("{test_dir}/myfile");
std::fs::write(&file_path, "Test").unwrap();
assert_eq!(std::fs::read_to_string(&file_path).unwrap(), "Test");
fs.append(&file_path, " updated").await.unwrap();
let file_path = format!("{test_dir}/myfile");
std::fs::write(&file_path, "Test").unwrap();
assert_eq!(std::fs::read_to_string(&file_path).unwrap(), "Test");
fs.append(&file_path, " updated").await.unwrap();
assert_eq!(std::fs::read_to_string(file_path).unwrap(), "Test updated");
teardown(test_dir);
}
assert_eq!(std::fs::read_to_string(file_path).unwrap(), "Test updated");
teardown(test_dir);
}
#[tokio::test]
async fn append_should_bubble_up_error_if_some_happens() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn append_should_bubble_up_error_if_some_happens() {
let test_dir = setup();
let fs = LocalFileSystem;
let file_path = format!("{test_dir}/myfile");
// intentionally create directory instead of file to force error
std::fs::create_dir(&file_path).unwrap();
let err = fs.append(&file_path, "Test").await.unwrap_err();
let file_path = format!("{test_dir}/myfile");
// intentionally create directory instead of file to force error
std::fs::create_dir(&file_path).unwrap();
let err = fs.append(&file_path, "Test").await.unwrap_err();
assert_eq!(err.to_string(), "Is a directory (os error 21)");
teardown(test_dir);
}
assert_eq!(err.to_string(), "Is a directory (os error 21)");
teardown(test_dir);
}
#[tokio::test]
async fn copy_should_create_a_duplicate_of_source() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn copy_should_create_a_duplicate_of_source() {
let test_dir = setup();
let fs = LocalFileSystem;
let from_path = format!("{test_dir}/myfile");
std::fs::write(&from_path, "Test").unwrap();
let to_path = format!("{test_dir}/mycopy");
fs.copy(&from_path, &to_path).await.unwrap();
let from_path = format!("{test_dir}/myfile");
std::fs::write(&from_path, "Test").unwrap();
let to_path = format!("{test_dir}/mycopy");
fs.copy(&from_path, &to_path).await.unwrap();
assert_eq!(std::fs::read_to_string(to_path).unwrap(), "Test");
teardown(test_dir);
}
assert_eq!(std::fs::read_to_string(to_path).unwrap(), "Test");
teardown(test_dir);
}
#[tokio::test]
async fn copy_should_ovewrite_destination_if_alread_exists() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn copy_should_ovewrite_destination_if_alread_exists() {
let test_dir = setup();
let fs = LocalFileSystem;
let from_path = format!("{test_dir}/myfile");
std::fs::write(&from_path, "Test").unwrap();
let to_path = format!("{test_dir}/mycopy");
std::fs::write(&from_path, "Some content").unwrap();
fs.copy(&from_path, &to_path).await.unwrap();
let from_path = format!("{test_dir}/myfile");
std::fs::write(&from_path, "Test").unwrap();
let to_path = format!("{test_dir}/mycopy");
std::fs::write(&from_path, "Some content").unwrap();
fs.copy(&from_path, &to_path).await.unwrap();
assert_eq!(std::fs::read_to_string(to_path).unwrap(), "Some content");
teardown(test_dir);
}
assert_eq!(std::fs::read_to_string(to_path).unwrap(), "Some content");
teardown(test_dir);
}
#[tokio::test]
async fn copy_should_bubble_up_error_if_some_happens() {
let test_dir = setup();
let fs = LocalFileSystem;
#[tokio::test]
async fn copy_should_bubble_up_error_if_some_happens() {
let test_dir = setup();
let fs = LocalFileSystem;
let from_path = format!("{test_dir}/nonexistentfile");
let to_path = format!("{test_dir}/mycopy");
let err = fs.copy(&from_path, &to_path).await.unwrap_err();
let from_path = format!("{test_dir}/nonexistentfile");
let to_path = format!("{test_dir}/mycopy");
let err = fs.copy(&from_path, &to_path).await.unwrap_err();
assert_eq!(err.to_string(), "No such file or directory (os error 2)");
teardown(test_dir);
}
assert_eq!(err.to_string(), "No such file or directory (os error 2)");
teardown(test_dir);
}
#[tokio::test]
async fn set_mode_should_update_the_file_mode_at_path() {
let test_dir = setup();
let fs = LocalFileSystem;
let path = format!("{test_dir}/myfile");
std::fs::write(&path, "Test").unwrap();
assert!(std::fs::metadata(&path).unwrap().permissions().mode() != (FILE_BITS + 0o400));
#[tokio::test]
async fn set_mode_should_update_the_file_mode_at_path() {
let test_dir = setup();
let fs = LocalFileSystem;
let path = format!("{test_dir}/myfile");
std::fs::write(&path, "Test").unwrap();
assert!(std::fs::metadata(&path).unwrap().permissions().mode() != (FILE_BITS + 0o400));
fs.set_mode(&path, 0o400).await.unwrap();
fs.set_mode(&path, 0o400).await.unwrap();
assert_eq!(
std::fs::metadata(&path).unwrap().permissions().mode(),
FILE_BITS + 0o400
);
teardown(test_dir);
}
assert_eq!(std::fs::metadata(&path).unwrap().permissions().mode(), FILE_BITS + 0o400);
teardown(test_dir);
}
#[tokio::test]
async fn set_mode_should_update_the_directory_mode_at_path() {
let test_dir = setup();
let fs = LocalFileSystem;
let path = format!("{test_dir}/mydir");
std::fs::create_dir(&path).unwrap();
assert!(std::fs::metadata(&path).unwrap().permissions().mode() != (DIR_BITS + 0o700));
#[tokio::test]
async fn set_mode_should_update_the_directory_mode_at_path() {
let test_dir = setup();
let fs = LocalFileSystem;
let path = format!("{test_dir}/mydir");
std::fs::create_dir(&path).unwrap();
assert!(std::fs::metadata(&path).unwrap().permissions().mode() != (DIR_BITS + 0o700));
fs.set_mode(&path, 0o700).await.unwrap();
fs.set_mode(&path, 0o700).await.unwrap();
assert_eq!(
std::fs::metadata(&path).unwrap().permissions().mode(),
DIR_BITS + 0o700
);
teardown(test_dir);
}
assert_eq!(std::fs::metadata(&path).unwrap().permissions().mode(), DIR_BITS + 0o700);
teardown(test_dir);
}
#[tokio::test]
async fn set_mode_should_bubble_up_error_if_some_happens() {
let test_dir = setup();
let fs = LocalFileSystem;
let path = format!("{test_dir}/somemissingfile");
// intentionnally don't create file
#[tokio::test]
async fn set_mode_should_bubble_up_error_if_some_happens() {
let test_dir = setup();
let fs = LocalFileSystem;
let path = format!("{test_dir}/somemissingfile");
// intentionnally don't create file
let err = fs.set_mode(&path, 0o400).await.unwrap_err();
let err = fs.set_mode(&path, 0o400).await.unwrap_err();
assert_eq!(err.to_string(), "No such file or directory (os error 2)");
teardown(test_dir);
}
assert_eq!(err.to_string(), "No such file or directory (os error 2)");
teardown(test_dir);
}
}
+38 -38
View File
@@ -8,53 +8,53 @@ use crate::constants::THIS_IS_A_BUG;
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
pub async fn download_file(url: String, dest: String) -> Result<()> {
let response = reqwest::get(url).await?;
let mut file = std::fs::File::create(dest)?;
let mut content = Cursor::new(response.bytes().await?);
std::io::copy(&mut content, &mut file)?;
Ok(())
let response = reqwest::get(url).await?;
let mut file = std::fs::File::create(dest)?;
let mut content = Cursor::new(response.bytes().await?);
std::io::copy(&mut content, &mut file)?;
Ok(())
}
pub async fn wait_ws_ready(url: &str) -> Result<()> {
let mut parsed = Url::from_str(url)?;
parsed
.set_scheme("http")
.map_err(|_| anyhow::anyhow!("Can not set the scheme, {THIS_IS_A_BUG}"))?;
let mut parsed = Url::from_str(url)?;
parsed
.set_scheme("http")
.map_err(|_| anyhow::anyhow!("Can not set the scheme, {THIS_IS_A_BUG}"))?;
let http_client = reqwest::Client::new();
loop {
let req = Request::new(Method::OPTIONS, parsed.clone());
let res = http_client.execute(req).await;
match res {
Ok(res) => {
if res.status() == StatusCode::OK {
// ready to go!
break;
}
let http_client = reqwest::Client::new();
loop {
let req = Request::new(Method::OPTIONS, parsed.clone());
let res = http_client.execute(req).await;
match res {
Ok(res) => {
if res.status() == StatusCode::OK {
// ready to go!
break;
}
trace!("http_client status: {}, continuing...", res.status());
},
Err(e) => {
if !skip_err_while_waiting(&e) {
return Err(e.into());
}
trace!("http_client status: {}, continuing...", res.status());
},
Err(e) => {
if !skip_err_while_waiting(&e) {
return Err(e.into());
}
trace!("http_client err: {}, continuing... ", e.to_string());
},
}
trace!("http_client err: {}, continuing... ", e.to_string());
},
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
Ok(())
Ok(())
}
pub fn skip_err_while_waiting(e: &reqwest::Error) -> bool {
// if the error is connecting/request could be the case that the node
// is not listening yet, so we keep waiting
// Skipped errs like:
// 'tcp connect error: Connection refused (os error 61)'
// 'operation was canceled: connection closed before message completed'
// 'connection error: Connection reset by peer (os error 54)'
e.is_connect() || e.is_request()
// if the error is connecting/request could be the case that the node
// is not listening yet, so we keep waiting
// Skipped errs like:
// 'tcp connect error: Connection refused (os error 61)'
// 'operation was canceled: connection closed before message completed'
// 'connection error: Connection reset by peer (os error 54)'
e.is_connect() || e.is_request()
}
+138 -140
View File
@@ -7,191 +7,189 @@ use tracing::{trace, warn};
use crate::constants::{SHOULD_COMPILE, THIS_IS_A_BUG};
lazy_static! {
static ref RE: Regex = Regex::new(r#"\{\{([a-zA-Z0-9_]*)\}\}"#)
.unwrap_or_else(|_| panic!("{SHOULD_COMPILE}, {THIS_IS_A_BUG}"));
static ref TOKEN_PLACEHOLDER: Regex = Regex::new(r#"\{\{ZOMBIE:(.*?):(.*?)\}\}"#)
.unwrap_or_else(|_| panic!("{SHOULD_COMPILE}, {THIS_IS_A_BUG}"));
static ref PLACEHOLDER_COMPAT: HashMap<&'static str, &'static str> = {
let mut m = HashMap::new();
m.insert("multiAddress", "multiaddr");
m.insert("wsUri", "ws_uri");
m.insert("prometheusUri", "prometheus_uri");
static ref RE: Regex = Regex::new(r#"\{\{([a-zA-Z0-9_]*)\}\}"#)
.unwrap_or_else(|_| panic!("{SHOULD_COMPILE}, {THIS_IS_A_BUG}"));
static ref TOKEN_PLACEHOLDER: Regex = Regex::new(r#"\{\{ZOMBIE:(.*?):(.*?)\}\}"#)
.unwrap_or_else(|_| panic!("{SHOULD_COMPILE}, {THIS_IS_A_BUG}"));
static ref PLACEHOLDER_COMPAT: HashMap<&'static str, &'static str> = {
let mut m = HashMap::new();
m.insert("multiAddress", "multiaddr");
m.insert("wsUri", "ws_uri");
m.insert("prometheusUri", "prometheus_uri");
m
};
m
};
}
/// Return true if the text contains any TOKEN_PLACEHOLDER
pub fn has_tokens(text: &str) -> bool {
TOKEN_PLACEHOLDER.is_match(text)
TOKEN_PLACEHOLDER.is_match(text)
}
pub fn apply_replacements(text: &str, replacements: &HashMap<&str, &str>) -> String {
let augmented_text = RE.replace_all(text, |caps: &Captures| {
if let Some(replacements_value) = replacements.get(&caps[1]) {
replacements_value.to_string()
} else {
caps[0].to_string()
}
});
let augmented_text = RE.replace_all(text, |caps: &Captures| {
if let Some(replacements_value) = replacements.get(&caps[1]) {
replacements_value.to_string()
} else {
caps[0].to_string()
}
});
augmented_text.to_string()
augmented_text.to_string()
}
pub fn apply_env_replacements(text: &str) -> String {
let augmented_text = RE.replace_all(text, |caps: &Captures| {
if let Ok(replacements_value) = std::env::var(&caps[1]) {
replacements_value
} else {
caps[0].to_string()
}
});
let augmented_text = RE.replace_all(text, |caps: &Captures| {
if let Ok(replacements_value) = std::env::var(&caps[1]) {
replacements_value
} else {
caps[0].to_string()
}
});
augmented_text.to_string()
augmented_text.to_string()
}
pub fn apply_running_network_replacements(text: &str, network: &serde_json::Value) -> String {
let augmented_text = TOKEN_PLACEHOLDER.replace_all(text, |caps: &Captures| {
trace!("appling replacements for caps: {caps:#?}");
if let Some(node) = network.get(&caps[1]) {
trace!("caps1 {} - node: {node}", &caps[1]);
let field = *PLACEHOLDER_COMPAT.get(&caps[2]).unwrap_or(&&caps[2]);
if let Some(val) = node.get(field) {
trace!("caps2 {} - node: {node}", field);
val.as_str().unwrap_or("Invalid string").to_string()
} else {
warn!(
"⚠️ The node with name {} doesn't have the value {} in context",
&caps[1], &caps[2]
);
caps[0].to_string()
}
} else {
warn!("⚠️ No node with name {} in context", &caps[1]);
caps[0].to_string()
}
});
let augmented_text = TOKEN_PLACEHOLDER.replace_all(text, |caps: &Captures| {
trace!("appling replacements for caps: {caps:#?}");
if let Some(node) = network.get(&caps[1]) {
trace!("caps1 {} - node: {node}", &caps[1]);
let field = *PLACEHOLDER_COMPAT.get(&caps[2]).unwrap_or(&&caps[2]);
if let Some(val) = node.get(field) {
trace!("caps2 {} - node: {node}", field);
val.as_str().unwrap_or("Invalid string").to_string()
} else {
warn!(
"⚠️ The node with name {} doesn't have the value {} in context",
&caps[1], &caps[2]
);
caps[0].to_string()
}
} else {
warn!("⚠️ No node with name {} in context", &caps[1]);
caps[0].to_string()
}
});
augmented_text.to_string()
augmented_text.to_string()
}
pub fn get_tokens_to_replace(text: &str) -> HashSet<String> {
let mut tokens = HashSet::new();
let mut tokens = HashSet::new();
TOKEN_PLACEHOLDER
.captures_iter(text)
.for_each(|caps: Captures| {
tokens.insert(caps[1].to_string());
});
TOKEN_PLACEHOLDER.captures_iter(text).for_each(|caps: Captures| {
tokens.insert(caps[1].to_string());
});
tokens
tokens
}
#[cfg(test)]
mod tests {
use serde_json::json;
use serde_json::json;
use super::*;
use super::*;
#[test]
fn replace_should_works() {
let text = "some {{namespace}}";
let mut replacements = HashMap::new();
replacements.insert("namespace", "demo-123");
let res = apply_replacements(text, &replacements);
assert_eq!("some demo-123".to_string(), res);
}
#[test]
fn replace_should_works() {
let text = "some {{namespace}}";
let mut replacements = HashMap::new();
replacements.insert("namespace", "demo-123");
let res = apply_replacements(text, &replacements);
assert_eq!("some demo-123".to_string(), res);
}
#[test]
fn replace_env_should_works() {
let text = "some {{namespace}}";
std::env::set_var("namespace", "demo-123");
// let mut replacements = HashMap::new();
// replacements.insert("namespace", "demo-123");
let res = apply_env_replacements(text);
assert_eq!("some demo-123".to_string(), res);
}
#[test]
fn replace_env_should_works() {
let text = "some {{namespace}}";
std::env::set_var("namespace", "demo-123");
// let mut replacements = HashMap::new();
// replacements.insert("namespace", "demo-123");
let res = apply_env_replacements(text);
assert_eq!("some demo-123".to_string(), res);
}
#[test]
fn replace_multiple_should_works() {
let text = r#"some {{namespace}}
#[test]
fn replace_multiple_should_works() {
let text = r#"some {{namespace}}
other is {{other}}"#;
let augmented_text = r#"some demo-123
let augmented_text = r#"some demo-123
other is other-123"#;
let mut replacements = HashMap::new();
replacements.insert("namespace", "demo-123");
replacements.insert("other", "other-123");
let res = apply_replacements(text, &replacements);
assert_eq!(augmented_text, res);
}
let mut replacements = HashMap::new();
replacements.insert("namespace", "demo-123");
replacements.insert("other", "other-123");
let res = apply_replacements(text, &replacements);
assert_eq!(augmented_text, res);
}
#[test]
fn replace_multiple_with_missing_should_works() {
let text = r#"some {{namespace}}
#[test]
fn replace_multiple_with_missing_should_works() {
let text = r#"some {{namespace}}
other is {{other}}"#;
let augmented_text = r#"some demo-123
let augmented_text = r#"some demo-123
other is {{other}}"#;
let mut replacements = HashMap::new();
replacements.insert("namespace", "demo-123");
let mut replacements = HashMap::new();
replacements.insert("namespace", "demo-123");
let res = apply_replacements(text, &replacements);
assert_eq!(augmented_text, res);
}
let res = apply_replacements(text, &replacements);
assert_eq!(augmented_text, res);
}
#[test]
fn replace_without_replacement_should_leave_text_unchanged() {
let text = "some {{namespace}}";
let mut replacements = HashMap::new();
replacements.insert("other", "demo-123");
let res = apply_replacements(text, &replacements);
assert_eq!(text.to_string(), res);
}
#[test]
fn replace_without_replacement_should_leave_text_unchanged() {
let text = "some {{namespace}}";
let mut replacements = HashMap::new();
replacements.insert("other", "demo-123");
let res = apply_replacements(text, &replacements);
assert_eq!(text.to_string(), res);
}
#[test]
fn replace_running_network_should_work() {
let network = json!({
"alice" : {
"multiaddr": "some/demo/127.0.0.1"
}
});
#[test]
fn replace_running_network_should_work() {
let network = json!({
"alice" : {
"multiaddr": "some/demo/127.0.0.1"
}
});
let res = apply_running_network_replacements("{{ZOMBIE:alice:multiaddr}}", &network);
assert_eq!(res.as_str(), "some/demo/127.0.0.1");
}
let res = apply_running_network_replacements("{{ZOMBIE:alice:multiaddr}}", &network);
assert_eq!(res.as_str(), "some/demo/127.0.0.1");
}
#[test]
fn replace_running_network_with_compat_should_work() {
let network = json!({
"alice" : {
"multiaddr": "some/demo/127.0.0.1"
}
});
#[test]
fn replace_running_network_with_compat_should_work() {
let network = json!({
"alice" : {
"multiaddr": "some/demo/127.0.0.1"
}
});
let res = apply_running_network_replacements("{{ZOMBIE:alice:multiAddress}}", &network);
assert_eq!(res.as_str(), "some/demo/127.0.0.1");
}
let res = apply_running_network_replacements("{{ZOMBIE:alice:multiAddress}}", &network);
assert_eq!(res.as_str(), "some/demo/127.0.0.1");
}
#[test]
fn replace_running_network_with_missing_field_should_not_replace_nothing() {
let network = json!({
"alice" : {
"multiaddr": "some/demo/127.0.0.1"
}
});
#[test]
fn replace_running_network_with_missing_field_should_not_replace_nothing() {
let network = json!({
"alice" : {
"multiaddr": "some/demo/127.0.0.1"
}
});
let res = apply_running_network_replacements("{{ZOMBIE:alice:someField}}", &network);
assert_eq!(res.as_str(), "{{ZOMBIE:alice:someField}}");
}
let res = apply_running_network_replacements("{{ZOMBIE:alice:someField}}", &network);
assert_eq!(res.as_str(), "{{ZOMBIE:alice:someField}}");
}
#[test]
fn get_tokens_to_replace_should_work() {
let res = get_tokens_to_replace("{{ZOMBIE:alice:multiaddr}} {{ZOMBIE:bob:multiaddr}}");
let mut expected = HashSet::new();
expected.insert("alice".to_string());
expected.insert("bob".to_string());
#[test]
fn get_tokens_to_replace_should_work() {
let res = get_tokens_to_replace("{{ZOMBIE:alice:multiaddr}} {{ZOMBIE:bob:multiaddr}}");
let mut expected = HashSet::new();
expected.insert("alice".to_string());
expected.insert("bob".to_string());
assert_eq!(res, expected);
}
assert_eq!(res, expected);
}
}
+14 -21
View File
@@ -1,26 +1,19 @@
# https://rust-lang.github.io/rustfmt/?version=v1.7.0
# Pezkuwi ZombieNet SDK - Stable Rustfmt Configuration
# Only stable features for compatibility with CI
# general
indent_style = "Block"
# Basic (stable)
hard_tabs = true
max_width = 100
use_small_heuristics = "Max"
# rewriting
condense_wildcard_suffixes = true
# Imports (stable)
reorder_imports = true
reorder_modules = true
# Consistency (stable)
newline_style = "Unix"
# Misc (stable)
match_block_trailing_comma = true
use_field_init_shorthand = true
use_try_shorthand = true
# normalization
normalize_comments = true
normalize_doc_attributes = true
# reordering
reorder_impl_items = true
reorder_imports = true
reorder_modules = true
imports_granularity = "Crate"
group_imports = "StdExternalCrate"
# additional formating
format_code_in_doc_comments = true
format_macro_matchers = true
format_macro_bodies = true