diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index c75f96ee2e..dcffa09aaf 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -287,14 +287,13 @@ where let task_manager = service::build_full( config, service::NewFullParams { - is_collator: service::IsCollator::No, + is_parachain_node: service::IsParachainNode::No, grandpa_pause, jaeger_agent, telemetry_worker_handle: None, node_version, workers_path: cli.run.workers_path, workers_names: None, - overseer_enable_anyways: false, overseer_gen, overseer_message_channel_capacity_override: cli .run diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 7e29e64c40..b29e47b4c4 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -2253,7 +2253,7 @@ where // // 1. This is not a local approval, as we don't store anything new in the approval entry. // 2. The candidate is not newly approved, as we haven't altered the approval entry's - // approved flag with `mark_approved` above. + // approved flag with `mark_approved` above. // 3. The approver, if any, had already approved the candidate, as we haven't altered the // bitfield. if transition.is_local_approval() || newly_approved || !already_approved_by.unwrap_or(true) diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs index 6f632a0ae9..7a14de18a8 100644 --- a/polkadot/node/core/pvf/execute-worker/src/lib.rs +++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs @@ -55,8 +55,8 @@ use tokio::{io, net::UnixStream}; // // There are quirks to that configuration knob: // -// 1. It only limits the amount of stack space consumed by wasm but does not ensure nor check -// that the stack space is actually available. +// 1. It only limits the amount of stack space consumed by wasm but does not ensure nor check that +// the stack space is actually available. 
// // That means, if the calling thread has 1 MiB of stack space left and the wasm code consumes // more, then the wasmtime limit will **not** trigger. Instead, the wasm code will hit the diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index 803a562514..b94ebb2822 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -1319,13 +1319,13 @@ impl State { } // Here we're leaning on a few behaviors of assignment propagation: - // 1. At this point, the only peer we're aware of which has the approval - // message is the source peer. - // 2. We have sent the assignment message to every peer in the required routing - // which is aware of this block _unless_ the peer we originally received the - // assignment from was part of the required routing. In that case, we've sent - // the assignment to all aware peers in the required routing _except_ the original - // source of the assignment. Hence the `in_topology_check`. + // 1. At this point, the only peer we're aware of which has the approval message is + // the source peer. + // 2. We have sent the assignment message to every peer in the required routing which + // is aware of this block _unless_ the peer we originally received the assignment + // from was part of the required routing. In that case, we've sent the assignment + // to all aware peers in the required routing _except_ the original source of the + // assignment. Hence the `in_topology_check`. // 3. Any randomly selected peers have been sent the assignment already. 
let in_topology = topology .map_or(false, |t| t.local_grid_neighbors().route_to_peer(required_routing, peer)); diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 979f0ada4e..422157a1ed 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -463,8 +463,8 @@ fn delay_reputation_change() { /// /// /// 1. Send a view update that removes block B from their view. -/// 2. Send a message from B that they incur `COST_UNEXPECTED_MESSAGE` for, -/// but then they receive `BENEFIT_VALID_MESSAGE`. +/// 2. Send a message from B that they incur `COST_UNEXPECTED_MESSAGE` for, but then they receive +/// `BENEFIT_VALID_MESSAGE`. /// 3. Send all other messages related to B. #[test] fn spam_attack_results_in_negative_reputation_change() { diff --git a/polkadot/node/network/collator-protocol/src/lib.rs b/polkadot/node/network/collator-protocol/src/lib.rs index 8e710a26ad..68d882be6f 100644 --- a/polkadot/node/network/collator-protocol/src/lib.rs +++ b/polkadot/node/network/collator-protocol/src/lib.rs @@ -37,7 +37,7 @@ use polkadot_node_network_protocol::{ }; use polkadot_primitives::CollatorPair; -use polkadot_node_subsystem::{errors::SubsystemError, overseer, SpawnedSubsystem}; +use polkadot_node_subsystem::{errors::SubsystemError, overseer, DummySubsystem, SpawnedSubsystem}; mod error; @@ -82,6 +82,8 @@ pub enum ProtocolSide { IncomingRequestReceiver, collator_side::Metrics, ), + /// No protocol side, just disable it. + None, } /// The collator protocol subsystem. 
@@ -98,24 +100,22 @@ impl CollatorProtocolSubsystem { pub fn new(protocol_side: ProtocolSide) -> Self { Self { protocol_side } } - - async fn run(self, ctx: Context) -> std::result::Result<(), error::FatalError> { - match self.protocol_side { - ProtocolSide::Validator { keystore, eviction_policy, metrics } => - validator_side::run(ctx, keystore, eviction_policy, metrics).await, - ProtocolSide::Collator(local_peer_id, collator_pair, req_receiver, metrics) => - collator_side::run(ctx, local_peer_id, collator_pair, req_receiver, metrics).await, - } - } } #[overseer::subsystem(CollatorProtocol, error=SubsystemError, prefix=self::overseer)] impl CollatorProtocolSubsystem { fn start(self, ctx: Context) -> SpawnedSubsystem { - let future = self - .run(ctx) - .map_err(|e| SubsystemError::with_origin("collator-protocol", e)) - .boxed(); + let future = match self.protocol_side { + ProtocolSide::Validator { keystore, eviction_policy, metrics } => + validator_side::run(ctx, keystore, eviction_policy, metrics) + .map_err(|e| SubsystemError::with_origin("collator-protocol", e)) + .boxed(), + ProtocolSide::Collator(local_peer_id, collator_pair, req_receiver, metrics) => + collator_side::run(ctx, local_peer_id, collator_pair, req_receiver, metrics) + .map_err(|e| SubsystemError::with_origin("collator-protocol", e)) + .boxed(), + ProtocolSide::None => return DummySubsystem.start(ctx), + }; SpawnedSubsystem { name: "collator-protocol-subsystem", future } } diff --git a/polkadot/node/network/gossip-support/src/lib.rs b/polkadot/node/network/gossip-support/src/lib.rs index 3c178ad9df..b92aa4e9fe 100644 --- a/polkadot/node/network/gossip-support/src/lib.rs +++ b/polkadot/node/network/gossip-support/src/lib.rs @@ -183,8 +183,7 @@ where } /// 1. Determine if the current session index has changed. - /// 2. If it has, determine relevant validators - /// and issue a connection request. + /// 2. If it has, determine relevant validators and issue a connection request. 
async fn handle_active_leaves( &mut self, sender: &mut impl overseer::GossipSupportSenderTrait, diff --git a/polkadot/node/network/statement-distribution/src/responder.rs b/polkadot/node/network/statement-distribution/src/responder.rs index 4dad10eb5e..6897643603 100644 --- a/polkadot/node/network/statement-distribution/src/responder.rs +++ b/polkadot/node/network/statement-distribution/src/responder.rs @@ -62,8 +62,8 @@ pub async fn respond( // // 1. We want some requesters to have full data fast, rather then lots of them having them // late, as each requester having the data will help distributing it. - // 2. If we take too long, the requests timing out will not yet have had any data sent, - // thus we wasted no bandwidth. + // 2. If we take too long, the requests timing out will not yet have had any data sent, thus + // we wasted no bandwidth. // 3. If the queue is full, requestes will get an immediate error instead of running in a // timeout, thus requesters can immediately try another peer and be faster. // diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index d42c737330..dab69473c6 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -627,7 +627,7 @@ where #[cfg(feature = "full-node")] pub struct NewFullParams { - pub is_collator: IsCollator, + pub is_parachain_node: IsParachainNode, pub grandpa_pause: Option<(u32, u32)>, pub jaeger_agent: Option, pub telemetry_worker_handle: Option, @@ -638,7 +638,6 @@ pub struct NewFullParams { pub workers_path: Option, /// Optional custom names for the prepare and execute workers. pub workers_names: Option<(String, String)>, - pub overseer_enable_anyways: bool, pub overseer_gen: OverseerGenerator, pub overseer_message_channel_capacity_override: Option, #[allow(dead_code)] @@ -657,32 +656,46 @@ pub struct NewFull { pub backend: Arc, } -/// Is this node a collator? +/// Is this node running as in-process node for a parachain node? 
#[cfg(feature = "full-node")] #[derive(Clone)] -pub enum IsCollator { - /// This node is a collator. - Yes(CollatorPair), - /// This node is not a collator. +pub enum IsParachainNode { + /// This node is running as in-process node for a parachain collator. + Collator(CollatorPair), + /// This node is running as in-process node for a parachain full node. + FullNode, + /// This node is not running as in-process node for a parachain node, aka a normal relay chain + /// node. No, } #[cfg(feature = "full-node")] -impl std::fmt::Debug for IsCollator { +impl std::fmt::Debug for IsParachainNode { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { use sp_core::Pair; match self { - IsCollator::Yes(pair) => write!(fmt, "Yes({})", pair.public()), - IsCollator::No => write!(fmt, "No"), + IsParachainNode::Collator(pair) => write!(fmt, "Collator({})", pair.public()), + IsParachainNode::FullNode => write!(fmt, "FullNode"), + IsParachainNode::No => write!(fmt, "No"), } } } #[cfg(feature = "full-node")] -impl IsCollator { - /// Is this a collator? +impl IsParachainNode { + /// Is this running alongside a collator? fn is_collator(&self) -> bool { - matches!(self, Self::Yes(_)) + matches!(self, Self::Collator(_)) + } + + /// Is this running alongside a full node? + fn is_full_node(&self) -> bool { + matches!(self, Self::FullNode) + } + + /// Is this node running alongside a relay chain node? + fn is_running_alongside_parachain_node(&self) -> bool { + self.is_collator() || self.is_full_node() } } @@ -696,11 +709,6 @@ pub const AVAILABILITY_CONFIG: AvailabilityConfig = AvailabilityConfig { /// This is an advanced feature and not recommended for general use. Generally, `build_full` is /// a better choice. /// -/// `overseer_enable_anyways` always enables the overseer, based on the provided -/// `OverseerGenerator`, regardless of the role the node has. The relay chain selection (longest or -/// disputes-aware) is still determined based on the role of the node. 
Likewise for authority -/// discovery. -/// /// `workers_path` is used to get the path to the directory where auxiliary worker binaries reside. /// If not specified, the main binary's directory is searched first, then `/usr/lib/polkadot` is /// searched. If the path points to an executable rather then directory, that executable is used @@ -709,14 +717,13 @@ pub const AVAILABILITY_CONFIG: AvailabilityConfig = AvailabilityConfig { pub fn new_full( mut config: Configuration, NewFullParams { - is_collator, + is_parachain_node, grandpa_pause, jaeger_agent, telemetry_worker_handle, node_version, workers_path, workers_names, - overseer_enable_anyways, overseer_gen, overseer_message_channel_capacity_override, malus_finality_delay: _malus_finality_delay, @@ -768,8 +775,9 @@ pub fn new_full( let chain_spec = config.chain_spec.cloned_box(); let keystore = basics.keystore_container.local_keystore(); - let auth_or_collator = role.is_authority() || is_collator.is_collator(); - let pvf_checker_enabled = role.is_authority() && !is_collator.is_collator(); + let auth_or_collator = role.is_authority() || is_parachain_node.is_collator(); + // We only need to enable the pvf checker when this is a validator. + let pvf_checker_enabled = role.is_authority(); let select_chain = if auth_or_collator { let metrics = @@ -832,7 +840,12 @@ pub fn new_full( let peerset_protocol_names = PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); - if auth_or_collator || overseer_enable_anyways { + // If this is a validator or running alongside a parachain node, we need to enable the + // networking protocols. + // + // Collators and parachain full nodes require the collator and validator networking to send + // collations and to be able to recover PoVs. 
+ if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() { use polkadot_network_bridge::{peer_sets_info, IsAuthority}; let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; for config in peer_sets_info(is_authority, &peerset_protocol_names) { @@ -910,7 +923,7 @@ pub fn new_full( slot_duration_millis: slot_duration.as_millis() as u64, }; - let candidate_validation_config = if role.is_authority() && !is_collator.is_collator() { + let candidate_validation_config = if role.is_authority() { let (prep_worker_path, exec_worker_path) = workers::determine_workers_paths(workers_path, workers_names, node_version.clone())?; log::info!("🚀 Using prepare-worker binary at: {:?}", prep_worker_path); @@ -979,46 +992,50 @@ pub fn new_full( let overseer_client = client.clone(); let spawner = task_manager.spawn_handle(); - let authority_discovery_service = if auth_or_collator || overseer_enable_anyways { - use futures::StreamExt; - use sc_network::{Event, NetworkEventStream}; + let authority_discovery_service = + // We need the authority discovery if this node is either a validator or running alongside a parachain node. + // Parachain nodes require the authority discovery for finding relay chain validators for sending + // their PoVs or recovering PoVs. + if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() { + use futures::StreamExt; + use sc_network::{Event, NetworkEventStream}; - let authority_discovery_role = if role.is_authority() { - sc_authority_discovery::Role::PublishAndDiscover(keystore_container.keystore()) + let authority_discovery_role = if role.is_authority() { + sc_authority_discovery::Role::PublishAndDiscover(keystore_container.keystore()) + } else { + // don't publish our addresses when we're not an authority (collator, cumulus, ..) 
+ sc_authority_discovery::Role::Discover + }; + let dht_event_stream = + network.event_stream("authority-discovery").filter_map(|e| async move { + match e { + Event::Dht(e) => Some(e), + _ => None, + } + }); + let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config( + sc_authority_discovery::WorkerConfig { + publish_non_global_ips: auth_disc_publish_non_global_ips, + // Require that authority discovery records are signed. + strict_record_validation: true, + ..Default::default() + }, + client.clone(), + network.clone(), + Box::pin(dht_event_stream), + authority_discovery_role, + prometheus_registry.clone(), + ); + + task_manager.spawn_handle().spawn( + "authority-discovery-worker", + Some("authority-discovery"), + Box::pin(worker.run()), + ); + Some(service) } else { - // don't publish our addresses when we're not an authority (collator, cumulus, ..) - sc_authority_discovery::Role::Discover + None }; - let dht_event_stream = - network.event_stream("authority-discovery").filter_map(|e| async move { - match e { - Event::Dht(e) => Some(e), - _ => None, - } - }); - let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config( - sc_authority_discovery::WorkerConfig { - publish_non_global_ips: auth_disc_publish_non_global_ips, - // Require that authority discovery records are signed. 
- strict_record_validation: true, - ..Default::default() - }, - client.clone(), - network.clone(), - Box::pin(dht_event_stream), - authority_discovery_role, - prometheus_registry.clone(), - ); - - task_manager.spawn_handle().spawn( - "authority-discovery-worker", - Some("authority-discovery"), - Box::pin(worker.run()), - ); - Some(service) - } else { - None - }; let overseer_handle = if let Some(authority_discovery_service) = authority_discovery_service { let (overseer, overseer_handle) = overseer_gen @@ -1039,7 +1056,7 @@ pub fn new_full( dispute_req_receiver, registry: prometheus_registry.as_ref(), spawner, - is_collator, + is_parachain_node, approval_voting_config, availability_config: AVAILABILITY_CONFIG, candidate_validation_config, @@ -1332,11 +1349,6 @@ pub fn new_chain_ops( /// /// The actual "flavor", aka if it will use `Polkadot`, `Rococo` or `Kusama` is determined based on /// [`IdentifyVariant`] using the chain spec. -/// -/// `overseer_enable_anyways` always enables the overseer, based on the provided -/// `OverseerGenerator`, regardless of the role the node has. The relay chain selection (longest or -/// disputes-aware) is still determined based on the role of the node. Likewise for authority -/// discovery. #[cfg(feature = "full-node")] pub fn build_full( config: Configuration, diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index 29122ddca1..b315d2847c 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use super::{AuthorityDiscoveryApi, Block, Error, Hash, IsCollator, Registry}; +use super::{AuthorityDiscoveryApi, Block, Error, Hash, IsParachainNode, Registry}; use polkadot_node_subsystem_types::DefaultSubsystemClient; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_core::traits::SpawnNamed; @@ -108,7 +108,7 @@ where /// Task spawner to be used throughout the overseer and the APIs it provides. pub spawner: Spawner, /// Determines the behavior of the collator. - pub is_collator: IsCollator, + pub is_parachain_node: IsParachainNode, /// Configuration for the approval voting subsystem. pub approval_voting_config: ApprovalVotingConfig, /// Configuration for the availability store subsystem. @@ -149,7 +149,7 @@ pub fn prepared_overseer_builder( dispute_req_receiver, registry, spawner, - is_collator, + is_parachain_node, approval_voting_config, availability_config, candidate_validation_config, @@ -266,14 +266,15 @@ where .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) .collator_protocol({ - let side = match is_collator { - IsCollator::Yes(collator_pair) => ProtocolSide::Collator( + let side = match is_parachain_node { + IsParachainNode::Collator(collator_pair) => ProtocolSide::Collator( network_service.local_peer_id(), collator_pair, collation_req_receiver, Metrics::register(registry)?, ), - IsCollator::No => ProtocolSide::Validator { + IsParachainNode::FullNode => ProtocolSide::None, + IsParachainNode::No => ProtocolSide::Validator { keystore: keystore.clone(), eviction_policy: Default::default(), metrics: Metrics::register(registry)?, diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index ed25d28d29..932e95a7ca 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -28,7 +28,7 @@ use polkadot_overseer::Handle; use 
polkadot_primitives::{Balance, CollatorPair, HeadData, Id as ParaId, ValidationCode}; use polkadot_runtime_common::BlockHashCount; use polkadot_runtime_parachains::paras::{ParaGenesisArgs, ParaKind}; -use polkadot_service::{Error, FullClient, IsCollator, NewFull, PrometheusConfig}; +use polkadot_service::{Error, FullClient, IsParachainNode, NewFull, PrometheusConfig}; use polkadot_test_runtime::{ ParasCall, ParasSudoWrapperCall, Runtime, SignedExtra, SignedPayload, SudoCall, UncheckedExtrinsic, VERSION, @@ -71,7 +71,7 @@ pub use polkadot_service::{FullBackend, GetLastTimestamp}; #[sc_tracing::logging::prefix_logs_with(config.network.node_name.as_str())] pub fn new_full( config: Configuration, - is_collator: IsCollator, + is_parachain_node: IsParachainNode, workers_path: Option, ) -> Result { let workers_path = Some(workers_path.unwrap_or_else(get_relative_workers_path_for_test)); @@ -79,14 +79,13 @@ pub fn new_full( polkadot_service::new_full( config, polkadot_service::NewFullParams { - is_collator, + is_parachain_node, grandpa_pause: None, jaeger_agent: None, telemetry_worker_handle: None, node_version: None, workers_path, workers_names: None, - overseer_enable_anyways: false, overseer_gen: polkadot_service::RealOverseerGen, overseer_message_channel_capacity_override: None, malus_finality_delay: None, @@ -207,7 +206,7 @@ pub fn run_validator_node( ) -> PolkadotTestNode { let multiaddr = config.network.listen_addresses[0].clone(); let NewFull { task_manager, client, network, rpc_handlers, overseer_handle, .. 
} = - new_full(config, IsCollator::No, worker_program_path) + new_full(config, IsParachainNode::No, worker_program_path) .expect("could not create Polkadot test service"); let overseer_handle = overseer_handle.expect("test node must have an overseer handle"); @@ -239,7 +238,7 @@ pub fn run_collator_node( let config = node_config(storage_update_func, tokio_handle, key, boot_nodes, false); let multiaddr = config.network.listen_addresses[0].clone(); let NewFull { task_manager, client, network, rpc_handlers, overseer_handle, .. } = - new_full(config, IsCollator::Yes(collator_pair), None) + new_full(config, IsParachainNode::Collator(collator_pair), None) .expect("could not create Polkadot test service"); let overseer_handle = overseer_handle.expect("test node must have an overseer handle"); diff --git a/polkadot/parachain/test-parachains/adder/collator/src/main.rs b/polkadot/parachain/test-parachains/adder/collator/src/main.rs index 8d8a137671..f9efa9c68a 100644 --- a/polkadot/parachain/test-parachains/adder/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/adder/collator/src/main.rs @@ -21,6 +21,7 @@ use polkadot_node_primitives::CollationGenerationConfig; use polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProtocolMessage}; use polkadot_primitives::Id as ParaId; use sc_cli::{Error as SubstrateCliError, SubstrateCli}; +use sc_service::Role; use sp_core::hexdisplay::HexDisplay; use test_parachain_adder_collator::Collator; @@ -57,10 +58,15 @@ fn main() -> Result<()> { let collator = Collator::new(); config.disable_beefy = true; + // Zombienet is spawning all collators currently with the same CLI, this means it + // sets `--validator` and this is wrong here. 
+ config.role = Role::Full; let full_node = polkadot_service::build_full( config, polkadot_service::NewFullParams { - is_collator: polkadot_service::IsCollator::Yes(collator.collator_key()), + is_parachain_node: polkadot_service::IsParachainNode::Collator( + collator.collator_key(), + ), grandpa_pause: None, jaeger_agent: None, telemetry_worker_handle: None, @@ -70,7 +76,6 @@ fn main() -> Result<()> { workers_path: None, workers_names: None, - overseer_enable_anyways: false, overseer_gen: polkadot_service::RealOverseerGen, overseer_message_channel_capacity_override: None, malus_finality_delay: None, diff --git a/polkadot/parachain/test-parachains/undying/collator/src/main.rs b/polkadot/parachain/test-parachains/undying/collator/src/main.rs index da8205ba18..8eadc233ae 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/main.rs @@ -21,6 +21,7 @@ use polkadot_node_primitives::CollationGenerationConfig; use polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProtocolMessage}; use polkadot_primitives::Id as ParaId; use sc_cli::{Error as SubstrateCliError, SubstrateCli}; +use sc_service::Role; use sp_core::hexdisplay::HexDisplay; use test_parachain_undying_collator::Collator; @@ -57,10 +58,15 @@ fn main() -> Result<()> { let collator = Collator::new(cli.run.pov_size, cli.run.pvf_complexity); config.disable_beefy = true; + // Zombienet is spawning all collators currently with the same CLI, this means it + // sets `--validator` and this is wrong here. 
+ config.role = Role::Full; let full_node = polkadot_service::build_full( config, polkadot_service::NewFullParams { - is_collator: polkadot_service::IsCollator::Yes(collator.collator_key()), + is_parachain_node: polkadot_service::IsParachainNode::Collator( + collator.collator_key(), + ), grandpa_pause: None, jaeger_agent: None, telemetry_worker_handle: None, @@ -70,7 +76,6 @@ fn main() -> Result<()> { workers_path: None, workers_names: None, - overseer_enable_anyways: false, overseer_gen: polkadot_service::RealOverseerGen, overseer_message_channel_capacity_override: None, malus_finality_delay: None, diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index d4ad8619f1..0631b280aa 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -1244,28 +1244,27 @@ impl Pallet { ) -> DispatchResult { let mut pending_configs = >::get(); - // 1. pending_configs = [] - // No pending configuration changes. + // 1. pending_configs = [] No pending configuration changes. // // That means we should use the active config as the base configuration. We will insert // the new pending configuration as (cur+2, new_config) into the list. // - // 2. pending_configs = [(cur+2, X)] - // There is a configuration that is pending for the scheduled session. + // 2. pending_configs = [(cur+2, X)] There is a configuration that is pending for the + // scheduled session. // // We will use X as the base configuration. We can update the pending configuration X // directly. // - // 3. pending_configs = [(cur+1, X)] - // There is a pending configuration scheduled and it will be applied in the next session. + // 3. pending_configs = [(cur+1, X)] There is a pending configuration scheduled and it will + // be applied in the next session. // // We will use X as the base configuration. 
We need to schedule a new configuration // change for the `scheduled_session` and use X as the base for the new configuration. // - // 4. pending_configs = [(cur+1, X), (cur+2, Y)] - // There is a pending configuration change in the next session and for the scheduled - // session. Due to case №3, we can be sure that Y is based on top of X. This means we - // can use Y as the base configuration and update Y directly. + // 4. pending_configs = [(cur+1, X), (cur+2, Y)] There is a pending configuration change in + // the next session and for the scheduled session. Due to case №3, we can be sure that Y + // is based on top of X. This means we can use Y as the base configuration and update Y + // directly. // // There cannot be (cur, X) because those are applied in the session change handler for the // current session. diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index a40a3422a6..da0b972bc9 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -977,8 +977,8 @@ fn compute_entropy(parent_hash: T::Hash) -> [u8; 32] { /// 2. If exceeded: /// 1. Check validity of all dispute statements sequentially /// 2. If not exceeded: -/// 1. If weight is exceeded by locals, pick the older ones (lower indices) -/// until the weight limit is reached. +/// 1. If weight is exceeded by locals, pick the older ones (lower indices) until the weight limit +/// is reached. /// /// Returns the consumed weight amount, that is guaranteed to be less than the provided /// `max_consumable_weight`. diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs index e22ef82585..e066ad825a 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs @@ -23,8 +23,7 @@ //! will contain methods from `vstaging`. //! 
The promotion consists of the following steps: //! 1. Bump the version of the stable module (e.g. `v2` becomes `v3`) -//! 2. Move methods from `vstaging` to `v3`. The new stable version should include -//! all methods from `vstaging` tagged with the new version number (e.g. all -//! `v3` methods). +//! 2. Move methods from `vstaging` to `v3`. The new stable version should include all methods from +//! `vstaging` tagged with the new version number (e.g. all `v3` methods). pub mod v5; pub mod vstaging;