mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-04-26 02:57:57 +00:00
Parachains-Aura: Only produce once per slot (#3308)
Given how the block production is driven for Parachains right now, with the enabling of async backing we would produce two blocks per slot. Until we have a proper collator implementation, the "hack" is to prevent the production of multiple blocks per slot. Closes: https://github.com/paritytech/polkadot-sdk/issues/3282
This commit is contained in:
@@ -258,6 +258,7 @@ where
|
||||
pub struct SlotClaim<Pub> {
|
||||
author_pub: Pub,
|
||||
pre_digest: DigestItem,
|
||||
slot: Slot,
|
||||
timestamp: Timestamp,
|
||||
}
|
||||
|
||||
@@ -272,7 +273,7 @@ impl<Pub> SlotClaim<Pub> {
|
||||
P::Public: Codec,
|
||||
P::Signature: Codec,
|
||||
{
|
||||
SlotClaim { author_pub, timestamp, pre_digest: aura_internal::pre_digest::<P>(slot) }
|
||||
SlotClaim { author_pub, timestamp, pre_digest: aura_internal::pre_digest::<P>(slot), slot }
|
||||
}
|
||||
|
||||
/// Get the author's public key.
|
||||
@@ -285,6 +286,11 @@ impl<Pub> SlotClaim<Pub> {
|
||||
&self.pre_digest
|
||||
}
|
||||
|
||||
/// Get the slot assigned to this claim.
|
||||
pub fn slot(&self) -> Slot {
|
||||
self.slot
|
||||
}
|
||||
|
||||
/// Get the timestamp corresponding to the relay-chain slot this claim was
|
||||
/// generated against.
|
||||
pub fn timestamp(&self) -> Timestamp {
|
||||
|
||||
@@ -141,6 +141,8 @@ where
|
||||
collator_util::Collator::<Block, P, _, _, _, _, _>::new(params)
|
||||
};
|
||||
|
||||
let mut last_processed_slot = 0;
|
||||
|
||||
while let Some(request) = collation_requests.next().await {
|
||||
macro_rules! reject_with_error {
|
||||
($err:expr) => {{
|
||||
@@ -192,6 +194,18 @@ where
|
||||
Err(e) => reject_with_error!(e),
|
||||
};
|
||||
|
||||
// With async backing this function will be called every relay chain block.
|
||||
//
|
||||
// Most parachains currently run with 12 seconds slots and thus, they would try to
|
||||
// produce multiple blocks per slot which very likely would fail on chain. Thus, we have
|
||||
// this "hack" to only produce one block per slot.
|
||||
//
|
||||
// With https://github.com/paritytech/polkadot-sdk/issues/3168 this implementation will be
|
||||
// obsolete and also the underlying issue will be fixed.
|
||||
if last_processed_slot >= *claim.slot() {
|
||||
continue
|
||||
}
|
||||
|
||||
let (parachain_inherent_data, other_inherent_data) = try_request!(
|
||||
collator
|
||||
.create_inherent_data(
|
||||
@@ -228,6 +242,8 @@ where
|
||||
request.complete(None);
|
||||
tracing::debug!(target: crate::LOG_TARGET, "No block proposal");
|
||||
}
|
||||
|
||||
last_processed_slot = *claim.slot();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -42,7 +42,14 @@ use sp_core::crypto::Pair;
|
||||
use sp_inherents::CreateInherentDataProviders;
|
||||
use sp_keystore::KeystorePtr;
|
||||
use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member, NumberFor};
|
||||
use std::{convert::TryFrom, marker::PhantomData, sync::Arc};
|
||||
use std::{
|
||||
convert::TryFrom,
|
||||
marker::PhantomData,
|
||||
sync::{
|
||||
atomic::{AtomicU64, Ordering},
|
||||
Arc,
|
||||
},
|
||||
};
|
||||
|
||||
mod import_queue;
|
||||
|
||||
@@ -61,6 +68,7 @@ pub struct AuraConsensus<B, CIDP, W> {
|
||||
create_inherent_data_providers: Arc<CIDP>,
|
||||
aura_worker: Arc<Mutex<W>>,
|
||||
slot_duration: SlotDuration,
|
||||
last_slot_processed: Arc<AtomicU64>,
|
||||
_phantom: PhantomData<B>,
|
||||
}
|
||||
|
||||
@@ -70,6 +78,7 @@ impl<B, CIDP, W> Clone for AuraConsensus<B, CIDP, W> {
|
||||
create_inherent_data_providers: self.create_inherent_data_providers.clone(),
|
||||
aura_worker: self.aura_worker.clone(),
|
||||
slot_duration: self.slot_duration,
|
||||
last_slot_processed: self.last_slot_processed.clone(),
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
@@ -156,6 +165,7 @@ where
|
||||
Box::new(AuraConsensus {
|
||||
create_inherent_data_providers: Arc::new(create_inherent_data_providers),
|
||||
aura_worker: Arc::new(Mutex::new(worker)),
|
||||
last_slot_processed: Default::default(),
|
||||
slot_duration,
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
@@ -221,6 +231,18 @@ where
|
||||
Some((validation_data.max_pov_size / 2) as usize),
|
||||
);
|
||||
|
||||
// With async backing this function will be called every relay chain block.
|
||||
//
|
||||
// Most parachains currently run with 12 seconds slots and thus, they would try to produce
|
||||
// multiple blocks per slot which very likely would fail on chain. Thus, we have this "hack"
|
||||
// to only produce one block per slot.
|
||||
//
|
||||
// With https://github.com/paritytech/polkadot-sdk/issues/3168 this implementation will be
|
||||
// obsolete and also the underlying issue will be fixed.
|
||||
if self.last_slot_processed.fetch_max(*info.slot, Ordering::Relaxed) >= *info.slot {
|
||||
return None
|
||||
}
|
||||
|
||||
let res = self.aura_worker.lock().await.on_slot(info).await?;
|
||||
|
||||
Some(ParachainCandidate { block: res.block, proof: res.storage_proof })
|
||||
|
||||
@@ -0,0 +1,14 @@
|
||||
title: Parachains-Aura: Only produce once per slot
|
||||
|
||||
doc:
|
||||
- audience: Node Dev
|
||||
description: |
|
||||
With the introduction of asynchronous backing the relay chain allows parachains to include blocks every 6 seconds.
|
||||
The Cumulus Aura implementations, besides the lookahead collator, are building blocks when there is a free slot for
|
||||
the parachain in the relay chain. Most parachains are still running with a 12s slot duration and are not able
|
||||
to build multiple blocks per slot. But the block production logic will be triggered every 6s, resulting in error
|
||||
logs like: "no space left for the block in the unincluded segment". This is solved by ensuring that we don't build
|
||||
multiple blocks per slot.
|
||||
|
||||
crates:
|
||||
- name: "cumulus-client-consensus-aura"
|
||||
Reference in New Issue
Block a user