mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-05-06 17:18:03 +00:00
Pov recovery for parachains (#445)
* Start with a failing integration test & some refactorings * More work * Make it "work" * Add `NullConsensus` for the test * More refactorings * Move stuff over to its own crate * Refactorings * Integrate it into `service` and make the test working * Docs and some exit condition * Use the real import queue * Fix tests * Update client/pov-recovery/src/active_candidate_recovery.rs Co-authored-by: Bernhard Schuster <bernhard@ahoi.io> * Fetch slot duration from the relay chain * Docs * Fixes Co-authored-by: Bernhard Schuster <bernhard@ahoi.io>
This commit is contained in:
Generated
+352
-282
File diff suppressed because it is too large
Load Diff
@@ -5,6 +5,7 @@ members = [
|
||||
"client/consensus/common",
|
||||
"client/consensus/relay-chain",
|
||||
"client/network",
|
||||
"client/pov-recovery",
|
||||
"client/service",
|
||||
"pallets/aura-ext",
|
||||
"pallets/dmp-queue",
|
||||
|
||||
@@ -27,7 +27,7 @@ cumulus-primitives-core = { path = "../../primitives/core" }
|
||||
# Other dependencies
|
||||
codec = { package = "parity-scale-codec", version = "2.0.0", features = [ "derive" ] }
|
||||
futures = { version = "0.3.1", features = ["compat"] }
|
||||
parking_lot = "0.9"
|
||||
parking_lot = "0.10.2"
|
||||
tracing = "0.1.25"
|
||||
|
||||
[dev-dependencies]
|
||||
|
||||
@@ -35,4 +35,4 @@ futures = { version = "0.3.8", features = ["compat"] }
|
||||
codec = { package = "parity-scale-codec", version = "2.0.0", features = [ "derive" ] }
|
||||
tracing = "0.1.22"
|
||||
async-trait = "0.1.42"
|
||||
parking_lot = "0.9"
|
||||
parking_lot = "0.10.2"
|
||||
|
||||
@@ -24,7 +24,6 @@ polkadot-runtime = { git = "https://github.com/paritytech/polkadot", branch = "m
|
||||
|
||||
# Other deps
|
||||
futures = { version = "0.3.8", features = ["compat"] }
|
||||
tokio = "0.1.22"
|
||||
codec = { package = "parity-scale-codec", version = "2.0.0", features = [ "derive" ] }
|
||||
tracing = "0.1.25"
|
||||
async-trait = "0.1.42"
|
||||
@@ -33,10 +32,19 @@ dyn-clone = "1.0.4"
|
||||
[dev-dependencies]
|
||||
# Substrate deps
|
||||
sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
substrate-test-utils = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
|
||||
# Cumulus dependencies
|
||||
cumulus-test-runtime = { path = "../../../test/runtime" }
|
||||
cumulus-test-client = { path = "../../../test/client" }
|
||||
cumulus-test-service = { path = "../../../test/service" }
|
||||
cumulus-primitives-core = { path = "../../../primitives/core" }
|
||||
|
||||
# Polkadot deps
|
||||
polkadot-test-client = { git = "https://github.com/paritytech/polkadot", branch = "master" }
|
||||
|
||||
# Other deps
|
||||
tokio = { version = "0.2.21", features = ["macros"] }
|
||||
futures-timer = "3.0.2"
|
||||
|
||||
@@ -14,504 +14,14 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use sc_client_api::{
|
||||
Backend, BlockBackend, BlockImportNotification, BlockchainEvents, Finalizer, UsageProvider,
|
||||
};
|
||||
use sp_api::ProvideRuntimeApi;
|
||||
use sp_blockchain::{Error as ClientError, Result as ClientResult};
|
||||
use sp_consensus::{
|
||||
BlockImport, BlockImportParams, BlockOrigin, BlockStatus, Error as ConsensusError,
|
||||
ForkChoiceStrategy, SelectChain as SelectChainT,
|
||||
};
|
||||
use sp_runtime::{
|
||||
generic::BlockId,
|
||||
traits::{Block as BlockT, Header as HeaderT},
|
||||
};
|
||||
use polkadot_primitives::v1::{Hash as PHash, PersistedValidationData};
|
||||
use sp_consensus::BlockImport;
|
||||
use sp_runtime::traits::Block as BlockT;
|
||||
|
||||
use polkadot_primitives::v1::{
|
||||
Block as PBlock, Hash as PHash, Id as ParaId, OccupiedCoreAssumption, ParachainHost,
|
||||
PersistedValidationData,
|
||||
};
|
||||
|
||||
use codec::Decode;
|
||||
use futures::{future, select, FutureExt, Stream, StreamExt};
|
||||
|
||||
use std::{marker::PhantomData, sync::Arc};
|
||||
|
||||
/// Errors that can occur while following the polkadot relay-chain.
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
/// An underlying client error.
|
||||
Client(ClientError),
|
||||
/// Head data returned was not for our parachain.
|
||||
InvalidHeadData,
|
||||
}
|
||||
|
||||
/// Helper for the relay chain client. This is expected to be a lightweight handle like an `Arc`.
|
||||
pub trait RelaychainClient: Clone + 'static {
|
||||
/// The error type for interacting with the Polkadot client.
|
||||
type Error: std::fmt::Debug + Send;
|
||||
|
||||
/// A stream that yields head-data for a parachain.
|
||||
type HeadStream: Stream<Item = Vec<u8>> + Send + Unpin;
|
||||
|
||||
/// Get a stream of new best heads for the given parachain.
|
||||
fn new_best_heads(&self, para_id: ParaId) -> ClientResult<Self::HeadStream>;
|
||||
|
||||
/// Get a stream of finalized heads for the given parachain.
|
||||
fn finalized_heads(&self, para_id: ParaId) -> ClientResult<Self::HeadStream>;
|
||||
|
||||
/// Returns the parachain head for the given `para_id` at the given block id.
|
||||
fn parachain_head_at(
|
||||
&self,
|
||||
at: &BlockId<PBlock>,
|
||||
para_id: ParaId,
|
||||
) -> ClientResult<Option<Vec<u8>>>;
|
||||
}
|
||||
|
||||
/// Follow the finalized head of the given parachain.
|
||||
///
|
||||
/// For every finalized block of the relay chain, it will get the included parachain header
|
||||
/// corresponding to `para_id` and will finalize it in the parachain.
|
||||
async fn follow_finalized_head<P, Block, B, R>(
|
||||
para_id: ParaId,
|
||||
parachain: Arc<P>,
|
||||
relay_chain: R,
|
||||
) -> ClientResult<()>
|
||||
where
|
||||
Block: BlockT,
|
||||
P: Finalizer<Block, B> + UsageProvider<Block>,
|
||||
R: RelaychainClient,
|
||||
B: Backend<Block>,
|
||||
{
|
||||
let mut finalized_heads = relay_chain.finalized_heads(para_id)?;
|
||||
|
||||
loop {
|
||||
let finalized_head = if let Some(h) = finalized_heads.next().await {
|
||||
h
|
||||
} else {
|
||||
tracing::debug!(target: "cumulus-consensus", "Stopping following finalized head.");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let header = match Block::Header::decode(&mut &finalized_head[..]) {
|
||||
Ok(header) => header,
|
||||
Err(err) => {
|
||||
tracing::warn!(
|
||||
target: "cumulus-consensus",
|
||||
error = ?err,
|
||||
"Could not decode parachain header while following finalized heads.",
|
||||
);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let hash = header.hash();
|
||||
|
||||
// don't finalize the same block multiple times.
|
||||
if parachain.usage_info().chain.finalized_hash != hash {
|
||||
if let Err(e) = parachain.finalize_block(BlockId::hash(hash), None, true) {
|
||||
match e {
|
||||
ClientError::UnknownBlock(_) => tracing::debug!(
|
||||
target: "cumulus-consensus",
|
||||
block_hash = ?hash,
|
||||
"Could not finalize block because it is unknown.",
|
||||
),
|
||||
_ => tracing::warn!(
|
||||
target: "cumulus-consensus",
|
||||
error = ?e,
|
||||
block_hash = ?hash,
|
||||
"Failed to finalize block",
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Run the parachain consensus.
|
||||
///
|
||||
/// This will follow the given `relay_chain` to act as consesus for the parachain that corresponds
|
||||
/// to the given `para_id`. It will set the new best block of the parachain as it gets aware of it.
|
||||
/// The same happens for the finalized block.
|
||||
///
|
||||
/// # Note
|
||||
///
|
||||
/// This will access the backend of the parachain and thus, this future should be spawned as blocking
|
||||
/// task.
|
||||
pub async fn run_parachain_consensus<P, R, Block, B>(
|
||||
para_id: ParaId,
|
||||
parachain: Arc<P>,
|
||||
relay_chain: R,
|
||||
announce_block: Arc<dyn Fn(Block::Hash, Option<Vec<u8>>) + Send + Sync>,
|
||||
) -> ClientResult<()>
|
||||
where
|
||||
Block: BlockT,
|
||||
P: Finalizer<Block, B>
|
||||
+ UsageProvider<Block>
|
||||
+ Send
|
||||
+ Sync
|
||||
+ BlockBackend<Block>
|
||||
+ BlockchainEvents<Block>,
|
||||
for<'a> &'a P: BlockImport<Block>,
|
||||
R: RelaychainClient,
|
||||
B: Backend<Block>,
|
||||
{
|
||||
let follow_new_best = follow_new_best(
|
||||
para_id,
|
||||
parachain.clone(),
|
||||
relay_chain.clone(),
|
||||
announce_block,
|
||||
);
|
||||
let follow_finalized_head = follow_finalized_head(para_id, parachain, relay_chain);
|
||||
select! {
|
||||
r = follow_new_best.fuse() => r,
|
||||
r = follow_finalized_head.fuse() => r,
|
||||
}
|
||||
}
|
||||
|
||||
/// Follow the relay chain new best head, to update the Parachain new best head.
|
||||
async fn follow_new_best<P, R, Block, B>(
|
||||
para_id: ParaId,
|
||||
parachain: Arc<P>,
|
||||
relay_chain: R,
|
||||
announce_block: Arc<dyn Fn(Block::Hash, Option<Vec<u8>>) + Send + Sync>,
|
||||
) -> ClientResult<()>
|
||||
where
|
||||
Block: BlockT,
|
||||
P: Finalizer<Block, B>
|
||||
+ UsageProvider<Block>
|
||||
+ Send
|
||||
+ Sync
|
||||
+ BlockBackend<Block>
|
||||
+ BlockchainEvents<Block>,
|
||||
for<'a> &'a P: BlockImport<Block>,
|
||||
R: RelaychainClient,
|
||||
B: Backend<Block>,
|
||||
{
|
||||
let mut new_best_heads = relay_chain.new_best_heads(para_id)?.fuse();
|
||||
let mut imported_blocks = parachain.import_notification_stream().fuse();
|
||||
// The unset best header of the parachain. Will be `Some(_)` when we have imported a relay chain
|
||||
// block before the parachain block it included. In this case we need to wait for this block to
|
||||
// be imported to set it as new best.
|
||||
let mut unset_best_header = None;
|
||||
|
||||
loop {
|
||||
select! {
|
||||
h = new_best_heads.next() => {
|
||||
match h {
|
||||
Some(h) => handle_new_best_parachain_head(
|
||||
h,
|
||||
&*parachain,
|
||||
&mut unset_best_header,
|
||||
).await,
|
||||
None => {
|
||||
tracing::debug!(
|
||||
target: "cumulus-consensus",
|
||||
"Stopping following new best.",
|
||||
);
|
||||
return Ok(())
|
||||
}
|
||||
}
|
||||
},
|
||||
i = imported_blocks.next() => {
|
||||
match i {
|
||||
Some(i) => handle_new_block_imported(
|
||||
i,
|
||||
&mut unset_best_header,
|
||||
&*parachain,
|
||||
&*announce_block,
|
||||
).await,
|
||||
None => {
|
||||
tracing::debug!(
|
||||
target: "cumulus-consensus",
|
||||
"Stopping following imported blocks.",
|
||||
);
|
||||
return Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle a new import block of the parachain.
|
||||
async fn handle_new_block_imported<Block, P>(
|
||||
notification: BlockImportNotification<Block>,
|
||||
unset_best_header_opt: &mut Option<Block::Header>,
|
||||
parachain: &P,
|
||||
announce_block: &(dyn Fn(Block::Hash, Option<Vec<u8>>) + Send + Sync),
|
||||
) where
|
||||
Block: BlockT,
|
||||
P: UsageProvider<Block> + Send + Sync + BlockBackend<Block>,
|
||||
for<'a> &'a P: BlockImport<Block>,
|
||||
{
|
||||
// HACK
|
||||
//
|
||||
// Remove after https://github.com/paritytech/substrate/pull/8052 or similar is merged
|
||||
if notification.origin != BlockOrigin::Own {
|
||||
announce_block(notification.hash, None);
|
||||
}
|
||||
|
||||
let unset_best_header = match (notification.is_new_best, &unset_best_header_opt) {
|
||||
// If this is the new best block or we don't have any unset block, we can end it here.
|
||||
(true, _) | (_, None) => return,
|
||||
(false, Some(ref u)) => u,
|
||||
};
|
||||
|
||||
let unset_hash = if notification.header.number() < unset_best_header.number() {
|
||||
return;
|
||||
} else if notification.header.number() == unset_best_header.number() {
|
||||
let unset_hash = unset_best_header.hash();
|
||||
|
||||
if unset_hash != notification.hash {
|
||||
return;
|
||||
} else {
|
||||
unset_hash
|
||||
}
|
||||
} else {
|
||||
unset_best_header.hash()
|
||||
};
|
||||
|
||||
match parachain.block_status(&BlockId::Hash(unset_hash)) {
|
||||
Ok(BlockStatus::InChainWithState) => {
|
||||
drop(unset_best_header);
|
||||
let unset_best_header = unset_best_header_opt
|
||||
.take()
|
||||
.expect("We checked above that the value is set; qed");
|
||||
|
||||
import_block_as_new_best(unset_hash, unset_best_header, parachain).await;
|
||||
}
|
||||
state => tracing::debug!(
|
||||
target: "cumulus-consensus",
|
||||
?unset_best_header,
|
||||
?notification.header,
|
||||
?state,
|
||||
"Unexpected state for unset best header.",
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle the new best parachain head as extracted from the new best relay chain.
|
||||
async fn handle_new_best_parachain_head<Block, P>(
|
||||
head: Vec<u8>,
|
||||
parachain: &P,
|
||||
unset_best_header: &mut Option<Block::Header>,
|
||||
) where
|
||||
Block: BlockT,
|
||||
P: UsageProvider<Block> + Send + Sync + BlockBackend<Block>,
|
||||
for<'a> &'a P: BlockImport<Block>,
|
||||
{
|
||||
let parachain_head = match <<Block as BlockT>::Header>::decode(&mut &head[..]) {
|
||||
Ok(header) => header,
|
||||
Err(err) => {
|
||||
tracing::warn!(
|
||||
target: "cumulus-consensus",
|
||||
error = ?err,
|
||||
"Could not decode Parachain header while following best heads.",
|
||||
);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let hash = parachain_head.hash();
|
||||
|
||||
if parachain.usage_info().chain.best_hash == hash {
|
||||
tracing::debug!(
|
||||
target: "cumulus-consensus",
|
||||
block_hash = ?hash,
|
||||
"Skipping set new best block, because block is already the best.",
|
||||
)
|
||||
} else {
|
||||
// Make sure the block is already known or otherwise we skip setting new best.
|
||||
match parachain.block_status(&BlockId::Hash(hash)) {
|
||||
Ok(BlockStatus::InChainWithState) => {
|
||||
unset_best_header.take();
|
||||
|
||||
import_block_as_new_best(hash, parachain_head, parachain).await;
|
||||
}
|
||||
Ok(BlockStatus::InChainPruned) => {
|
||||
tracing::error!(
|
||||
target: "cumulus-collator",
|
||||
block_hash = ?hash,
|
||||
"Trying to set pruned block as new best!",
|
||||
);
|
||||
}
|
||||
Ok(BlockStatus::Unknown) => {
|
||||
*unset_best_header = Some(parachain_head);
|
||||
|
||||
tracing::debug!(
|
||||
target: "cumulus-collator",
|
||||
block_hash = ?hash,
|
||||
"Parachain block not yet imported, waiting for import to enact as best block.",
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!(
|
||||
target: "cumulus-collator",
|
||||
block_hash = ?hash,
|
||||
error = ?e,
|
||||
"Failed to get block status of block.",
|
||||
);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn import_block_as_new_best<Block, P>(hash: Block::Hash, header: Block::Header, parachain: &P)
|
||||
where
|
||||
Block: BlockT,
|
||||
P: UsageProvider<Block> + Send + Sync + BlockBackend<Block>,
|
||||
for<'a> &'a P: BlockImport<Block>,
|
||||
{
|
||||
// Make it the new best block
|
||||
let mut block_import_params = BlockImportParams::new(BlockOrigin::ConsensusBroadcast, header);
|
||||
block_import_params.fork_choice = Some(ForkChoiceStrategy::Custom(true));
|
||||
block_import_params.import_existing = true;
|
||||
|
||||
if let Err(err) = (&*parachain)
|
||||
.import_block(block_import_params, Default::default())
|
||||
.await
|
||||
{
|
||||
tracing::warn!(
|
||||
target: "cumulus-consensus",
|
||||
block_hash = ?hash,
|
||||
error = ?err,
|
||||
"Failed to set new best block.",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> RelaychainClient for Arc<T>
|
||||
where
|
||||
T: sc_client_api::BlockchainEvents<PBlock> + ProvideRuntimeApi<PBlock> + 'static + Send + Sync,
|
||||
<T as ProvideRuntimeApi<PBlock>>::Api: ParachainHost<PBlock>,
|
||||
{
|
||||
type Error = ClientError;
|
||||
|
||||
type HeadStream = Box<dyn Stream<Item = Vec<u8>> + Send + Unpin>;
|
||||
|
||||
fn new_best_heads(&self, para_id: ParaId) -> ClientResult<Self::HeadStream> {
|
||||
let polkadot = self.clone();
|
||||
|
||||
let s = self.import_notification_stream().filter_map(move |n| {
|
||||
future::ready(if n.is_new_best {
|
||||
polkadot
|
||||
.parachain_head_at(&BlockId::hash(n.hash), para_id)
|
||||
.ok()
|
||||
.and_then(|h| h)
|
||||
} else {
|
||||
None
|
||||
})
|
||||
});
|
||||
|
||||
Ok(Box::new(s))
|
||||
}
|
||||
|
||||
fn finalized_heads(&self, para_id: ParaId) -> ClientResult<Self::HeadStream> {
|
||||
let polkadot = self.clone();
|
||||
|
||||
let s = self.finality_notification_stream().filter_map(move |n| {
|
||||
future::ready(
|
||||
polkadot
|
||||
.parachain_head_at(&BlockId::hash(n.hash), para_id)
|
||||
.ok()
|
||||
.and_then(|h| h),
|
||||
)
|
||||
});
|
||||
|
||||
Ok(Box::new(s))
|
||||
}
|
||||
|
||||
fn parachain_head_at(
|
||||
&self,
|
||||
at: &BlockId<PBlock>,
|
||||
para_id: ParaId,
|
||||
) -> ClientResult<Option<Vec<u8>>> {
|
||||
self.runtime_api()
|
||||
.persisted_validation_data(at, para_id, OccupiedCoreAssumption::TimedOut)
|
||||
.map(|s| s.map(|s| s.parent_head.0))
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
/// Select chain implementation for parachains.
|
||||
///
|
||||
/// The actual behavior of the implementation depends on the select chain implementation used by
|
||||
/// Polkadot.
|
||||
pub struct SelectChain<Block, PC, SC> {
|
||||
polkadot_client: PC,
|
||||
polkadot_select_chain: SC,
|
||||
para_id: ParaId,
|
||||
_marker: PhantomData<Block>,
|
||||
}
|
||||
|
||||
impl<Block, PC, SC> SelectChain<Block, PC, SC> {
|
||||
/// Create new instance of `Self`.
|
||||
///
|
||||
/// - `para_id`: The id of the parachain.
|
||||
/// - `polkadot_client`: The client of the Polkadot node.
|
||||
/// - `polkadot_select_chain`: The Polkadot select chain implementation.
|
||||
pub fn new(para_id: ParaId, polkadot_client: PC, polkadot_select_chain: SC) -> Self {
|
||||
Self {
|
||||
polkadot_client,
|
||||
polkadot_select_chain,
|
||||
para_id,
|
||||
_marker: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Block, PC: Clone, SC: Clone> Clone for SelectChain<Block, PC, SC> {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
polkadot_client: self.polkadot_client.clone(),
|
||||
polkadot_select_chain: self.polkadot_select_chain.clone(),
|
||||
para_id: self.para_id,
|
||||
_marker: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Block, PC, SC> SelectChainT<Block> for SelectChain<Block, PC, SC>
|
||||
where
|
||||
Block: BlockT,
|
||||
PC: RelaychainClient + Clone + Send + Sync,
|
||||
PC::Error: ToString,
|
||||
SC: SelectChainT<PBlock>,
|
||||
{
|
||||
fn leaves(&self) -> Result<Vec<<Block as BlockT>::Hash>, ConsensusError> {
|
||||
let leaves = self.polkadot_select_chain.leaves()?;
|
||||
leaves
|
||||
.into_iter()
|
||||
.filter_map(|l| {
|
||||
self.polkadot_client
|
||||
.parachain_head_at(&BlockId::Hash(l), self.para_id)
|
||||
.map(|h| h.and_then(|d| <<Block as BlockT>::Hash>::decode(&mut &d[..]).ok()))
|
||||
.transpose()
|
||||
})
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|e| ConsensusError::ChainLookup(e.to_string()))
|
||||
}
|
||||
|
||||
fn best_chain(&self) -> Result<<Block as BlockT>::Header, ConsensusError> {
|
||||
let best_chain = self.polkadot_select_chain.best_chain()?;
|
||||
let para_best_chain = self
|
||||
.polkadot_client
|
||||
.parachain_head_at(&BlockId::Hash(best_chain.hash()), self.para_id)
|
||||
.map_err(|e| ConsensusError::ChainLookup(e.to_string()))?;
|
||||
|
||||
match para_best_chain {
|
||||
Some(best) => Decode::decode(&mut &best[..]).map_err(|e| {
|
||||
ConsensusError::ChainLookup(format!("Error decoding parachain head: {}", e))
|
||||
}),
|
||||
None => Err(ConsensusError::ChainLookup(
|
||||
"Could not find parachain head for best relay chain!".into(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
mod parachain_consensus;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
pub use parachain_consensus::run_parachain_consensus;
|
||||
|
||||
/// The result of [`ParachainConsensus::produce_candidate`].
|
||||
pub struct ParachainCandidate<B> {
|
||||
@@ -604,330 +114,3 @@ where
|
||||
self.0.import_block(block_import_params, cache).await
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
use codec::Encode;
|
||||
use cumulus_test_client::{
|
||||
runtime::{Block, Header},
|
||||
Client, InitBlockBuilder, TestClientBuilder, TestClientBuilderExt,
|
||||
};
|
||||
use futures::{channel::mpsc, executor::block_on};
|
||||
use futures_timer::Delay;
|
||||
use std::{sync::Mutex, time::Duration};
|
||||
|
||||
struct RelaychainInner {
|
||||
new_best_heads: Option<mpsc::UnboundedReceiver<Header>>,
|
||||
finalized_heads: Option<mpsc::UnboundedReceiver<Header>>,
|
||||
new_best_heads_sender: mpsc::UnboundedSender<Header>,
|
||||
finalized_heads_sender: mpsc::UnboundedSender<Header>,
|
||||
}
|
||||
|
||||
impl RelaychainInner {
|
||||
fn new() -> Self {
|
||||
let (new_best_heads_sender, new_best_heads) = mpsc::unbounded();
|
||||
let (finalized_heads_sender, finalized_heads) = mpsc::unbounded();
|
||||
|
||||
Self {
|
||||
new_best_heads_sender,
|
||||
finalized_heads_sender,
|
||||
new_best_heads: Some(new_best_heads),
|
||||
finalized_heads: Some(finalized_heads),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct Relaychain {
|
||||
inner: Arc<Mutex<RelaychainInner>>,
|
||||
}
|
||||
|
||||
impl Relaychain {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
inner: Arc::new(Mutex::new(RelaychainInner::new())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RelaychainClient for Relaychain {
|
||||
type Error = ClientError;
|
||||
|
||||
type HeadStream = Box<dyn Stream<Item = Vec<u8>> + Send + Unpin>;
|
||||
fn new_best_heads(&self, _: ParaId) -> ClientResult<Self::HeadStream> {
|
||||
let stream = self
|
||||
.inner
|
||||
.lock()
|
||||
.unwrap()
|
||||
.new_best_heads
|
||||
.take()
|
||||
.expect("Should only be called once");
|
||||
|
||||
Ok(Box::new(stream.map(|v| v.encode())))
|
||||
}
|
||||
|
||||
fn finalized_heads(&self, _: ParaId) -> ClientResult<Self::HeadStream> {
|
||||
let stream = self
|
||||
.inner
|
||||
.lock()
|
||||
.unwrap()
|
||||
.finalized_heads
|
||||
.take()
|
||||
.expect("Should only be called once");
|
||||
|
||||
Ok(Box::new(stream.map(|v| v.encode())))
|
||||
}
|
||||
|
||||
fn parachain_head_at(
|
||||
&self,
|
||||
_: &BlockId<PBlock>,
|
||||
_: ParaId,
|
||||
) -> ClientResult<Option<Vec<u8>>> {
|
||||
unimplemented!("Not required for tests")
|
||||
}
|
||||
}
|
||||
|
||||
fn build_and_import_block(mut client: Arc<Client>) -> Block {
|
||||
let builder = client.init_block_builder(None, Default::default());
|
||||
|
||||
let block = builder.build().unwrap().block;
|
||||
let (header, body) = block.clone().deconstruct();
|
||||
|
||||
let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, header);
|
||||
block_import_params.fork_choice = Some(ForkChoiceStrategy::Custom(false));
|
||||
block_import_params.body = Some(body);
|
||||
|
||||
block_on(client.import_block(block_import_params, Default::default())).unwrap();
|
||||
assert_eq!(0, client.chain_info().best_number);
|
||||
|
||||
block
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn follow_new_best_works() {
|
||||
sp_tracing::try_init_simple();
|
||||
|
||||
let client = Arc::new(TestClientBuilder::default().build());
|
||||
|
||||
let block = build_and_import_block(client.clone());
|
||||
let relay_chain = Relaychain::new();
|
||||
let new_best_heads_sender = relay_chain
|
||||
.inner
|
||||
.lock()
|
||||
.unwrap()
|
||||
.new_best_heads_sender
|
||||
.clone();
|
||||
|
||||
let consensus =
|
||||
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));
|
||||
|
||||
let work = async move {
|
||||
new_best_heads_sender
|
||||
.unbounded_send(block.header().clone())
|
||||
.unwrap();
|
||||
loop {
|
||||
Delay::new(Duration::from_millis(100)).await;
|
||||
if block.hash() == client.usage_info().chain.best_hash {
|
||||
break;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
block_on(async move {
|
||||
futures::pin_mut!(consensus);
|
||||
futures::pin_mut!(work);
|
||||
|
||||
select! {
|
||||
r = consensus.fuse() => panic!("Consensus should not end: {:?}", r),
|
||||
_ = work.fuse() => {},
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn follow_finalized_works() {
|
||||
sp_tracing::try_init_simple();
|
||||
|
||||
let client = Arc::new(TestClientBuilder::default().build());
|
||||
|
||||
let block = build_and_import_block(client.clone());
|
||||
let relay_chain = Relaychain::new();
|
||||
let finalized_sender = relay_chain
|
||||
.inner
|
||||
.lock()
|
||||
.unwrap()
|
||||
.finalized_heads_sender
|
||||
.clone();
|
||||
|
||||
let consensus =
|
||||
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));
|
||||
|
||||
let work = async move {
|
||||
finalized_sender
|
||||
.unbounded_send(block.header().clone())
|
||||
.unwrap();
|
||||
loop {
|
||||
Delay::new(Duration::from_millis(100)).await;
|
||||
if block.hash() == client.usage_info().chain.finalized_hash {
|
||||
break;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
block_on(async move {
|
||||
futures::pin_mut!(consensus);
|
||||
futures::pin_mut!(work);
|
||||
|
||||
select! {
|
||||
r = consensus.fuse() => panic!("Consensus should not end: {:?}", r),
|
||||
_ = work.fuse() => {},
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn follow_finalized_does_not_stop_on_unknown_block() {
|
||||
sp_tracing::try_init_simple();
|
||||
|
||||
let client = Arc::new(TestClientBuilder::default().build());
|
||||
|
||||
let block = build_and_import_block(client.clone());
|
||||
|
||||
let unknown_block = {
|
||||
let block_builder = client.init_block_builder_at(
|
||||
&BlockId::Hash(block.hash()),
|
||||
None,
|
||||
Default::default(),
|
||||
);
|
||||
block_builder.build().unwrap().block
|
||||
};
|
||||
|
||||
let relay_chain = Relaychain::new();
|
||||
let finalized_sender = relay_chain
|
||||
.inner
|
||||
.lock()
|
||||
.unwrap()
|
||||
.finalized_heads_sender
|
||||
.clone();
|
||||
|
||||
let consensus =
|
||||
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));
|
||||
|
||||
let work = async move {
|
||||
for _ in 0..3usize {
|
||||
finalized_sender
|
||||
.unbounded_send(unknown_block.header().clone())
|
||||
.unwrap();
|
||||
|
||||
Delay::new(Duration::from_millis(100)).await;
|
||||
}
|
||||
|
||||
finalized_sender
|
||||
.unbounded_send(block.header().clone())
|
||||
.unwrap();
|
||||
loop {
|
||||
Delay::new(Duration::from_millis(100)).await;
|
||||
if block.hash() == client.usage_info().chain.finalized_hash {
|
||||
break;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
block_on(async move {
|
||||
futures::pin_mut!(consensus);
|
||||
futures::pin_mut!(work);
|
||||
|
||||
select! {
|
||||
r = consensus.fuse() => panic!("Consensus should not end: {:?}", r),
|
||||
_ = work.fuse() => {},
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// It can happen that we first import a relay chain block, while not yet having the parachain
|
||||
// block imported that would be set to the best block. We need to make sure to import this
|
||||
// block as new best block in the moment it is imported.
|
||||
#[test]
|
||||
fn follow_new_best_sets_best_after_it_is_imported() {
|
||||
sp_tracing::try_init_simple();
|
||||
|
||||
let mut client = Arc::new(TestClientBuilder::default().build());
|
||||
|
||||
let block = build_and_import_block(client.clone());
|
||||
|
||||
let unknown_block = {
|
||||
let block_builder = client.init_block_builder_at(
|
||||
&BlockId::Hash(block.hash()),
|
||||
None,
|
||||
Default::default(),
|
||||
);
|
||||
block_builder.build().unwrap().block
|
||||
};
|
||||
|
||||
let relay_chain = Relaychain::new();
|
||||
let new_best_heads_sender = relay_chain
|
||||
.inner
|
||||
.lock()
|
||||
.unwrap()
|
||||
.new_best_heads_sender
|
||||
.clone();
|
||||
|
||||
let consensus =
|
||||
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));
|
||||
|
||||
let work = async move {
|
||||
new_best_heads_sender
|
||||
.unbounded_send(block.header().clone())
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
Delay::new(Duration::from_millis(100)).await;
|
||||
if block.hash() == client.usage_info().chain.best_hash {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Announce the unknown block
|
||||
new_best_heads_sender
|
||||
.unbounded_send(unknown_block.header().clone())
|
||||
.unwrap();
|
||||
|
||||
// Do some iterations. As this is a local task executor, only one task can run at a time.
|
||||
// Meaning that it should already have processed the unknown block.
|
||||
for _ in 0..3usize {
|
||||
Delay::new(Duration::from_millis(100)).await;
|
||||
}
|
||||
|
||||
let (header, body) = unknown_block.clone().deconstruct();
|
||||
|
||||
let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, header);
|
||||
block_import_params.fork_choice = Some(ForkChoiceStrategy::Custom(false));
|
||||
block_import_params.body = Some(body);
|
||||
|
||||
// Now import the unkown block to make it "known"
|
||||
client
|
||||
.import_block(block_import_params, Default::default())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
loop {
|
||||
Delay::new(Duration::from_millis(100)).await;
|
||||
if unknown_block.hash() == client.usage_info().chain.best_hash {
|
||||
break;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
block_on(async move {
|
||||
futures::pin_mut!(consensus);
|
||||
futures::pin_mut!(work);
|
||||
|
||||
select! {
|
||||
r = consensus.fuse() => panic!("Consensus should not end: {:?}", r),
|
||||
_ = work.fuse() => {},
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,418 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Cumulus.
|
||||
|
||||
// Cumulus is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Cumulus is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use sc_client_api::{
|
||||
Backend, BlockBackend, BlockImportNotification, BlockchainEvents, Finalizer, UsageProvider,
|
||||
};
|
||||
use sp_api::ProvideRuntimeApi;
|
||||
use sp_blockchain::{Error as ClientError, Result as ClientResult};
|
||||
use sp_consensus::{BlockImport, BlockImportParams, BlockOrigin, BlockStatus, ForkChoiceStrategy};
|
||||
use sp_runtime::{
|
||||
generic::BlockId,
|
||||
traits::{Block as BlockT, Header as HeaderT},
|
||||
};
|
||||
|
||||
use polkadot_primitives::v1::{
|
||||
Block as PBlock, Id as ParaId, OccupiedCoreAssumption, ParachainHost,
|
||||
};
|
||||
|
||||
use codec::Decode;
|
||||
use futures::{future, select, FutureExt, Stream, StreamExt};
|
||||
|
||||
use std::{pin::Pin, sync::Arc};
|
||||
|
||||
/// Helper for the relay chain client. This is expected to be a lightweight handle like an `Arc`.
|
||||
pub trait RelaychainClient: Clone + 'static {
|
||||
/// The error type for interacting with the Polkadot client.
|
||||
type Error: std::fmt::Debug + Send;
|
||||
|
||||
/// A stream that yields head-data for a parachain.
|
||||
type HeadStream: Stream<Item = Vec<u8>> + Send + Unpin;
|
||||
|
||||
/// Get a stream of new best heads for the given parachain.
|
||||
fn new_best_heads(&self, para_id: ParaId) -> Self::HeadStream;
|
||||
|
||||
/// Get a stream of finalized heads for the given parachain.
|
||||
fn finalized_heads(&self, para_id: ParaId) -> Self::HeadStream;
|
||||
|
||||
/// Returns the parachain head for the given `para_id` at the given block id.
|
||||
fn parachain_head_at(
|
||||
&self,
|
||||
at: &BlockId<PBlock>,
|
||||
para_id: ParaId,
|
||||
) -> ClientResult<Option<Vec<u8>>>;
|
||||
}
|
||||
|
||||
/// Follow the finalized head of the given parachain.
///
/// For every finalized block of the relay chain, it will get the included parachain header
/// corresponding to `para_id` and will finalize it in the parachain.
async fn follow_finalized_head<P, Block, B, R>(para_id: ParaId, parachain: Arc<P>, relay_chain: R)
where
    Block: BlockT,
    P: Finalizer<Block, B> + UsageProvider<Block>,
    R: RelaychainClient,
    B: Backend<Block>,
{
    let mut finalized_heads = relay_chain.finalized_heads(para_id);

    loop {
        // The head stream ending means the relay chain client shut down; stop following.
        let finalized_head = if let Some(h) = finalized_heads.next().await {
            h
        } else {
            tracing::debug!(target: "cumulus-consensus", "Stopping following finalized head.");
            return;
        };

        // Heads arrive SCALE-encoded; a decode failure only skips this head instead of
        // terminating the whole follower.
        let header = match Block::Header::decode(&mut &finalized_head[..]) {
            Ok(header) => header,
            Err(err) => {
                tracing::warn!(
                    target: "cumulus-consensus",
                    error = ?err,
                    "Could not decode parachain header while following finalized heads.",
                );
                continue;
            }
        };

        let hash = header.hash();

        // don't finalize the same block multiple times.
        if parachain.usage_info().chain.finalized_hash != hash {
            if let Err(e) = parachain.finalize_block(BlockId::hash(hash), None, true) {
                match e {
                    // The block may simply not be imported yet; only worth a debug log.
                    ClientError::UnknownBlock(_) => tracing::debug!(
                        target: "cumulus-consensus",
                        block_hash = ?hash,
                        "Could not finalize block because it is unknown.",
                    ),
                    _ => tracing::warn!(
                        target: "cumulus-consensus",
                        error = ?e,
                        block_hash = ?hash,
                        "Failed to finalize block",
                    ),
                }
            }
        }
    }
}
|
||||
|
||||
/// Run the parachain consensus.
///
/// This will follow the given `relay_chain` to act as consensus for the parachain that corresponds
/// to the given `para_id`. It will set the new best block of the parachain as it gets aware of it.
/// The same happens for the finalized block.
///
/// # Note
///
/// This will access the backend of the parachain and thus, this future should be spawned as blocking
/// task.
pub async fn run_parachain_consensus<P, R, Block, B>(
    para_id: ParaId,
    parachain: Arc<P>,
    relay_chain: R,
    announce_block: Arc<dyn Fn(Block::Hash, Option<Vec<u8>>) + Send + Sync>,
) where
    Block: BlockT,
    P: Finalizer<Block, B>
        + UsageProvider<Block>
        + Send
        + Sync
        + BlockBackend<Block>
        + BlockchainEvents<Block>,
    for<'a> &'a P: BlockImport<Block>,
    R: RelaychainClient,
    B: Backend<Block>,
{
    let follow_new_best = follow_new_best(
        para_id,
        parachain.clone(),
        relay_chain.clone(),
        announce_block,
    );
    let follow_finalized_head = follow_finalized_head(para_id, parachain, relay_chain);
    // Drive both followers concurrently; if either one ends (its input stream terminated),
    // the whole consensus future resolves.
    select! {
        _ = follow_new_best.fuse() => {},
        _ = follow_finalized_head.fuse() => {},
    }
}
|
||||
|
||||
/// Follow the relay chain new best head, to update the Parachain new best head.
async fn follow_new_best<P, R, Block, B>(
    para_id: ParaId,
    parachain: Arc<P>,
    relay_chain: R,
    announce_block: Arc<dyn Fn(Block::Hash, Option<Vec<u8>>) + Send + Sync>,
) where
    Block: BlockT,
    P: Finalizer<Block, B>
        + UsageProvider<Block>
        + Send
        + Sync
        + BlockBackend<Block>
        + BlockchainEvents<Block>,
    for<'a> &'a P: BlockImport<Block>,
    R: RelaychainClient,
    B: Backend<Block>,
{
    let mut new_best_heads = relay_chain.new_best_heads(para_id).fuse();
    let mut imported_blocks = parachain.import_notification_stream().fuse();
    // The unset best header of the parachain. Will be `Some(_)` when we have imported a relay chain
    // block before the parachain block it included. In this case we need to wait for this block to
    // be imported to set it as new best.
    let mut unset_best_header = None;

    loop {
        select! {
            h = new_best_heads.next() => {
                match h {
                    Some(h) => handle_new_best_parachain_head(
                        h,
                        &*parachain,
                        &mut unset_best_header,
                    ).await,
                    None => {
                        // Relay chain head stream ended; nothing more to follow.
                        tracing::debug!(
                            target: "cumulus-consensus",
                            "Stopping following new best.",
                        );
                        return
                    }
                }
            },
            i = imported_blocks.next() => {
                match i {
                    Some(i) => handle_new_block_imported(
                        i,
                        &mut unset_best_header,
                        &*parachain,
                        &*announce_block,
                    ).await,
                    None => {
                        // Parachain import notification stream ended; client is shutting down.
                        tracing::debug!(
                            target: "cumulus-consensus",
                            "Stopping following imported blocks.",
                        );
                        return
                    }
                }
            },
        }
    }
}
|
||||
|
||||
/// Handle a new import block of the parachain.
///
/// Re-announces non-own blocks and, if the imported block is the "unset best header" we have been
/// waiting for (see [`follow_new_best`]), re-imports it as the new best block.
async fn handle_new_block_imported<Block, P>(
    notification: BlockImportNotification<Block>,
    unset_best_header_opt: &mut Option<Block::Header>,
    parachain: &P,
    announce_block: &(dyn Fn(Block::Hash, Option<Vec<u8>>) + Send + Sync),
) where
    Block: BlockT,
    P: UsageProvider<Block> + Send + Sync + BlockBackend<Block>,
    for<'a> &'a P: BlockImport<Block>,
{
    // HACK
    //
    // Remove after https://github.com/paritytech/substrate/pull/8052 or similar is merged
    if notification.origin != BlockOrigin::Own {
        announce_block(notification.hash, None);
    }

    let unset_best_header = match (notification.is_new_best, &unset_best_header_opt) {
        // If this is the new best block or we don't have any unset block, we can end it here.
        (true, _) | (_, None) => return,
        (false, Some(ref u)) => u,
    };

    let unset_hash = if notification.header.number() < unset_best_header.number() {
        // Imported block is below the block we are waiting for; keep waiting.
        return;
    } else if notification.header.number() == unset_best_header.number() {
        let unset_hash = unset_best_header.hash();

        if unset_hash != notification.hash {
            // Same height but a different fork; keep waiting.
            return;
        } else {
            unset_hash
        }
    } else {
        unset_best_header.hash()
    };

    match parachain.block_status(&BlockId::Hash(unset_hash)) {
        Ok(BlockStatus::InChainWithState) => {
            // Release the shared borrow so we can take the owned header out of the option.
            drop(unset_best_header);
            let unset_best_header = unset_best_header_opt
                .take()
                .expect("We checked above that the value is set; qed");

            import_block_as_new_best(unset_hash, unset_best_header, parachain).await;
        }
        state => tracing::debug!(
            target: "cumulus-consensus",
            ?unset_best_header,
            ?notification.header,
            ?state,
            "Unexpected state for unset best header.",
        ),
    }
}
|
||||
|
||||
/// Handle the new best parachain head as extracted from the new best relay chain.
|
||||
async fn handle_new_best_parachain_head<Block, P>(
|
||||
head: Vec<u8>,
|
||||
parachain: &P,
|
||||
unset_best_header: &mut Option<Block::Header>,
|
||||
) where
|
||||
Block: BlockT,
|
||||
P: UsageProvider<Block> + Send + Sync + BlockBackend<Block>,
|
||||
for<'a> &'a P: BlockImport<Block>,
|
||||
{
|
||||
let parachain_head = match <<Block as BlockT>::Header>::decode(&mut &head[..]) {
|
||||
Ok(header) => header,
|
||||
Err(err) => {
|
||||
tracing::warn!(
|
||||
target: "cumulus-consensus",
|
||||
error = ?err,
|
||||
"Could not decode Parachain header while following best heads.",
|
||||
);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let hash = parachain_head.hash();
|
||||
|
||||
if parachain.usage_info().chain.best_hash == hash {
|
||||
tracing::debug!(
|
||||
target: "cumulus-consensus",
|
||||
block_hash = ?hash,
|
||||
"Skipping set new best block, because block is already the best.",
|
||||
)
|
||||
} else {
|
||||
// Make sure the block is already known or otherwise we skip setting new best.
|
||||
match parachain.block_status(&BlockId::Hash(hash)) {
|
||||
Ok(BlockStatus::InChainWithState) => {
|
||||
unset_best_header.take();
|
||||
|
||||
import_block_as_new_best(hash, parachain_head, parachain).await;
|
||||
}
|
||||
Ok(BlockStatus::InChainPruned) => {
|
||||
tracing::error!(
|
||||
target: "cumulus-collator",
|
||||
block_hash = ?hash,
|
||||
"Trying to set pruned block as new best!",
|
||||
);
|
||||
}
|
||||
Ok(BlockStatus::Unknown) => {
|
||||
*unset_best_header = Some(parachain_head);
|
||||
|
||||
tracing::debug!(
|
||||
target: "cumulus-collator",
|
||||
block_hash = ?hash,
|
||||
"Parachain block not yet imported, waiting for import to enact as best block.",
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!(
|
||||
target: "cumulus-collator",
|
||||
block_hash = ?hash,
|
||||
error = ?e,
|
||||
"Failed to get block status of block.",
|
||||
);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn import_block_as_new_best<Block, P>(hash: Block::Hash, header: Block::Header, parachain: &P)
|
||||
where
|
||||
Block: BlockT,
|
||||
P: UsageProvider<Block> + Send + Sync + BlockBackend<Block>,
|
||||
for<'a> &'a P: BlockImport<Block>,
|
||||
{
|
||||
// Make it the new best block
|
||||
let mut block_import_params = BlockImportParams::new(BlockOrigin::ConsensusBroadcast, header);
|
||||
block_import_params.fork_choice = Some(ForkChoiceStrategy::Custom(true));
|
||||
block_import_params.import_existing = true;
|
||||
|
||||
if let Err(err) = (&*parachain)
|
||||
.import_block(block_import_params, Default::default())
|
||||
.await
|
||||
{
|
||||
tracing::warn!(
|
||||
target: "cumulus-consensus",
|
||||
block_hash = ?hash,
|
||||
error = ?err,
|
||||
"Failed to set new best block.",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// [`RelaychainClient`] implementation on top of a shared full relay chain client that exposes
/// blockchain events and the `ParachainHost` runtime API.
impl<T> RelaychainClient for Arc<T>
where
    T: sc_client_api::BlockchainEvents<PBlock> + ProvideRuntimeApi<PBlock> + 'static + Send + Sync,
    <T as ProvideRuntimeApi<PBlock>>::Api: ParachainHost<PBlock>,
{
    type Error = ClientError;

    type HeadStream = Pin<Box<dyn Stream<Item = Vec<u8>> + Send>>;

    fn new_best_heads(&self, para_id: ParaId) -> Self::HeadStream {
        let relay_chain = self.clone();

        // For every new best relay chain block, look up the parachain head included at that
        // block. Runtime-API errors and absent heads are silently dropped from the stream.
        self.import_notification_stream()
            .filter_map(move |n| {
                future::ready(if n.is_new_best {
                    relay_chain
                        .parachain_head_at(&BlockId::hash(n.hash), para_id)
                        .ok()
                        .flatten()
                } else {
                    None
                })
            })
            .boxed()
    }

    fn finalized_heads(&self, para_id: ParaId) -> Self::HeadStream {
        let relay_chain = self.clone();

        // Same mapping as `new_best_heads`, but driven by relay chain finality notifications.
        self.finality_notification_stream()
            .filter_map(move |n| {
                future::ready(
                    relay_chain
                        .parachain_head_at(&BlockId::hash(n.hash), para_id)
                        .ok()
                        .flatten(),
                )
            })
            .boxed()
    }

    fn parachain_head_at(
        &self,
        at: &BlockId<PBlock>,
        para_id: ParaId,
    ) -> ClientResult<Option<Vec<u8>>> {
        // Fetch the persisted validation data and return its SCALE-encoded `parent_head`.
        // NOTE(review): uses `OccupiedCoreAssumption::TimedOut` — presumably to get the head
        // as if the occupying candidate timed out; confirm against the Polkadot API docs.
        self.runtime_api()
            .persisted_validation_data(at, para_id, OccupiedCoreAssumption::TimedOut)
            .map(|s| s.map(|s| s.parent_head.0))
            .map_err(Into::into)
    }
}
|
||||
@@ -0,0 +1,338 @@
|
||||
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Cumulus.
|
||||
|
||||
// Cumulus is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Cumulus is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use crate::*;
|
||||
|
||||
use codec::Encode;
|
||||
use cumulus_test_client::{
|
||||
runtime::{Block, Header},
|
||||
Client, InitBlockBuilder, TestClientBuilder, TestClientBuilderExt,
|
||||
};
|
||||
use futures::{channel::mpsc, executor::block_on, select, FutureExt, Stream, StreamExt};
|
||||
use futures_timer::Delay;
|
||||
use polkadot_primitives::v1::{Block as PBlock, Id as ParaId};
|
||||
use sc_client_api::UsageProvider;
|
||||
use sp_blockchain::{Error as ClientError, Result as ClientResult};
|
||||
use sp_consensus::{BlockImport, BlockImportParams, BlockOrigin, ForkChoiceStrategy};
|
||||
use sp_runtime::generic::BlockId;
|
||||
use std::{
|
||||
sync::{Arc, Mutex},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
struct RelaychainInner {
|
||||
new_best_heads: Option<mpsc::UnboundedReceiver<Header>>,
|
||||
finalized_heads: Option<mpsc::UnboundedReceiver<Header>>,
|
||||
new_best_heads_sender: mpsc::UnboundedSender<Header>,
|
||||
finalized_heads_sender: mpsc::UnboundedSender<Header>,
|
||||
}
|
||||
|
||||
impl RelaychainInner {
|
||||
fn new() -> Self {
|
||||
let (new_best_heads_sender, new_best_heads) = mpsc::unbounded();
|
||||
let (finalized_heads_sender, finalized_heads) = mpsc::unbounded();
|
||||
|
||||
Self {
|
||||
new_best_heads_sender,
|
||||
finalized_heads_sender,
|
||||
new_best_heads: Some(new_best_heads),
|
||||
finalized_heads: Some(finalized_heads),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct Relaychain {
|
||||
inner: Arc<Mutex<RelaychainInner>>,
|
||||
}
|
||||
|
||||
impl Relaychain {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
inner: Arc::new(Mutex::new(RelaychainInner::new())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl crate::parachain_consensus::RelaychainClient for Relaychain {
|
||||
type Error = ClientError;
|
||||
|
||||
type HeadStream = Box<dyn Stream<Item = Vec<u8>> + Send + Unpin>;
|
||||
|
||||
fn new_best_heads(&self, _: ParaId) -> Self::HeadStream {
|
||||
let stream = self
|
||||
.inner
|
||||
.lock()
|
||||
.unwrap()
|
||||
.new_best_heads
|
||||
.take()
|
||||
.expect("Should only be called once");
|
||||
|
||||
Box::new(stream.map(|v| v.encode()))
|
||||
}
|
||||
|
||||
fn finalized_heads(&self, _: ParaId) -> Self::HeadStream {
|
||||
let stream = self
|
||||
.inner
|
||||
.lock()
|
||||
.unwrap()
|
||||
.finalized_heads
|
||||
.take()
|
||||
.expect("Should only be called once");
|
||||
|
||||
Box::new(stream.map(|v| v.encode()))
|
||||
}
|
||||
|
||||
fn parachain_head_at(&self, _: &BlockId<PBlock>, _: ParaId) -> ClientResult<Option<Vec<u8>>> {
|
||||
unimplemented!("Not required for tests")
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a block on top of the current best and import it WITHOUT enacting it as new best.
///
/// The trailing assertion verifies the best number is still `0`: the block is known to the
/// client but deliberately not the best block, which is what the tests rely on.
fn build_and_import_block(mut client: Arc<Client>) -> Block {
    let builder = client.init_block_builder(None, Default::default());

    let block = builder.build().unwrap().block;
    let (header, body) = block.clone().deconstruct();

    let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, header);
    // `Custom(false)` prevents the imported block from becoming the new best block.
    block_import_params.fork_choice = Some(ForkChoiceStrategy::Custom(false));
    block_import_params.body = Some(body);

    block_on(client.import_block(block_import_params, Default::default())).unwrap();
    assert_eq!(0, client.chain_info().best_number);

    block
}
|
||||
|
||||
/// A head announced as relay chain "new best" must become the parachain best block.
#[test]
fn follow_new_best_works() {
    sp_tracing::try_init_simple();

    let client = Arc::new(TestClientBuilder::default().build());

    let block = build_and_import_block(client.clone());
    let relay_chain = Relaychain::new();
    let new_best_heads_sender = relay_chain
        .inner
        .lock()
        .unwrap()
        .new_best_heads_sender
        .clone();

    let consensus =
        run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));

    let work = async move {
        // Announce the imported-but-not-best block as the new best head...
        new_best_heads_sender
            .unbounded_send(block.header().clone())
            .unwrap();
        // ...then poll until the consensus task has enacted it as best.
        loop {
            Delay::new(Duration::from_millis(100)).await;
            if block.hash() == client.usage_info().chain.best_hash {
                break;
            }
        }
    };

    block_on(async move {
        futures::pin_mut!(consensus);
        futures::pin_mut!(work);

        // The consensus future must outlive the test work; it ending early is a failure.
        select! {
            r = consensus.fuse() => panic!("Consensus should not end: {:?}", r),
            _ = work.fuse() => {},
        }
    });
}
|
||||
|
||||
/// A head announced as relay chain "finalized" must become the parachain finalized block.
#[test]
fn follow_finalized_works() {
    sp_tracing::try_init_simple();

    let client = Arc::new(TestClientBuilder::default().build());

    let block = build_and_import_block(client.clone());
    let relay_chain = Relaychain::new();
    let finalized_sender = relay_chain
        .inner
        .lock()
        .unwrap()
        .finalized_heads_sender
        .clone();

    let consensus =
        run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));

    let work = async move {
        // Announce the imported block as finalized on the relay chain...
        finalized_sender
            .unbounded_send(block.header().clone())
            .unwrap();
        // ...then poll until the parachain has finalized it.
        loop {
            Delay::new(Duration::from_millis(100)).await;
            if block.hash() == client.usage_info().chain.finalized_hash {
                break;
            }
        }
    };

    block_on(async move {
        futures::pin_mut!(consensus);
        futures::pin_mut!(work);

        select! {
            r = consensus.fuse() => panic!("Consensus should not end: {:?}", r),
            _ = work.fuse() => {},
        }
    });
}
|
||||
|
||||
/// Finalization announcements for blocks the parachain does not know must not kill the
/// consensus task: later known blocks still get finalized.
#[test]
fn follow_finalized_does_not_stop_on_unknown_block() {
    sp_tracing::try_init_simple();

    let client = Arc::new(TestClientBuilder::default().build());

    let block = build_and_import_block(client.clone());

    // Build a child block but do NOT import it, so the client treats it as unknown.
    let unknown_block = {
        let block_builder =
            client.init_block_builder_at(&BlockId::Hash(block.hash()), None, Default::default());
        block_builder.build().unwrap().block
    };

    let relay_chain = Relaychain::new();
    let finalized_sender = relay_chain
        .inner
        .lock()
        .unwrap()
        .finalized_heads_sender
        .clone();

    let consensus =
        run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));

    let work = async move {
        // Repeatedly announce the unknown block; the follower should just log and carry on.
        for _ in 0..3usize {
            finalized_sender
                .unbounded_send(unknown_block.header().clone())
                .unwrap();

            Delay::new(Duration::from_millis(100)).await;
        }

        // Now announce a known block and expect it to be finalized despite the noise above.
        finalized_sender
            .unbounded_send(block.header().clone())
            .unwrap();
        loop {
            Delay::new(Duration::from_millis(100)).await;
            if block.hash() == client.usage_info().chain.finalized_hash {
                break;
            }
        }
    };

    block_on(async move {
        futures::pin_mut!(consensus);
        futures::pin_mut!(work);

        select! {
            r = consensus.fuse() => panic!("Consensus should not end: {:?}", r),
            _ = work.fuse() => {},
        }
    });
}
|
||||
|
||||
// It can happen that we first import a relay chain block, while not yet having the parachain
// block imported that would be set to the best block. We need to make sure to import this
// block as new best block in the moment it is imported.
#[test]
fn follow_new_best_sets_best_after_it_is_imported() {
    sp_tracing::try_init_simple();

    let mut client = Arc::new(TestClientBuilder::default().build());

    let block = build_and_import_block(client.clone());

    // Build a child block but delay its import; while unimported it is "unknown" to the client.
    let unknown_block = {
        let block_builder =
            client.init_block_builder_at(&BlockId::Hash(block.hash()), None, Default::default());
        block_builder.build().unwrap().block
    };

    let relay_chain = Relaychain::new();
    let new_best_heads_sender = relay_chain
        .inner
        .lock()
        .unwrap()
        .new_best_heads_sender
        .clone();

    let consensus =
        run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));

    let work = async move {
        // First enact the known block as best, to get into a well-defined starting state.
        new_best_heads_sender
            .unbounded_send(block.header().clone())
            .unwrap();

        loop {
            Delay::new(Duration::from_millis(100)).await;
            if block.hash() == client.usage_info().chain.best_hash {
                break;
            }
        }

        // Announce the unknown block
        new_best_heads_sender
            .unbounded_send(unknown_block.header().clone())
            .unwrap();

        // Do some iterations. As this is a local task executor, only one task can run at a time.
        // Meaning that it should already have processed the unknown block.
        for _ in 0..3usize {
            Delay::new(Duration::from_millis(100)).await;
        }

        let (header, body) = unknown_block.clone().deconstruct();

        let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, header);
        // `Custom(false)`: the import itself must not set best; the consensus task has to do it.
        block_import_params.fork_choice = Some(ForkChoiceStrategy::Custom(false));
        block_import_params.body = Some(body);

        // Now import the unknown block to make it "known"
        client
            .import_block(block_import_params, Default::default())
            .await
            .unwrap();

        // The consensus task should pick up the import notification and enact it as best.
        loop {
            Delay::new(Duration::from_millis(100)).await;
            if unknown_block.hash() == client.usage_info().chain.best_hash {
                break;
            }
        }
    };

    block_on(async move {
        futures::pin_mut!(consensus);
        futures::pin_mut!(work);

        select! {
            r = consensus.fuse() => panic!("Consensus should not end: {:?}", r),
            _ = work.fuse() => {},
        }
    });
}
|
||||
@@ -29,4 +29,4 @@ futures = { version = "0.3.8", features = ["compat"] }
|
||||
codec = { package = "parity-scale-codec", version = "2.0.0", features = [ "derive" ] }
|
||||
tracing = "0.1.22"
|
||||
async-trait = "0.1.42"
|
||||
parking_lot = "0.9"
|
||||
parking_lot = "0.10.2"
|
||||
|
||||
@@ -0,0 +1,53 @@
|
||||
[package]
|
||||
name = "cumulus-client-pov-recovery"
|
||||
version = "0.1.0"
|
||||
authors = ["Parity Technologies <admin@parity.io>"]
|
||||
description = "Parachain PoV recovery"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
# Substrate deps
|
||||
sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sp-maybe-compressed-blob = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
|
||||
# Polkadot deps
|
||||
polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master" }
|
||||
polkadot-statement-table = { git = "https://github.com/paritytech/polkadot", branch = "master" }
|
||||
polkadot-node-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master" }
|
||||
polkadot-service = { git = "https://github.com/paritytech/polkadot", branch = "master" }
|
||||
polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "master" }
|
||||
polkadot-overseer = { git = "https://github.com/paritytech/polkadot", branch = "master" }
|
||||
polkadot-node-subsystem = { git = "https://github.com/paritytech/polkadot", branch = "master" }
|
||||
|
||||
# Cumulus deps
|
||||
cumulus-primitives-core = { path = "../../primitives/core" }
|
||||
|
||||
# other deps
|
||||
codec = { package = "parity-scale-codec", version = "2.0.0", features = [ "derive" ] }
|
||||
futures = { version = "0.3.1", features = ["compat"] }
|
||||
futures-timer = "3.0.2"
|
||||
tracing = "0.1.22"
|
||||
rand = "0.8.3"
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { version = "0.2.21", features = ["macros"] }
|
||||
|
||||
# Cumulus deps
|
||||
cumulus-test-service = { path = "../../test/service" }
|
||||
|
||||
# Polkadot deps
|
||||
polkadot-test-client = { git = "https://github.com/paritytech/polkadot", branch = "master" }
|
||||
|
||||
# substrate deps
|
||||
sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
substrate-test-utils = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
@@ -0,0 +1,112 @@
|
||||
// Copyright 2021 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Polkadot.
|
||||
|
||||
// Polkadot is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Polkadot is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use sp_runtime::traits::Block as BlockT;
|
||||
|
||||
use polkadot_node_primitives::AvailableData;
|
||||
use polkadot_node_subsystem::messages::AvailabilityRecoveryMessage;
|
||||
use polkadot_overseer::OverseerHandler;
|
||||
|
||||
use futures::{channel::oneshot, stream::FuturesUnordered, Future, FutureExt, StreamExt};
|
||||
|
||||
use std::{collections::HashSet, pin::Pin};
|
||||
|
||||
/// The active candidate recovery.
///
/// This handles the candidate recovery and tracks the active recoveries.
pub(crate) struct ActiveCandidateRecovery<Block: BlockT> {
    /// The recoveries that are currently being executed.
    recoveries: FuturesUnordered<
        Pin<Box<dyn Future<Output = (Block::Hash, Option<AvailableData>)> + Send>>,
    >,
    /// The block hashes of the candidates currently being recovered.
    candidates: HashSet<Block::Hash>,
    /// Handle used to send `AvailabilityRecoveryMessage`s to the overseer.
    overseer_handler: OverseerHandler,
}
|
||||
|
||||
impl<Block: BlockT> ActiveCandidateRecovery<Block> {
    /// Create a new instance with no recoveries in flight.
    pub fn new(overseer_handler: OverseerHandler) -> Self {
        Self {
            recoveries: Default::default(),
            candidates: Default::default(),
            overseer_handler,
        }
    }

    /// Recover the given `pending_candidate`.
    ///
    /// Sends a `RecoverAvailableData` request to the overseer and registers a future that
    /// resolves to `(block_hash, Some(data))` on success or `(block_hash, None)` on failure
    /// (including a dropped response channel).
    pub async fn recover_candidate(
        &mut self,
        block_hash: Block::Hash,
        pending_candidate: crate::PendingCandidate<Block>,
    ) {
        let (tx, rx) = oneshot::channel();

        self.overseer_handler
            .send_msg(AvailabilityRecoveryMessage::RecoverAvailableData(
                pending_candidate.receipt,
                pending_candidate.session_index,
                None,
                tx,
            ))
            .await;

        self.candidates.insert(block_hash);

        self.recoveries.push(
            async move {
                match rx.await {
                    Ok(Ok(res)) => (block_hash, Some(res)),
                    // The subsystem answered, but recovery itself failed.
                    Ok(Err(error)) => {
                        tracing::debug!(
                            target: crate::LOG_TARGET,
                            ?error,
                            ?block_hash,
                            "Availability recovery failed",
                        );
                        (block_hash, None)
                    }
                    // The response channel was dropped before an answer arrived.
                    Err(_) => {
                        tracing::debug!(
                            target: crate::LOG_TARGET,
                            "Availability recovery oneshot channel closed",
                        );
                        (block_hash, None)
                    }
                }
            }
            .boxed(),
        );
    }

    /// Returns if the given `candidate` is being recovered.
    pub fn is_being_recovered(&self, candidate: &Block::Hash) -> bool {
        self.candidates.contains(candidate)
    }

    /// Waits for the next recovery.
    ///
    /// If the returned [`AvailableData`] is `None`, it means that the recovery failed.
    pub async fn wait_for_recovery(&mut self) -> (Block::Hash, Option<AvailableData>) {
        loop {
            if let Some(res) = self.recoveries.next().await {
                self.candidates.remove(&res.0);
                return res;
            } else {
                // `FuturesUnordered` yields `None` when empty; park until a recovery is pushed
                // and this future is polled again instead of busy-looping.
                futures::pending!()
            }
        }
    }
}
|
||||
@@ -0,0 +1,456 @@
|
||||
// Copyright 2021 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Cumulus.
|
||||
|
||||
// Cumulus is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Polkadot is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Parachain PoV recovery
//!
//! A parachain needs to build PoVs that are sent to the relay chain to progress. These PoVs are
//! erasure encoded and one piece of it is stored by each relay chain validator. As the relay chain
//! decides on which PoV per parachain to include and thus, to progress the parachain, it can happen
//! that the block corresponding to this PoV isn't propagated in the parachain network. This can have
//! several reasons, either a malicious collator that managed to include its own PoV and doesn't want
//! to share it with the rest of the network or maybe a collator went down before it could distribute
//! the block in the network. When something like this happens we can use the PoV recovery algorithm
//! implemented in this crate to recover a PoV and to propagate it with the rest of the network. This
//! protocol is only executed by the collators, to not overwhelm the relay chain validators.
//!
//! It works in the following way:
//!
//! 1. For every included relay chain block we note the backed candidate of our parachain. If the
//!    block belonging to the PoV is already known, we do nothing. Otherwise we start a timer that
//!    waits a random time between 0..relay_chain_slot_length before starting to recover the PoV.
//!
//! 2. If between starting and firing the timer the block is imported, we skip the recovery of the
//!    PoV.
//!
//! 3. If the timer fired we recover the PoV using the relay chain PoV recovery protocol. After it
//!    is recovered, we restore the block and import it.
//!
//! If we need to recover multiple PoV blocks (which should hopefully not happen in real life), we
//! make sure that the blocks are imported in the correct order.
||||
|
||||
use sc_client_api::{BlockBackend, BlockchainEvents, UsageProvider};
|
||||
use sp_api::ProvideRuntimeApi;
|
||||
use sp_consensus::{
|
||||
import_queue::{ImportQueue, IncomingBlock},
|
||||
BlockOrigin, BlockStatus,
|
||||
};
|
||||
use sp_runtime::{
|
||||
generic::BlockId,
|
||||
traits::{Block as BlockT, Header as HeaderT, NumberFor},
|
||||
};
|
||||
|
||||
use polkadot_node_primitives::{AvailableData, POV_BOMB_LIMIT};
|
||||
use polkadot_overseer::OverseerHandler;
|
||||
use polkadot_primitives::v1::{
|
||||
Block as PBlock, CandidateReceipt, CommittedCandidateReceipt, Id as ParaId, ParachainHost,
|
||||
SessionIndex,
|
||||
};
|
||||
|
||||
use cumulus_primitives_core::ParachainBlockData;
|
||||
|
||||
use codec::Decode;
|
||||
use futures::{select, stream::FuturesUnordered, Future, FutureExt, Stream, StreamExt};
|
||||
use futures_timer::Delay;
|
||||
use rand::{thread_rng, Rng};
|
||||
|
||||
use std::{
|
||||
collections::{HashMap, VecDeque},
|
||||
pin::Pin,
|
||||
sync::Arc,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
mod active_candidate_recovery;
|
||||
use active_candidate_recovery::ActiveCandidateRecovery;
|
||||
|
||||
const LOG_TARGET: &str = "cumulus-pov-recovery";
|
||||
|
||||
/// Represents a pending candidate.
|
||||
struct PendingCandidate<Block: BlockT> {
|
||||
receipt: CandidateReceipt,
|
||||
session_index: SessionIndex,
|
||||
block_number: NumberFor<Block>,
|
||||
}
|
||||
|
||||
/// Encapsulates the logic of the pov recovery.
|
||||
pub struct PoVRecovery<Block: BlockT, PC, IQ, RC> {
|
||||
/// All the pending candidates that we are waiting for to be imported or that need to be
|
||||
/// recovered when `next_candidate_to_recover` tells us to do so.
|
||||
pending_candidates: HashMap<Block::Hash, PendingCandidate<Block>>,
|
||||
/// A stream of futures that resolve to hashes of candidates that need to be recovered.
|
||||
///
|
||||
/// The candidates to the hashes are stored in `pending_candidates`. If a candidate is not
|
||||
/// available anymore in this map, it means that it was already imported.
|
||||
next_candidate_to_recover: FuturesUnordered<Pin<Box<dyn Future<Output = Block::Hash> + Send>>>,
|
||||
active_candidate_recovery: ActiveCandidateRecovery<Block>,
|
||||
/// Blocks that wait that the parent is imported.
|
||||
///
|
||||
/// Uses parent -> blocks mapping.
|
||||
waiting_for_parent: HashMap<Block::Hash, Vec<Block>>,
|
||||
relay_chain_slot_duration: Duration,
|
||||
parachain_client: Arc<PC>,
|
||||
parachain_import_queue: IQ,
|
||||
relay_chain_client: Arc<RC>,
|
||||
para_id: ParaId,
|
||||
}
|
||||
|
||||
impl<Block: BlockT, PC, IQ, RC> PoVRecovery<Block, PC, IQ, RC>
|
||||
where
|
||||
PC: BlockBackend<Block> + BlockchainEvents<Block> + UsageProvider<Block>,
|
||||
RC: ProvideRuntimeApi<PBlock> + BlockchainEvents<PBlock>,
|
||||
RC::Api: ParachainHost<PBlock>,
|
||||
IQ: ImportQueue<Block>,
|
||||
{
|
||||
/// Create a new instance.
|
||||
pub fn new(
|
||||
overseer_handler: OverseerHandler,
|
||||
relay_chain_slot_duration: Duration,
|
||||
parachain_client: Arc<PC>,
|
||||
parachain_import_queue: IQ,
|
||||
relay_chain_client: Arc<RC>,
|
||||
para_id: ParaId,
|
||||
) -> Self {
|
||||
Self {
|
||||
pending_candidates: HashMap::new(),
|
||||
next_candidate_to_recover: Default::default(),
|
||||
active_candidate_recovery: ActiveCandidateRecovery::new(overseer_handler),
|
||||
relay_chain_slot_duration,
|
||||
waiting_for_parent: HashMap::new(),
|
||||
parachain_client,
|
||||
parachain_import_queue,
|
||||
relay_chain_client,
|
||||
para_id,
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle a new pending candidate.
|
||||
fn handle_pending_candidate(
|
||||
&mut self,
|
||||
receipt: CommittedCandidateReceipt,
|
||||
session_index: SessionIndex,
|
||||
) {
|
||||
let header = match Block::Header::decode(&mut &receipt.commitments.head_data.0[..]) {
|
||||
Ok(header) => header,
|
||||
Err(e) => {
|
||||
tracing::warn!(
|
||||
target: LOG_TARGET,
|
||||
error = ?e,
|
||||
"Failed to decode parachain header from pending candidate",
|
||||
);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
if *header.number() <= self.parachain_client.usage_info().chain.finalized_number {
|
||||
return;
|
||||
}
|
||||
|
||||
let hash = header.hash();
|
||||
match self.parachain_client.block_status(&BlockId::Hash(hash)) {
|
||||
Ok(BlockStatus::Unknown) => (),
|
||||
// Any other state means, we should ignore it.
|
||||
Ok(_) => return,
|
||||
Err(e) => {
|
||||
tracing::debug!(
|
||||
target: "cumulus-consensus",
|
||||
error = ?e,
|
||||
block_hash = ?hash,
|
||||
"Failed to get block status",
|
||||
);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if self
|
||||
.pending_candidates
|
||||
.insert(
|
||||
hash,
|
||||
PendingCandidate {
|
||||
block_number: *header.number(),
|
||||
receipt: receipt.to_plain(),
|
||||
session_index,
|
||||
},
|
||||
)
|
||||
.is_some()
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
// Wait some random time, with the maximum being the slot duration of the relay chain
|
||||
// before we start to recover the candidate.
|
||||
let delay = Delay::new(self.relay_chain_slot_duration.mul_f64(thread_rng().gen()));
|
||||
self.next_candidate_to_recover.push(
|
||||
async move {
|
||||
delay.await;
|
||||
hash
|
||||
}
|
||||
.boxed(),
|
||||
);
|
||||
}
|
||||
|
||||
/// Handle an imported block.
|
||||
fn handle_block_imported(&mut self, hash: &Block::Hash) {
|
||||
self.pending_candidates.remove(&hash);
|
||||
}
|
||||
|
||||
/// Handle a finalized block with the given `block_number`.
|
||||
fn handle_block_finalized(&mut self, block_number: NumberFor<Block>) {
|
||||
self.pending_candidates
|
||||
.retain(|_, pc| pc.block_number > block_number);
|
||||
}
|
||||
|
||||
/// Recover the candidate for the given `block_hash`.
|
||||
async fn recover_candidate(&mut self, block_hash: Block::Hash) {
|
||||
let pending_candidate = match self.pending_candidates.remove(&block_hash) {
|
||||
Some(pending_candidate) => pending_candidate,
|
||||
None => return,
|
||||
};
|
||||
|
||||
self.active_candidate_recovery
|
||||
.recover_candidate(block_hash, pending_candidate)
|
||||
.await;
|
||||
}
|
||||
|
||||
/// Clear `waiting_for_parent` from the given `hash` and do this recursively for all child
|
||||
/// blocks.
|
||||
fn clear_waiting_for_parent(&mut self, hash: Block::Hash) {
|
||||
let mut blocks_to_delete = vec![hash];
|
||||
|
||||
while let Some(delete) = blocks_to_delete.pop() {
|
||||
if let Some(childs) = self.waiting_for_parent.remove(&delete) {
|
||||
blocks_to_delete.extend(childs.iter().map(BlockT::hash));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle a recovered candidate.
|
||||
async fn handle_candidate_recovered(
|
||||
&mut self,
|
||||
block_hash: Block::Hash,
|
||||
available_data: Option<AvailableData>,
|
||||
) {
|
||||
let available_data = match available_data {
|
||||
Some(data) => data,
|
||||
None => {
|
||||
self.clear_waiting_for_parent(block_hash);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let raw_block_data = match sp_maybe_compressed_blob::decompress(
|
||||
&available_data.pov.block_data.0,
|
||||
POV_BOMB_LIMIT,
|
||||
) {
|
||||
Ok(r) => r,
|
||||
Err(error) => {
|
||||
tracing::debug!(target: LOG_TARGET, ?error, "Failed to decompress PoV");
|
||||
|
||||
self.clear_waiting_for_parent(block_hash);
|
||||
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let block_data = match ParachainBlockData::<Block>::decode(&mut &raw_block_data[..]) {
|
||||
Ok(d) => d,
|
||||
Err(error) => {
|
||||
tracing::warn!(
|
||||
target: LOG_TARGET,
|
||||
?error,
|
||||
"Failed to decode parachain block data from recovered PoV",
|
||||
);
|
||||
|
||||
self.clear_waiting_for_parent(block_hash);
|
||||
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let block = block_data.into_block();
|
||||
|
||||
let parent = *block.header().parent_hash();
|
||||
|
||||
match self.parachain_client.block_status(&BlockId::hash(parent)) {
|
||||
Ok(BlockStatus::Unknown) => {
|
||||
if self.active_candidate_recovery.is_being_recovered(&parent) {
|
||||
tracing::debug!(
|
||||
target: "cumulus-consensus",
|
||||
?block_hash,
|
||||
parent_hash = ?parent,
|
||||
"Parent is still being recovered, waiting.",
|
||||
);
|
||||
|
||||
self.waiting_for_parent
|
||||
.entry(parent)
|
||||
.or_default()
|
||||
.push(block);
|
||||
return;
|
||||
} else {
|
||||
tracing::debug!(
|
||||
target: "cumulus-consensus",
|
||||
?block_hash,
|
||||
parent_hash = ?parent,
|
||||
"Parent not found while trying to import recovered block.",
|
||||
);
|
||||
|
||||
self.clear_waiting_for_parent(block_hash);
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
Err(error) => {
|
||||
tracing::debug!(
|
||||
target: "cumulus-consensus",
|
||||
block_hash = ?parent,
|
||||
?error,
|
||||
"Error while checking block status",
|
||||
);
|
||||
|
||||
self.clear_waiting_for_parent(block_hash);
|
||||
|
||||
return;
|
||||
}
|
||||
// Any other status is fine to "ignore/accept"
|
||||
_ => (),
|
||||
}
|
||||
|
||||
self.import_block(block).await;
|
||||
}
|
||||
|
||||
/// Import the given `block`.
|
||||
///
|
||||
/// This will also recursivley drain `waiting_for_parent` and import them as well.
|
||||
async fn import_block(&mut self, block: Block) {
|
||||
let mut blocks = VecDeque::new();
|
||||
blocks.push_back(block);
|
||||
|
||||
let mut incoming_blocks = Vec::new();
|
||||
|
||||
while let Some(block) = blocks.pop_front() {
|
||||
let block_hash = block.hash();
|
||||
let (header, body) = block.deconstruct();
|
||||
|
||||
incoming_blocks.push(IncomingBlock {
|
||||
hash: block_hash,
|
||||
header: Some(header),
|
||||
body: Some(body),
|
||||
import_existing: false,
|
||||
allow_missing_state: false,
|
||||
justifications: None,
|
||||
origin: None,
|
||||
});
|
||||
|
||||
if let Some(waiting) = self.waiting_for_parent.remove(&block_hash) {
|
||||
blocks.extend(waiting);
|
||||
}
|
||||
}
|
||||
|
||||
self.parachain_import_queue
|
||||
.import_blocks(BlockOrigin::ConsensusBroadcast, incoming_blocks);
|
||||
}
|
||||
|
||||
/// Run the pov-recovery.
|
||||
pub async fn run(mut self) {
|
||||
let mut imported_blocks = self.parachain_client.import_notification_stream().fuse();
|
||||
let mut finalized_blocks = self.parachain_client.finality_notification_stream().fuse();
|
||||
let pending_candidates =
|
||||
pending_candidates(self.relay_chain_client.clone(), self.para_id).fuse();
|
||||
futures::pin_mut!(pending_candidates);
|
||||
|
||||
loop {
|
||||
select! {
|
||||
pending_candidate = pending_candidates.next() => {
|
||||
if let Some((receipt, session_index)) = pending_candidate {
|
||||
self.handle_pending_candidate(receipt, session_index);
|
||||
} else {
|
||||
tracing::debug!(
|
||||
target: LOG_TARGET,
|
||||
"Pending candidates stream ended",
|
||||
);
|
||||
return;
|
||||
}
|
||||
},
|
||||
imported = imported_blocks.next() => {
|
||||
if let Some(imported) = imported {
|
||||
self.handle_block_imported(&imported.hash);
|
||||
} else {
|
||||
tracing::debug!(
|
||||
target: LOG_TARGET,
|
||||
"Imported blocks stream ended",
|
||||
);
|
||||
return;
|
||||
}
|
||||
},
|
||||
finalized = finalized_blocks.next() => {
|
||||
if let Some(finalized) = finalized {
|
||||
self.handle_block_finalized(*finalized.header.number());
|
||||
} else {
|
||||
tracing::debug!(
|
||||
target: LOG_TARGET,
|
||||
"Finalized blocks stream ended",
|
||||
);
|
||||
return;
|
||||
}
|
||||
},
|
||||
next_to_recover = self.next_candidate_to_recover.next() => {
|
||||
if let Some(block_hash) = next_to_recover {
|
||||
self.recover_candidate(block_hash).await;
|
||||
}
|
||||
},
|
||||
(block_hash, available_data) =
|
||||
self.active_candidate_recovery.wait_for_recovery().fuse() =>
|
||||
{
|
||||
self.handle_candidate_recovered(block_hash, available_data).await;
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a stream over pending candidates for the parachain corresponding to `para_id`.
|
||||
fn pending_candidates<RC>(
|
||||
relay_chain_client: Arc<RC>,
|
||||
para_id: ParaId,
|
||||
) -> impl Stream<Item = (CommittedCandidateReceipt, SessionIndex)>
|
||||
where
|
||||
RC: ProvideRuntimeApi<PBlock> + BlockchainEvents<PBlock>,
|
||||
RC::Api: ParachainHost<PBlock>,
|
||||
{
|
||||
relay_chain_client
|
||||
.import_notification_stream()
|
||||
.filter_map(move |n| {
|
||||
let runtime_api = relay_chain_client.runtime_api();
|
||||
let res = runtime_api
|
||||
.candidate_pending_availability(&BlockId::hash(n.hash), para_id)
|
||||
.and_then(|pa| {
|
||||
runtime_api
|
||||
.session_index_for_child(&BlockId::hash(n.hash))
|
||||
.map(|v| pa.map(|pa| (pa, v)))
|
||||
})
|
||||
.map_err(|e| {
|
||||
tracing::error!(
|
||||
target: LOG_TARGET,
|
||||
error = ?e,
|
||||
"Failed fetch pending candidates.",
|
||||
)
|
||||
})
|
||||
.ok()
|
||||
.flatten();
|
||||
|
||||
async move { res }
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,95 @@
|
||||
// Copyright 2021 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Substrate is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Substrate is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use cumulus_primitives_core::ParaId;
|
||||
use cumulus_test_service::{initial_head_data, Keyring::*};
|
||||
use futures::join;
|
||||
use sc_service::TaskExecutor;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Tests the PoV recovery.
|
||||
///
|
||||
/// If there is a block of the parachain included/backed by the relay chain that isn't circulated in
|
||||
/// the parachain network, we need to recover the PoV from the relay chain. Using this PoV we can
|
||||
/// recover the block, import it and share it with the other nodes of the parachain network.
|
||||
#[substrate_test_utils::test]
|
||||
async fn pov_recovery(task_executor: TaskExecutor) {
|
||||
let mut builder = sc_cli::LoggerBuilder::new("");
|
||||
builder.with_colors(false);
|
||||
let _ = builder.init();
|
||||
|
||||
let para_id = ParaId::from(100);
|
||||
|
||||
// Start alice
|
||||
let alice = cumulus_test_service::run_relay_chain_validator_node(
|
||||
task_executor.clone(),
|
||||
Alice,
|
||||
|| {},
|
||||
vec![],
|
||||
);
|
||||
|
||||
// Start bob
|
||||
let bob = cumulus_test_service::run_relay_chain_validator_node(
|
||||
task_executor.clone(),
|
||||
Bob,
|
||||
|| {},
|
||||
vec![alice.addr.clone()],
|
||||
);
|
||||
|
||||
// Register parachain
|
||||
alice
|
||||
.register_parachain(
|
||||
para_id,
|
||||
cumulus_test_service::runtime::WASM_BINARY
|
||||
.expect("You need to build the WASM binary to run this test!")
|
||||
.to_vec(),
|
||||
initial_head_data(para_id),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Run charlie as parachain collator
|
||||
let charlie =
|
||||
cumulus_test_service::TestNodeBuilder::new(para_id, task_executor.clone(), Charlie)
|
||||
.enable_collator()
|
||||
.connect_to_relay_chain_nodes(vec![&alice, &bob])
|
||||
.wrap_announce_block(|_| {
|
||||
// Never announce any block
|
||||
Arc::new(|_, _| {})
|
||||
})
|
||||
.build()
|
||||
.await;
|
||||
|
||||
// Run dave as parachain full node
|
||||
//
|
||||
// It will need to recover the pov blocks through availability recovery.
|
||||
let dave = cumulus_test_service::TestNodeBuilder::new(para_id, task_executor, Dave)
|
||||
.enable_collator()
|
||||
.use_null_consensus()
|
||||
.connect_to_parachain_node(&charlie)
|
||||
.connect_to_relay_chain_nodes(vec![&alice, &bob])
|
||||
.build()
|
||||
.await;
|
||||
|
||||
dave.wait_for_blocks(7).await;
|
||||
|
||||
join!(
|
||||
alice.task_manager.clean_shutdown(),
|
||||
bob.task_manager.clean_shutdown(),
|
||||
charlie.task_manager.clean_shutdown(),
|
||||
dave.task_manager.clean_shutdown(),
|
||||
);
|
||||
}
|
||||
@@ -8,6 +8,7 @@ edition = "2018"
|
||||
# Cumulus dependencies
|
||||
cumulus-client-consensus-common = { path = "../consensus/common" }
|
||||
cumulus-client-collator = { path = "../collator" }
|
||||
cumulus-client-pov-recovery = { path = "../pov-recovery" }
|
||||
cumulus-primitives-core = { path = "../../primitives/core" }
|
||||
|
||||
# Substrate dependencies
|
||||
@@ -16,6 +17,7 @@ sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "mas
|
||||
sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sc-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
@@ -25,8 +27,9 @@ sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "mas
|
||||
# Polkadot dependencies
|
||||
polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master" }
|
||||
polkadot-service = { git = "https://github.com/paritytech/polkadot", branch = "master" }
|
||||
polkadot-overseer = { git = "https://github.com/paritytech/polkadot", branch = "master" }
|
||||
|
||||
# Other deps
|
||||
futures = "0.3.6"
|
||||
tracing = "0.1.22"
|
||||
codec = { package = "parity-scale-codec", version = "2.0.0" }
|
||||
parking_lot = "0.10.2"
|
||||
|
||||
+118
-26
@@ -20,19 +20,25 @@
|
||||
|
||||
use cumulus_client_consensus_common::ParachainConsensus;
|
||||
use cumulus_primitives_core::{CollectCollationInfo, ParaId};
|
||||
use futures::FutureExt;
|
||||
use polkadot_overseer::OverseerHandler;
|
||||
use polkadot_primitives::v1::{Block as PBlock, CollatorPair};
|
||||
use polkadot_service::{AbstractClient, Client as PClient, ClientHandle, RuntimeApiCollection};
|
||||
use sc_client_api::{
|
||||
Backend as BackendT, BlockBackend, BlockchainEvents, Finalizer, UsageProvider,
|
||||
};
|
||||
use sc_service::{error::Result as ServiceResult, Configuration, Role, TaskManager};
|
||||
use sc_service::{Configuration, Role, TaskManager};
|
||||
use sc_telemetry::TelemetryWorkerHandle;
|
||||
use sp_api::ProvideRuntimeApi;
|
||||
use sp_blockchain::HeaderBackend;
|
||||
use sp_consensus::BlockImport;
|
||||
use sp_consensus::{
|
||||
import_queue::{ImportQueue, IncomingBlock, Link, Origin},
|
||||
BlockImport, BlockOrigin,
|
||||
};
|
||||
use sp_core::traits::SpawnNamed;
|
||||
use sp_runtime::traits::{BlakeTwo256, Block as BlockT};
|
||||
use sp_runtime::{
|
||||
traits::{BlakeTwo256, Block as BlockT, NumberFor},
|
||||
Justifications,
|
||||
};
|
||||
use std::{marker::PhantomData, sync::Arc};
|
||||
|
||||
pub mod genesis;
|
||||
@@ -41,7 +47,7 @@ pub mod genesis;
|
||||
type RFullNode<C> = polkadot_service::NewFull<C>;
|
||||
|
||||
/// Parameters given to [`start_collator`].
|
||||
pub struct StartCollatorParams<'a, Block: BlockT, BS, Client, Spawner, RClient> {
|
||||
pub struct StartCollatorParams<'a, Block: BlockT, BS, Client, Spawner, RClient, IQ> {
|
||||
pub block_status: Arc<BS>,
|
||||
pub client: Arc<Client>,
|
||||
pub announce_block: Arc<dyn Fn(Block::Hash, Option<Vec<u8>>) + Send + Sync>,
|
||||
@@ -51,6 +57,7 @@ pub struct StartCollatorParams<'a, Block: BlockT, BS, Client, Spawner, RClient>
|
||||
pub relay_chain_full_node: RFullNode<RClient>,
|
||||
pub task_manager: &'a mut TaskManager,
|
||||
pub parachain_consensus: Box<dyn ParachainConsensus<Block>>,
|
||||
pub import_queue: IQ,
|
||||
}
|
||||
|
||||
/// Start a collator node for a parachain.
|
||||
@@ -58,7 +65,7 @@ pub struct StartCollatorParams<'a, Block: BlockT, BS, Client, Spawner, RClient>
|
||||
/// A collator is similar to a validator in a normal blockchain.
|
||||
/// It is responsible for producing blocks and sending the blocks to a
|
||||
/// parachain validator for validation and inclusion into the relay chain.
|
||||
pub async fn start_collator<'a, Block, BS, Client, Backend, Spawner, RClient>(
|
||||
pub async fn start_collator<'a, Block, BS, Client, Backend, Spawner, RClient, IQ>(
|
||||
StartCollatorParams {
|
||||
block_status,
|
||||
client,
|
||||
@@ -69,7 +76,8 @@ pub async fn start_collator<'a, Block, BS, Client, Backend, Spawner, RClient>(
|
||||
task_manager,
|
||||
relay_chain_full_node,
|
||||
parachain_consensus,
|
||||
}: StartCollatorParams<'a, Block, BS, Client, Spawner, RClient>,
|
||||
import_queue,
|
||||
}: StartCollatorParams<'a, Block, BS, Client, Spawner, RClient, IQ>,
|
||||
) -> sc_service::error::Result<()>
|
||||
where
|
||||
Block: BlockT,
|
||||
@@ -88,6 +96,7 @@ where
|
||||
Spawner: SpawnNamed + Clone + Send + Sync + 'static,
|
||||
RClient: ClientHandle,
|
||||
Backend: BackendT<Block> + 'static,
|
||||
IQ: ImportQueue<Block> + 'static,
|
||||
{
|
||||
relay_chain_full_node.client.execute_with(StartConsensus {
|
||||
para_id,
|
||||
@@ -95,6 +104,18 @@ where
|
||||
client: client.clone(),
|
||||
task_manager,
|
||||
_phantom: PhantomData,
|
||||
});
|
||||
|
||||
relay_chain_full_node.client.execute_with(StartPoVRecovery {
|
||||
para_id,
|
||||
client: client.clone(),
|
||||
import_queue,
|
||||
task_manager,
|
||||
overseer_handler: relay_chain_full_node
|
||||
.overseer_handler
|
||||
.clone()
|
||||
.ok_or_else(|| "Polkadot full node did not provided an `OverseerHandler`!")?,
|
||||
_phantom: PhantomData,
|
||||
})?;
|
||||
|
||||
cumulus_client_collator::start_collator(cumulus_client_collator::StartCollatorParams {
|
||||
@@ -120,7 +141,7 @@ where
|
||||
pub struct StartFullNodeParams<'a, Block: BlockT, Client, PClient> {
|
||||
pub para_id: ParaId,
|
||||
pub client: Arc<Client>,
|
||||
pub polkadot_full_node: RFullNode<PClient>,
|
||||
pub relay_chain_full_node: RFullNode<PClient>,
|
||||
pub task_manager: &'a mut TaskManager,
|
||||
pub announce_block: Arc<dyn Fn(Block::Hash, Option<Vec<u8>>) + Send + Sync>,
|
||||
}
|
||||
@@ -134,7 +155,7 @@ pub fn start_full_node<Block, Client, Backend, PClient>(
|
||||
client,
|
||||
announce_block,
|
||||
task_manager,
|
||||
polkadot_full_node,
|
||||
relay_chain_full_node,
|
||||
para_id,
|
||||
}: StartFullNodeParams<Block, Client, PClient>,
|
||||
) -> sc_service::error::Result<()>
|
||||
@@ -151,15 +172,15 @@ where
|
||||
Backend: BackendT<Block> + 'static,
|
||||
PClient: ClientHandle,
|
||||
{
|
||||
polkadot_full_node.client.execute_with(StartConsensus {
|
||||
relay_chain_full_node.client.execute_with(StartConsensus {
|
||||
announce_block,
|
||||
para_id,
|
||||
client,
|
||||
task_manager,
|
||||
_phantom: PhantomData,
|
||||
})?;
|
||||
});
|
||||
|
||||
task_manager.add_child(polkadot_full_node.task_manager);
|
||||
task_manager.add_child(relay_chain_full_node.task_manager);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -186,7 +207,7 @@ where
|
||||
for<'b> &'b Client: BlockImport<Block>,
|
||||
Backend: BackendT<Block> + 'static,
|
||||
{
|
||||
type Output = ServiceResult<()>;
|
||||
type Output = ();
|
||||
|
||||
fn execute_with_client<PClient, Api, PBackend>(self, client: Arc<PClient>) -> Self::Output
|
||||
where
|
||||
@@ -198,24 +219,60 @@ where
|
||||
{
|
||||
let consensus = cumulus_client_consensus_common::run_parachain_consensus(
|
||||
self.para_id,
|
||||
self.client,
|
||||
client,
|
||||
self.client.clone(),
|
||||
client.clone(),
|
||||
self.announce_block,
|
||||
);
|
||||
|
||||
self.task_manager.spawn_essential_handle().spawn(
|
||||
"cumulus-consensus",
|
||||
consensus.then(|r| async move {
|
||||
if let Err(e) = r {
|
||||
tracing::error!(
|
||||
target: "cumulus-service",
|
||||
error = %e,
|
||||
"Parachain consensus failed.",
|
||||
)
|
||||
}
|
||||
}),
|
||||
self.task_manager
|
||||
.spawn_essential_handle()
|
||||
.spawn("cumulus-consensus", consensus);
|
||||
}
|
||||
}
|
||||
|
||||
struct StartPoVRecovery<'a, Block: BlockT, Client, IQ> {
|
||||
para_id: ParaId,
|
||||
client: Arc<Client>,
|
||||
task_manager: &'a mut TaskManager,
|
||||
overseer_handler: OverseerHandler,
|
||||
import_queue: IQ,
|
||||
_phantom: PhantomData<Block>,
|
||||
}
|
||||
|
||||
impl<'a, Block, Client, IQ> polkadot_service::ExecuteWithClient for StartPoVRecovery<'a, Block, Client, IQ>
|
||||
where
|
||||
Block: BlockT,
|
||||
Client: UsageProvider<Block>
|
||||
+ Send
|
||||
+ Sync
|
||||
+ BlockBackend<Block>
|
||||
+ BlockchainEvents<Block>
|
||||
+ 'static,
|
||||
IQ: ImportQueue<Block> + 'static,
|
||||
{
|
||||
type Output = sc_service::error::Result<()>;
|
||||
|
||||
fn execute_with_client<PClient, Api, PBackend>(self, client: Arc<PClient>) -> Self::Output
|
||||
where
|
||||
<Api as sp_api::ApiExt<PBlock>>::StateBackend: sp_api::StateBackend<BlakeTwo256>,
|
||||
PBackend: sc_client_api::Backend<PBlock>,
|
||||
PBackend::State: sp_api::StateBackend<BlakeTwo256>,
|
||||
Api: RuntimeApiCollection<StateBackend = PBackend::State>,
|
||||
PClient: AbstractClient<PBlock, PBackend, Api = Api> + 'static,
|
||||
{
|
||||
let pov_recovery = cumulus_client_pov_recovery::PoVRecovery::new(
|
||||
self.overseer_handler,
|
||||
sc_consensus_babe::Config::get_or_compute(&*client)?.slot_duration(),
|
||||
self.client,
|
||||
self.import_queue,
|
||||
client,
|
||||
self.para_id,
|
||||
);
|
||||
|
||||
self.task_manager
|
||||
.spawn_essential_handle()
|
||||
.spawn("cumulus-pov-recovery", pov_recovery.run());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -253,3 +310,38 @@ pub fn build_polkadot_full_node(
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// A shared import queue
|
||||
///
|
||||
/// This is basically a hack until the Substrate side is implemented properly.
|
||||
#[derive(Clone)]
|
||||
pub struct SharedImportQueue<Block: BlockT>(Arc<parking_lot::Mutex<dyn ImportQueue<Block>>>);
|
||||
|
||||
impl<Block: BlockT> SharedImportQueue<Block> {
|
||||
/// Create a new instance of the shared import queue.
|
||||
pub fn new<IQ: ImportQueue<Block> + 'static>(import_queue: IQ) -> Self {
|
||||
Self(Arc::new(parking_lot::Mutex::new(import_queue)))
|
||||
}
|
||||
}
|
||||
|
||||
impl<Block: BlockT> ImportQueue<Block> for SharedImportQueue<Block> {
|
||||
fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec<IncomingBlock<Block>>) {
|
||||
self.0.lock().import_blocks(origin, blocks)
|
||||
}
|
||||
|
||||
fn import_justifications(
|
||||
&mut self,
|
||||
who: Origin,
|
||||
hash: Block::Hash,
|
||||
number: NumberFor<Block>,
|
||||
justifications: Justifications,
|
||||
) {
|
||||
self.0
|
||||
.lock()
|
||||
.import_justifications(who, hash, number, justifications)
|
||||
}
|
||||
|
||||
fn poll_actions(&mut self, cx: &mut std::task::Context, link: &mut dyn Link<Block>) {
|
||||
self.0.lock().poll_actions(cx, link)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,7 +14,7 @@ derive_more = "0.15.0"
|
||||
exit-future = "0.1.4"
|
||||
futures = { version = "0.3.1", features = ["compat"] }
|
||||
log = "0.4.8"
|
||||
parking_lot = "0.9.0"
|
||||
parking_lot = "0.10.2"
|
||||
trie-root = "0.15.2"
|
||||
codec = { package = "parity-scale-codec", version = "2.0.0" }
|
||||
structopt = "0.3.3"
|
||||
|
||||
@@ -243,14 +243,14 @@ where
|
||||
let prometheus_registry = parachain_config.prometheus_registry().cloned();
|
||||
let transaction_pool = params.transaction_pool.clone();
|
||||
let mut task_manager = params.task_manager;
|
||||
let import_queue = params.import_queue;
|
||||
let import_queue = cumulus_client_service::SharedImportQueue::new(params.import_queue);
|
||||
let (network, network_status_sinks, system_rpc_tx, start_network) =
|
||||
sc_service::build_network(sc_service::BuildNetworkParams {
|
||||
config: ¶chain_config,
|
||||
client: client.clone(),
|
||||
transaction_pool: transaction_pool.clone(),
|
||||
spawn_handle: task_manager.spawn_handle(),
|
||||
import_queue,
|
||||
import_queue: import_queue.clone(),
|
||||
on_demand: None,
|
||||
block_announce_validator_builder: Some(Box::new(|_| block_announce_validator)),
|
||||
})?;
|
||||
@@ -304,6 +304,7 @@ where
|
||||
relay_chain_full_node,
|
||||
spawner,
|
||||
parachain_consensus,
|
||||
import_queue,
|
||||
};
|
||||
|
||||
start_collator(params).await?;
|
||||
@@ -313,7 +314,7 @@ where
|
||||
announce_block,
|
||||
task_manager: &mut task_manager,
|
||||
para_id: id,
|
||||
polkadot_full_node: relay_chain_full_node,
|
||||
relay_chain_full_node,
|
||||
};
|
||||
|
||||
start_full_node(params)?;
|
||||
|
||||
@@ -215,6 +215,11 @@ impl<B: BlockT> ParachainBlockData<B> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert `self` into the stored block.
|
||||
pub fn into_block(self) -> B {
|
||||
B::new(self.header, self.extrinsics)
|
||||
}
|
||||
|
||||
/// Convert `self` into the stored header.
|
||||
pub fn into_header(self) -> B::Header {
|
||||
self.header
|
||||
|
||||
@@ -47,7 +47,8 @@ pub use local_executor::LocalExecutor;
|
||||
pub type Backend = substrate_test_client::Backend<Block>;
|
||||
|
||||
/// Test client executor.
|
||||
pub type Executor = client::LocalCallExecutor<Backend, sc_executor::NativeExecutor<LocalExecutor>>;
|
||||
pub type Executor =
|
||||
client::LocalCallExecutor<Block, Backend, sc_executor::NativeExecutor<LocalExecutor>>;
|
||||
|
||||
/// Test client builder for Cumulus
|
||||
pub type TestClientBuilder =
|
||||
|
||||
@@ -8,6 +8,7 @@ edition = "2018"
|
||||
codec = { package = "parity-scale-codec", version = "2.0.0" }
|
||||
rand = "0.7.3"
|
||||
serde = { version = "1.0.101", features = ["derive"] }
|
||||
async-trait = "0.1.42"
|
||||
|
||||
# Substrate
|
||||
frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
|
||||
@@ -49,6 +50,7 @@ polkadot-overseer = { git = "https://github.com/paritytech/polkadot", branch = "
|
||||
cumulus-client-consensus-relay-chain = { path = "../../client/consensus/relay-chain" }
|
||||
cumulus-client-network = { path = "../../client/network" }
|
||||
cumulus-client-service = { path = "../../client/service" }
|
||||
cumulus-client-consensus-common = { path = "../../client/consensus/common" }
|
||||
cumulus-primitives-core = { path = "../../primitives/core" }
|
||||
cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" }
|
||||
cumulus-test-runtime = { path = "../runtime" }
|
||||
|
||||
+118
-56
@@ -22,13 +22,14 @@ mod chain_spec;
|
||||
mod genesis;
|
||||
|
||||
use core::future::Future;
|
||||
use cumulus_client_consensus_common::{ParachainCandidate, ParachainConsensus};
|
||||
use cumulus_client_network::BlockAnnounceValidator;
|
||||
use cumulus_client_service::{
|
||||
prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams,
|
||||
};
|
||||
use cumulus_primitives_core::ParaId;
|
||||
use cumulus_test_runtime::{NodeBlock as Block, RuntimeApi};
|
||||
use polkadot_primitives::v1::CollatorPair;
|
||||
use cumulus_test_runtime::{Hash, Header, NodeBlock as Block, RuntimeApi};
|
||||
use polkadot_primitives::v1::{CollatorPair, Hash as PHash, PersistedValidationData};
|
||||
use sc_client_api::execution_extensions::ExecutionStrategies;
|
||||
use sc_executor::native_executor_instance;
|
||||
pub use sc_executor::NativeExecutor;
|
||||
@@ -45,11 +46,7 @@ use sp_arithmetic::traits::SaturatedConversion;
|
||||
use sp_blockchain::HeaderBackend;
|
||||
use sp_core::{Pair, H256};
|
||||
use sp_keyring::Sr25519Keyring;
|
||||
use sp_runtime::{
|
||||
codec::Encode,
|
||||
generic,
|
||||
traits::BlakeTwo256
|
||||
};
|
||||
use sp_runtime::{codec::Encode, generic, traits::BlakeTwo256};
|
||||
use sp_state_machine::BasicExternalities;
|
||||
use sp_trie::PrefixedMemoryDB;
|
||||
use std::sync::Arc;
|
||||
@@ -62,6 +59,25 @@ pub use cumulus_test_runtime as runtime;
|
||||
pub use genesis::*;
|
||||
pub use sp_keyring::Sr25519Keyring as Keyring;
|
||||
|
||||
/// A consensus that will never produce any block.
|
||||
#[derive(Clone)]
|
||||
struct NullConsensus;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl ParachainConsensus<Block> for NullConsensus {
|
||||
async fn produce_candidate(
|
||||
&mut self,
|
||||
_: &Header,
|
||||
_: PHash,
|
||||
_: &PersistedValidationData,
|
||||
) -> Option<ParachainCandidate<Block>> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// The signature of the announce block fn.
|
||||
pub type AnnounceBlockFn = Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>;
|
||||
|
||||
// Native executor instance.
|
||||
native_executor_instance!(
|
||||
pub RuntimeExecutor,
|
||||
@@ -134,7 +150,9 @@ async fn start_node_impl<RB>(
|
||||
collator_key: Option<CollatorPair>,
|
||||
relay_chain_config: Configuration,
|
||||
para_id: ParaId,
|
||||
wrap_announce_block: Option<Box<dyn FnOnce(AnnounceBlockFn) -> AnnounceBlockFn>>,
|
||||
rpc_ext_builder: RB,
|
||||
consensus: Consensus,
|
||||
) -> sc_service::error::Result<(
|
||||
TaskManager,
|
||||
Arc<TFullClient<Block, RuntimeApi, RuntimeExecutor>>,
|
||||
@@ -185,14 +203,14 @@ where
|
||||
let block_announce_validator_builder = move |_| Box::new(block_announce_validator) as Box<_>;
|
||||
|
||||
let prometheus_registry = parachain_config.prometheus_registry().cloned();
|
||||
let import_queue = params.import_queue;
|
||||
let import_queue = cumulus_client_service::SharedImportQueue::new(params.import_queue);
|
||||
let (network, network_status_sinks, system_rpc_tx, start_network) =
|
||||
sc_service::build_network(sc_service::BuildNetworkParams {
|
||||
config: ¶chain_config,
|
||||
client: client.clone(),
|
||||
transaction_pool: transaction_pool.clone(),
|
||||
spawn_handle: task_manager.spawn_handle(),
|
||||
import_queue,
|
||||
import_queue: import_queue.clone(),
|
||||
on_demand: None,
|
||||
block_announce_validator_builder: Some(Box::new(block_announce_validator_builder)),
|
||||
})?;
|
||||
@@ -224,43 +242,57 @@ where
|
||||
Arc::new(move |hash, data| network.announce_block(hash, data))
|
||||
};
|
||||
|
||||
let announce_block = wrap_announce_block
|
||||
.map(|w| (w)(announce_block.clone()))
|
||||
.unwrap_or_else(|| announce_block);
|
||||
|
||||
if let Some(collator_key) = collator_key {
|
||||
let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
|
||||
task_manager.spawn_handle(),
|
||||
client.clone(),
|
||||
transaction_pool,
|
||||
prometheus_registry.as_ref(),
|
||||
None,
|
||||
);
|
||||
let parachain_consensus: Box<dyn ParachainConsensus<Block>> = match consensus {
|
||||
Consensus::RelayChain => {
|
||||
let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
|
||||
task_manager.spawn_handle(),
|
||||
client.clone(),
|
||||
transaction_pool,
|
||||
prometheus_registry.as_ref(),
|
||||
None,
|
||||
);
|
||||
|
||||
let relay_chain_client = relay_chain_full_node.client.clone();
|
||||
let relay_chain_backend = relay_chain_full_node.backend.clone();
|
||||
let relay_chain_client = relay_chain_full_node.client.clone();
|
||||
let relay_chain_backend = relay_chain_full_node.backend.clone();
|
||||
|
||||
let parachain_consensus = cumulus_client_consensus_relay_chain::RelayChainConsensus::new(
|
||||
para_id,
|
||||
proposer_factory,
|
||||
move |_, (relay_parent, validation_data)| {
|
||||
let parachain_inherent =
|
||||
cumulus_primitives_parachain_inherent::ParachainInherentData::create_at(
|
||||
relay_parent,
|
||||
&*relay_chain_client,
|
||||
&*relay_chain_backend,
|
||||
&validation_data,
|
||||
Box::new(
|
||||
cumulus_client_consensus_relay_chain::RelayChainConsensus::new(
|
||||
para_id,
|
||||
);
|
||||
async move {
|
||||
let time = sp_timestamp::InherentDataProvider::from_system_time();
|
||||
proposer_factory,
|
||||
move |_, (relay_parent, validation_data)| {
|
||||
let parachain_inherent =
|
||||
cumulus_primitives_parachain_inherent::ParachainInherentData::create_at(
|
||||
relay_parent,
|
||||
&*relay_chain_client,
|
||||
&*relay_chain_backend,
|
||||
&validation_data,
|
||||
para_id,
|
||||
);
|
||||
|
||||
let parachain_inherent = parachain_inherent.ok_or_else(|| {
|
||||
Box::<dyn std::error::Error + Send + Sync>::from(String::from("error"))
|
||||
})?;
|
||||
Ok((time, parachain_inherent))
|
||||
}
|
||||
},
|
||||
client.clone(),
|
||||
relay_chain_full_node.client.clone(),
|
||||
relay_chain_full_node.backend.clone(),
|
||||
);
|
||||
async move {
|
||||
let time = sp_timestamp::InherentDataProvider::from_system_time();
|
||||
|
||||
let parachain_inherent = parachain_inherent.ok_or_else(|| {
|
||||
Box::<dyn std::error::Error + Send + Sync>::from(String::from(
|
||||
"error",
|
||||
))
|
||||
})?;
|
||||
Ok((time, parachain_inherent))
|
||||
}
|
||||
},
|
||||
client.clone(),
|
||||
relay_chain_full_node.client.clone(),
|
||||
relay_chain_full_node.backend.clone(),
|
||||
),
|
||||
)
|
||||
}
|
||||
Consensus::Null => Box::new(NullConsensus),
|
||||
};
|
||||
|
||||
let relay_chain_full_node =
|
||||
relay_chain_full_node.with_client(polkadot_test_service::TestClient);
|
||||
@@ -273,8 +305,9 @@ where
|
||||
task_manager: &mut task_manager,
|
||||
para_id,
|
||||
collator_key,
|
||||
parachain_consensus: Box::new(parachain_consensus),
|
||||
parachain_consensus,
|
||||
relay_chain_full_node,
|
||||
import_queue,
|
||||
};
|
||||
|
||||
start_collator(params).await?;
|
||||
@@ -287,7 +320,7 @@ where
|
||||
announce_block,
|
||||
task_manager: &mut task_manager,
|
||||
para_id,
|
||||
polkadot_full_node: relay_chain_full_node,
|
||||
relay_chain_full_node,
|
||||
};
|
||||
|
||||
start_full_node(params)?;
|
||||
@@ -313,6 +346,12 @@ pub struct TestNode {
|
||||
pub rpc_handlers: RpcHandlers,
|
||||
}
|
||||
|
||||
enum Consensus {
|
||||
/// Use the relay-chain provided consensus.
|
||||
RelayChain,
|
||||
/// Use the null consensus that will never produce any block.
|
||||
Null,
|
||||
}
|
||||
|
||||
/// A builder to create a [`TestNode`].
|
||||
pub struct TestNodeBuilder {
|
||||
@@ -323,8 +362,10 @@ pub struct TestNodeBuilder {
|
||||
parachain_nodes: Vec<MultiaddrWithPeerId>,
|
||||
parachain_nodes_exclusive: bool,
|
||||
relay_chain_nodes: Vec<MultiaddrWithPeerId>,
|
||||
wrap_announce_block: Option<Box<dyn FnOnce(AnnounceBlockFn) -> AnnounceBlockFn>>,
|
||||
storage_update_func_parachain: Option<Box<dyn Fn()>>,
|
||||
storage_update_func_relay_chain: Option<Box<dyn Fn()>>,
|
||||
consensus: Consensus,
|
||||
}
|
||||
|
||||
impl TestNodeBuilder {
|
||||
@@ -342,8 +383,10 @@ impl TestNodeBuilder {
|
||||
parachain_nodes: Vec::new(),
|
||||
parachain_nodes_exclusive: false,
|
||||
relay_chain_nodes: Vec::new(),
|
||||
wrap_announce_block: None,
|
||||
storage_update_func_parachain: None,
|
||||
storage_update_func_relay_chain: None,
|
||||
consensus: Consensus::RelayChain,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -404,32 +447,43 @@ impl TestNodeBuilder {
|
||||
mut self,
|
||||
nodes: impl IntoIterator<Item = &'a polkadot_test_service::PolkadotTestNode>,
|
||||
) -> Self {
|
||||
self.relay_chain_nodes.extend(nodes.into_iter().map(|n| n.addr.clone()));
|
||||
self.relay_chain_nodes
|
||||
.extend(nodes.into_iter().map(|n| n.addr.clone()));
|
||||
self
|
||||
}
|
||||
|
||||
/// Wrap the announce block function of this node.
|
||||
pub fn wrap_announce_block(
|
||||
mut self,
|
||||
wrap: impl FnOnce(AnnounceBlockFn) -> AnnounceBlockFn + 'static,
|
||||
) -> Self {
|
||||
self.wrap_announce_block = Some(Box::new(wrap));
|
||||
self
|
||||
}
|
||||
|
||||
/// Allows accessing the parachain storage before the test node is built.
|
||||
pub fn update_storage_parachain(
|
||||
mut self,
|
||||
updater: impl Fn() + 'static,
|
||||
) -> Self {
|
||||
pub fn update_storage_parachain(mut self, updater: impl Fn() + 'static) -> Self {
|
||||
self.storage_update_func_parachain = Some(Box::new(updater));
|
||||
self
|
||||
}
|
||||
|
||||
/// Allows accessing the relay chain storage before the test node is built.
|
||||
pub fn update_storage_relay_chain(
|
||||
mut self,
|
||||
updater: impl Fn() + 'static,
|
||||
) -> Self {
|
||||
pub fn update_storage_relay_chain(mut self, updater: impl Fn() + 'static) -> Self {
|
||||
self.storage_update_func_relay_chain = Some(Box::new(updater));
|
||||
self
|
||||
}
|
||||
|
||||
/// Use the null consensus that will never author any block.
|
||||
pub fn use_null_consensus(mut self) -> Self {
|
||||
self.consensus = Consensus::Null;
|
||||
self
|
||||
}
|
||||
|
||||
/// Build the [`TestNode`].
|
||||
pub async fn build(self) -> TestNode {
|
||||
let parachain_config = node_config(
|
||||
self.storage_update_func_parachain.unwrap_or_else(|| Box::new(|| ())),
|
||||
self.storage_update_func_parachain
|
||||
.unwrap_or_else(|| Box::new(|| ())),
|
||||
self.task_executor.clone(),
|
||||
self.key.clone(),
|
||||
self.parachain_nodes,
|
||||
@@ -439,7 +493,8 @@ impl TestNodeBuilder {
|
||||
)
|
||||
.expect("could not generate Configuration");
|
||||
let mut relay_chain_config = polkadot_test_service::node_config(
|
||||
self.storage_update_func_relay_chain.unwrap_or_else(|| Box::new(|| ())),
|
||||
self.storage_update_func_relay_chain
|
||||
.unwrap_or_else(|| Box::new(|| ())),
|
||||
self.task_executor,
|
||||
self.key,
|
||||
self.relay_chain_nodes,
|
||||
@@ -455,7 +510,9 @@ impl TestNodeBuilder {
|
||||
self.collator_key,
|
||||
relay_chain_config,
|
||||
self.para_id,
|
||||
self.wrap_announce_block,
|
||||
|_| Default::default(),
|
||||
self.consensus,
|
||||
)
|
||||
.await
|
||||
.expect("could not create Cumulus test service");
|
||||
@@ -611,7 +668,9 @@ impl TestNode {
|
||||
self.send_extrinsic(
|
||||
runtime::SudoCall::sudo_unchecked_weight(Box::new(call.into()), 1_000),
|
||||
Sr25519Keyring::Alice,
|
||||
).await.map(drop)
|
||||
)
|
||||
.await
|
||||
.map(drop)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -634,7 +693,10 @@ pub fn construct_extrinsic(
|
||||
let extra: runtime::SignedExtra = (
|
||||
frame_system::CheckSpecVersion::<runtime::Runtime>::new(),
|
||||
frame_system::CheckGenesis::<runtime::Runtime>::new(),
|
||||
frame_system::CheckEra::<runtime::Runtime>::from(generic::Era::mortal(period, current_block)),
|
||||
frame_system::CheckEra::<runtime::Runtime>::from(generic::Era::mortal(
|
||||
period,
|
||||
current_block,
|
||||
)),
|
||||
frame_system::CheckNonce::<runtime::Runtime>::from(nonce),
|
||||
frame_system::CheckWeight::<runtime::Runtime>::new(),
|
||||
pallet_transaction_payment::ChargeTransactionPayment::<runtime::Runtime>::from(tip),
|
||||
|
||||
Reference in New Issue
Block a user