backing: Remove redundant erasure encoding (#7469)

* Remove redundant erasure encoding

Signed-off-by: Andrei Sandu <andrei-mihail@parity.io>

* Review feedback

Signed-off-by: Andrei Sandu <andrei-mihail@parity.io>

* fix comments

Signed-off-by: Andrei Sandu <andrei-mihail@parity.io>

---------

Signed-off-by: Andrei Sandu <andrei-mihail@parity.io>
This commit is contained in:
Andrei Sandu
2023-07-07 11:20:30 +03:00
committed by GitHub
parent 518773a943
commit e8d567a1f5
9 changed files with 213 additions and 62 deletions
+8 -2
View File
@@ -17,7 +17,10 @@
use fatality::Nested;
use futures::channel::{mpsc, oneshot};
use polkadot_node_subsystem::{messages::ValidationFailed, SubsystemError};
use polkadot_node_subsystem::{
messages::{StoreAvailableDataError, ValidationFailed},
SubsystemError,
};
use polkadot_node_subsystem_util::Error as UtilError;
use polkadot_primitives::BackedCandidate;
@@ -50,7 +53,7 @@ pub enum Error {
ValidateFromChainState(#[source] oneshot::Canceled),
#[error("StoreAvailableData channel closed before receipt")]
StoreAvailableData(#[source] oneshot::Canceled),
StoreAvailableDataChannel(#[source] oneshot::Canceled),
#[error("a channel was closed before receipt in try_join!")]
JoinMultiple(#[source] oneshot::Canceled),
@@ -74,6 +77,9 @@ pub enum Error {
#[fatal]
#[error(transparent)]
OverseerExited(SubsystemError),
#[error("Availability store error")]
StoreAvailableData(#[source] StoreAvailableDataError),
}
/// Utility for eating top level errors and logging them.
+28 -32
View File
@@ -38,7 +38,7 @@ use polkadot_node_subsystem::{
messages::{
AvailabilityDistributionMessage, AvailabilityStoreMessage, CandidateBackingMessage,
CandidateValidationMessage, CollatorProtocolMessage, ProvisionableData, ProvisionerMessage,
RuntimeApiRequest, StatementDistributionMessage,
RuntimeApiRequest, StatementDistributionMessage, StoreAvailableDataError,
},
overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, SpawnedSubsystem,
Stage, SubsystemError,
@@ -490,8 +490,6 @@ impl TableContextTrait for TableContext {
}
}
struct InvalidErasureRoot;
// It looks like it's not possible to do an `impl From` given the current state of
// the code. So this does the necessary conversion.
fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement {
@@ -561,26 +559,34 @@ async fn store_available_data(
n_validators: u32,
candidate_hash: CandidateHash,
available_data: AvailableData,
expected_erasure_root: Hash,
) -> Result<(), Error> {
let (tx, rx) = oneshot::channel();
// Important: the `av-store` subsystem will check if the erasure root of the `available_data` matches `expected_erasure_root`
// which was provided by the collator in the `CandidateReceipt`. This check is consensus critical and the `backing` subsystem
// relies on it for ensuring candidate validity.
sender
.send_message(AvailabilityStoreMessage::StoreAvailableData {
candidate_hash,
n_validators,
available_data,
expected_erasure_root,
tx,
})
.await;
let _ = rx.await.map_err(Error::StoreAvailableData)?;
Ok(())
rx.await
.map_err(Error::StoreAvailableDataChannel)?
.map_err(Error::StoreAvailableData)
}
// Make a `PoV` available.
//
// This will compute the erasure root internally and compare it to the expected erasure root.
// This returns `Err()` iff there is an internal error. Otherwise, it returns either `Ok(Ok(()))` or `Ok(Err(_))`.
// This calls the AV store to write the available data to storage. The AV store also checks the erasure root matches
// the `expected_erasure_root`.
// This returns `Err()` on erasure root mismatch or due to any AV store subsystem error.
//
// Otherwise, it returns `Ok(())`.
async fn make_pov_available(
sender: &mut impl overseer::CandidateBackingSenderTrait,
@@ -590,29 +596,17 @@ async fn make_pov_available(
validation_data: polkadot_primitives::PersistedValidationData,
expected_erasure_root: Hash,
span: Option<&jaeger::Span>,
) -> Result<Result<(), InvalidErasureRoot>, Error> {
let available_data = AvailableData { pov, validation_data };
) -> Result<(), Error> {
let _span = span.as_ref().map(|s| s.child("store-data").with_candidate(candidate_hash));
{
let _span = span.as_ref().map(|s| s.child("erasure-coding").with_candidate(candidate_hash));
let chunks = erasure_coding::obtain_chunks_v1(n_validators, &available_data)?;
let branches = erasure_coding::branches(chunks.as_ref());
let erasure_root = branches.root();
if erasure_root != expected_erasure_root {
return Ok(Err(InvalidErasureRoot))
}
}
{
let _span = span.as_ref().map(|s| s.child("store-data").with_candidate(candidate_hash));
store_available_data(sender, n_validators as u32, candidate_hash, available_data).await?;
}
Ok(Ok(()))
store_available_data(
sender,
n_validators as u32,
candidate_hash,
AvailableData { pov, validation_data },
expected_erasure_root,
)
.await
}
async fn request_pov(
@@ -749,11 +743,11 @@ async fn validate_and_make_available(
candidate.descriptor.erasure_root,
span.as_ref(),
)
.await?;
.await;
match erasure_valid {
Ok(()) => Ok((candidate, commitments, pov.clone())),
Err(InvalidErasureRoot) => {
Err(Error::StoreAvailableData(StoreAvailableDataError::InvalidErasureRoot)) => {
gum::debug!(
target: LOG_TARGET,
candidate_hash = ?candidate.hash(),
@@ -762,6 +756,8 @@ async fn validate_and_make_available(
);
Err(candidate)
},
// Bubble up any other error.
Err(e) => return Err(e),
}
},
ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch) => {