From f2bdd99532054a96a7429ce47fb3a3f352d59bc3 Mon Sep 17 00:00:00 2001
From: Robert Klotzner
Date: Thu, 13 Jan 2022 09:15:13 +0100
Subject: [PATCH] Add some docs to prevent a time loop. (#4702)

* Add some docs to prevent a time loop.

* Review remarks.
---
 .../node/network/availability-recovery/src/lib.rs | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs
index 41298512f4..6c75839dac 100644
--- a/polkadot/node/network/availability-recovery/src/lib.rs
+++ b/polkadot/node/network/availability-recovery/src/lib.rs
@@ -569,8 +569,20 @@ const fn is_unavailable(
 }
 
 /// Re-encode the data into erasure chunks in order to verify
-/// the root hash of the provided merkle tree, which is built
+/// the root hash of the provided Merkle tree, which is built
 /// on-top of the encoded chunks.
+///
+/// This (expensive) check is necessary, as otherwise we can't be sure that some chunks won't have
+/// been tampered with by the backers, which would result in some validators considering the data
+/// valid and some invalid as having fetched a different set of chunks. The checking of the Merkle
+/// proof for individual chunks only gives us guarantees that we have fetched a chunk belonging to
+/// a set the backers have committed to.
+///
+/// NOTE: It is fine to do this check with already decoded data, because if the decoding failed for
+/// some validators, we can be sure that chunks have been tampered with (by the backers) or the
+/// data was invalid to begin with. In the former case, validators fetching valid chunks will see
+/// invalid data as well, because the root won't match. In the latter case the situation is the
+/// same for everyone anyway.
 fn reconstructed_data_matches_root(
 	n_validators: usize,
 	expected_root: &Hash,