  • Technical implementation of PDD-based reward distribution systems.
  • Impact assessment on DOT tokenomics and staking participation rates.
    RFC-0000: Validator Rewards

Start Date: Date of initial proposal
Description: Rewards protocol for Polkadot validators
Authors: Jeff Burdges, ...

    Summary

An off-chain approximation protocol should assign rewards based upon the approvals and availability work done by validators.

All validators track which approval votes they actually use and report the aggregate, after which an on-chain median computation gives a good approximation under Byzantine assumptions. Approval checkers also report aggregate information about which availability chunks they used, but for availability we need a tit-for-tat game to enforce honesty, because approval committees could often bias results thanks to their small size.

    Motivation

We want all or most Polkadot subsystems to be profitable for validators, because otherwise operators might profit from running modified code. In particular, almost all rewards in Kusama/Polkadot should come from work done securing parachains, primarily approval checking, but also backing, availability, and support of XCMP.

Among these tasks, our highest priorities must be approval checks, which ensure soundness, and sending availability chunks to approval checkers. We argue that backers must be paid strictly less than approval checkers.

At present though, validators' rewards bear relatively little relationship to validators' operating costs, in terms of bandwidth and CPU time. Worse, Polkadot's scaling makes us particularly vulnerable to "no-shows" caused by validators skipping their approval checks.

We're particularly concerned about hardware specs' impact upon the number of parachain cores. We've requested relatively low-spec machines so far, only four physical CPU cores, although some run even lower specs, like only two physical CPU cores. Alone, rewards cannot fix our low-spec validator problem, but rewards and outreach together should have far more impact than either alone.

In future, we'll further increase validator spec requirements, which directly improves Polkadot's throughput, and which repeats this dynamic of purging under-spec nodes, except outreach becomes more important because de facto too many slow validators can "out vote" the faster ones.

    Stakeholders

We alter the validators' rewards protocol, but with negligible impact upon rewards for honest validators who comply with hardware and bandwidth recommendations.

We shall still reward participation in relay chain consensus of course, which de facto means block production but not finality, but these current reward levels shall wind up greatly reduced. Any validators who manipulate block rewards now could lose rewards here, simply because rewards shift from block production to availability, but this sounds desirable.

We've discussed roughly this rewards protocol in https://hackmd.io/@rgbPIkIdTwSICPuAq67Jbw/S1fHcvXSF and https://github.com/paritytech/polkadot-sdk/issues/1811 as well as related topics like https://github.com/paritytech/polkadot-sdk/issues/5122.

    Logic


    Categories

We alter the current rewards scheme by reducing to roughly these proportions of total rewards:

We add roughly these proportions of total rewards covering parachain work:

Observation

    We track this data for each candidate during the approvals process:

use std::collections::{HashMap, HashSet};

/// Our subjective record of our availability transfers for this candidate.
struct CandidateRewards {
    /// Anyone who backed this parablock.
    backers: [AuthorityId; NUM_BACKERS],
    /// Anyone whom we think no-showed, even only briefly.
    noshows: HashSet<AuthorityId>,
    /// Anyone who sent us chunks for this candidate.
    downloaded_from: HashMap<AuthorityId, u16>,
    /// Anyone to whom we sent chunks for this candidate.
    uploaded_to: HashMap<AuthorityId, u16>,
}

    We no longer require this data during disputes.

After we approve a relay chain block, we collect all its CandidateRewards into an ApprovalsTally, with one ApprovalTallyLine for each validator. In this, we compute approval_usages from the final run of the approvals loop, plus 0.8 for each backer.
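
One way this aggregation could look, as a rough sketch: the index_of helper is hypothetical, and because approval_usages is an integer counter we count in tenths here, so a whole used approval vote would add 10 and a backer adds 8.

/// Hypothetical sketch: fold one candidate's record into the per-validator tally.
fn fold_candidate(
    tally: &mut Vec<ApprovalTallyLine>,
    c: &CandidateRewards,
    index_of: impl Fn(&AuthorityId) -> usize,
) {
    for backer in &c.backers {
        // Each backer receives 0.8 of one approval usage (8 tenths).
        tally[index_of(backer)].approval_usages += 8;
    }
    for v in &c.noshows {
        tally[index_of(v)].noshows += 1;
    }
    for (v, chunks) in &c.downloaded_from {
        tally[index_of(v)].used_downloads += *chunks as u32;
    }
    for (v, chunks) in &c.uploaded_to {
        tally[index_of(v)].used_uploads += *chunks as u32;
    }
    // The used approval votes themselves come from the final run of the
    // approvals loop, not from CandidateRewards, and would add 10 each.
}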

As discussed below, we say a validator 𝑢 uses an approval vote by a validator 𝑣 on a candidate 𝑐 if the final approving run of the ELVES approval loop by 𝑢 counted the vote by 𝑣 towards approving the candidate 𝑐. We only count votes that actually get used.

/// Our subjective record of what we used from, and provided to, all other validators on the finalized chain.
pub struct ApprovalsTally(Vec<ApprovalTallyLine>);

/// Our subjective record of what we used from, and provided to, one other validator on the finalized chain.
pub struct ApprovalTallyLine {
    /// Approvals by this validator which our approvals gadget used in marking candidates approved.
    approval_usages: u32,
    /// How many times we think this validator no-showed, even only briefly.
    noshows: u32,
    /// Availability chunks we downloaded from this validator for approval checks of ours that were used.
    used_downloads: u32,
    /// Availability chunks we uploaded to this validator whose approval checks we used.
    used_uploads: u32,
}

At finality we sum these ApprovalsTally values into one for the whole epoch so far, yielding another ApprovalsTally. We can optionally sum them earlier at chain heads, but this requires mutability.

    Messages

After the epoch is finalized, we share the first three fields of each ApprovalTallyLine in its ApprovalsTally.

/// Our subjective record of what we used from some other validator on the finalized chain.
pub struct ApprovalTallyMessageLine {
    /// Approvals by this validator which our approvals gadget used in marking candidates approved.
    approval_usages: u32,
    /// How many times we think this validator no-showed, even only briefly.
    noshows: u32,
    /// Availability chunks we downloaded from this validator for approval checks of ours that were used.
    used_downloads: u32,
}

/// Our subjective record of what we used from all other validators on the finalized chain.
pub struct ApprovalsTallyMessage(Vec<ApprovalTallyMessageLine>);

Actual ApprovalsTallyMessages sent over the wire must of course be signed, likely by the GRANDPA ed25519 key.

    Rewards computation

We compute the approvals rewards for each validator by taking the median of that validator's approval_usages field across all validators' ApprovalsTallyMessages. We compute noshows_percentiles for each validator similarly, but using a 2/3 percentile instead of the median.

let mut approval_usages_medians = Vec::new();
let mut noshows_percentiles = Vec::new();
for i in 0..num_validators {
    // Median across all reports of validator `i`'s used approvals.
    let mut v: Vec<u32> = approvals_tally_messages.iter().map(|atm| atm.0[i].approval_usages).collect();
    v.sort();
    approval_usages_medians.push(v[num_validators / 2]);
    // No-show count that at least 2/3 of validators confirm for `i`.
    let mut v: Vec<u32> = approvals_tally_messages.iter().map(|atm| atm.0[i].noshows).collect();
    v.sort();
    noshows_percentiles.push(v[num_validators / 3]);
}

Assuming more than 50% honesty, these medians tell us how many used approval votes came from each validator.

We re-weight each checker's claimed used_downloads so that they sum to that checker's median approval count times the expected f+1 chunks per reconstruction, i.e. we scale each claim by the checker's median times f+1, divided by the total chunk downloads the checker claimed, and then sum per availability provider:

#[cfg(offchain)]
let mut my_missing_uploads: Vec<u64> =
    my_approvals_tally.0.iter().map(|l| l.used_uploads as u64).collect();
let mut reweighted_total_used_downloads = vec![0u64; num_validators];
// One message per approval checker `v`: scale the downloads it credits so
// that they sum to (f+1) times `v`'s median count of used approvals.
for (v, (mmu, atm)) in my_missing_uploads.iter_mut().zip(&approvals_tally_messages).enumerate() {
    let d: u64 = atm.0.iter().map(|l| l.used_downloads as u64).sum();
    if d == 0 { continue; }
    for i in 0..num_validators {
        // beta'_{i,v} = (f+1) * alpha_v * beta_{i,v} / sum_u beta_{u,v}
        let credit = (f + 1) * (approval_usages_medians[v] as u64)
            * (atm.0[i].used_downloads as u64) / d;
        #[cfg(offchain)]
        if i == me { *mmu = mmu.saturating_sub(credit); }
        reweighted_total_used_downloads[i] += credit;
    }
}

We distribute rewards on-chain using approval_usages_medians and reweighted_total_used_downloads. Approval checkers could later change from whom they download chunks, using my_missing_uploads.
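
A minimal sketch of that on-chain step follows; the point weights and the award sink are assumptions of this sketch, not part of the RFC.

// Assumed weights: a used approval earns APPROVAL_POINTS era points and a
// re-weighted used chunk earns CHUNK_POINTS; the real ratio is a policy choice.
const APPROVAL_POINTS: u64 = 20;
const CHUNK_POINTS: u64 = 1;

fn distribute_era_points(
    approval_usages_medians: &[u32],
    reweighted_total_used_downloads: &[u64],
    mut award: impl FnMut(usize, u64), // e.g. credit era points by validator index
) {
    for i in 0..approval_usages_medians.len() {
        let points = approval_usages_medians[i] as u64 * APPROVAL_POINTS
            + reweighted_total_used_downloads[i] * CHUNK_POINTS;
        award(i, points);
    }
}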

We deduct a small amount of rewards using noshows_percentiles too, likely 1% of the rewards for an approval, but excuse some small number of no-shows, à la noshows_percentiles[i].saturating_sub(MAX_NO_PENALTY_NOSHOWS).


    Strategies


    In theory, validators could adopt whatever strategy they like to penalize validators who stiff them on availability redistribution rewards, except they should not stiff back, only choose other availability providers. We discuss one good strategy below, but initially this could go unimplemented.

Consensus

We avoid placing rewards logic on the relay chain for now, so we must either collect the signed ApprovalsTallyMessages and do the above computations somewhere sufficiently trusted, like a parachain, or use some distributed protocol with its own assumptions.


    In-core


    A dedicated rewards parachain could easily collect the ApprovalsTallyMessages and do the above computations. In this, we logically have two phases, first we build the on-chain Merkle tree M of ApprovalsTallyMessages, and second we process those into the rewards data.

Any in-core approach risks enough malicious collators biasing the rewards by censoring the ApprovalsTallyMessages of some validators during the first phase. After this first phase completes, our second phase proceeds deterministically.

As an option, each validator could handle this second phase itself by creating a single heavy transaction with n state accesses into this Merkle tree M, with this transaction sending the era points.

A remark for future developments


    JAM-like non/sub-parachain accumulation could mitigate the risk of the rewards parachain being captured.


    JAM services all have either parachain accumulation or else non/sub-parachain accumulation.


    In our case, each ApprovalsTallyMessage would become a block for the first phase rewards service, so then the accumulation tracks an MMR of the rewards service block hashes, which becomes M from Option 1. At 1024 validators this requires 9 * 32 = 288 bytes for the MMR and 1024/8 = 128 bytes for a bitfield, so 416 bytes of relay chain state in total. Any validator could then add their ApprovalsTallyMessage in any order, but only one per relay chain block, so the submission timeframe should be long enough to prevent censorship.
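
For concreteness, that state-size arithmetic as constants (taking the RFC's nine MMR peaks as given):

const VALIDATORS: usize = 1024;
const MMR_PEAKS: usize = 9;                                  // peaks kept for the MMR
const MMR_BYTES: usize = MMR_PEAKS * 32;                     // 288 bytes of hashes
const BITFIELD_BYTES: usize = VALIDATORS / 8;                // 128 bytes, one bit each
const TOTAL_STATE_BYTES: usize = MMR_BYTES + BITFIELD_BYTES; // 416 bytes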

Arguably after JAM, we should migrate critical functions to non/sub-parachain (JAM service) form without mutable state, which covers validator elections, DKGs, and rewards. Yet non/sub-parachains cannot eliminate all censorship risks, so the near-term benefits seem questionable.


    Off-core

All validators could collect ApprovalsTallyMessages and independently compute rewards off-core. At that point, all validators have opinions about all other validators' rewards, but even among honest validators these opinions could differ if some lack some ApprovalsTallyMessages.

We'd have the same in-core computation problem if we performed statistics like medians upon these opinions. We could however take an optimistic approach where each validator computes medians as above, but then shares their hash of the final rewards list. If 2/3rds vote for the same hash, then we distribute rewards as above. If not, then we distribute no rewards until governance selects the correct hash.
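
A sketch of that optimistic tally; the types are placeholders, and the real protocol would of course check vote signatures.

use std::collections::HashMap;

/// Return the rewards-list hash backed by a 2/3 supermajority, if any;
/// otherwise rewards wait for governance to select the correct hash.
fn supermajority_hash(votes: &[[u8; 32]], num_validators: usize) -> Option<[u8; 32]> {
    let mut tally: HashMap<[u8; 32], usize> = HashMap::new();
    for h in votes {
        *tally.entry(*h).or_insert(0) += 1;
    }
    tally.into_iter()
        .find(|&(_, n)| 3 * n >= 2 * num_validators)
        .map(|(h, _)| h)
}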

We never validate in-core the signatures on ApprovalsTallyMessages or the computation, so this approach permits more direct cheating by a malicious 2/3rd majority, but if that occurs then we've broken our security assumptions anyways. It's somewhat likely these hashes do diverge during some network disruptions though, which increases our "drama" factor considerably, which may be unacceptable.


    Explanation


    Backing

Polkadot's efficiency creates subtle liveness concerns: Anytime one node cannot perform one of its approval checks, Polkadot loses in expectation 3.25 approval checks, or 0.10833 parablocks (3.25 of the roughly 30 approval checks each parablock receives). This makes back pressure essential.

We cannot throttle approval checks securely either, so reactive off-chain back pressure only makes sense during or before the backing phase. In other words, if nodes feel overworked themselves, or perhaps believe others to be, then they should drop backing checks, never approval checks. It follows that backing work must be rewarded less well and less reliably than approvals, as otherwise validators could benefit from behavior that harms the network.

We propose that one backing statement be rewarded at 80% of one approval statement, so backers earn only 80% of what approval checkers earn. We omit rewards for availability distribution, so backers spend more on bandwidth too. Approval checkers always fetch chunks first from backers though, so good backers earn roughly 7% there, meaning backing checks earn roughly 13% less than approval checks (80% + 7% ≈ 87% of one approval check). We should lower this 80% if we ever increase availability redistribution rewards.

Although imperfect, we believe this simplifies implementation and provides robustness against mistakes elsewhere, including governance mistakes, while incurring minimal risk. In principle, a backer might not distribute systemic chunks, but approval checkers fetch systemic chunks from backers first anyways, so likely this yields negligible gains.

As always, we require that backers' rewards cover their operational costs plus some profit, but approval checks must be more profitable.


    Approvals

In Polkadot, all validators run the ELVES approval loop for each candidate, in which the validator listens to other approval checkers' assignments and approval statements/votes, with which it marks checkers no-show or done, and marks candidates approved. This loop also determines and announces the validator's own approval checker assignments.

Any validator should always conclude whatever approval checks it begins, but our approval assignment loop ignores some approval checks, either because they were announced too soon or because an earlier no-show delivered its approval vote before the final approval. We say a validator $u$ uses an approval vote by a validator $v$ on a candidate $c$ if the approval assignments loop by $u$ counted the vote by $v$ towards approving the candidate $c$. We actually rerun the ELVES approval loop quite frequently, but only the final run that marks the candidate approved determines the useful approval votes.

We should not reward votes announced too soon, so by only counting the final run we unavoidably omit rewards for some honest no-show replacements too. We expect the 80%-ish discount for backing covers these losses, so approval checks remain more profitable than backing.


    We propose a simple approximate solution based upon computing medians across validators for used votes.

1. In an epoch $e$, each validator $u$ counts the number $\alpha_{u,v}$ of votes they used from each validator $v$, including themselves. Any time a validator marks a candidate approved, they increment these counts appropriately.

2. After epoch $e$'s last block gets finalized, all validators of epoch $e$ submit an approvals tally message ApprovalsTallyMessage that reveals the number $\alpha_{u,v}$ of useful approvals they saw from each validator $v$ on candidates that became available in epoch $e$. We do not send $\alpha_{u,u}$, for tit-for-tat reasons discussed below, not for bias concerns. We record these approvals tally messages on-chain.

3. After some delay, we compute on-chain the median $\alpha_v := \textrm{median} \{ \alpha_{u,v} : u \}$ of used approval statements for each validator $v$.

As discussed in https://hackmd.io/@rgbPIkIdTwSICPuAq67Jbw/S1fHcvXSF, we could compute these medians using an on-line algorithm if Substrate had a nice priority queue.

We never achieve true consensus on approval checkers and their approval votes. Yet, our approval assignment loop gives a rough consensus, under our Byzantine assumption and some synchrony assumption. It then follows that misreporting by malicious validators should not appreciably alter the median $\alpha_v$ and hence rewards.

We never tally used approval assignments to candidate equivocations or other forks. Any validator should always conclude whatever approval checks it begins, even on other forks, but we expect relay chain equivocations should be vanishingly rare, and Sassafras should make forks uncommon.

We account for no-shows similarly, and deduct a much smaller amount of rewards, but require a 2/3 percentile level, not just a median.


    Availability redistribution


    As approval checkers could easily perform useless checks, we shall reward availability providers for the availability chunks they provide that resulted in useful approval checks. We enforce honesty using a tit-for-tat mechanism because chunk transfers are inherently subjective.

An approval checker reconstructs the full parachain block by downloading $f+1$ distinct chunks from other validators, where at most $f$ validators are Byzantine out of the $n \ge 3 f + 1$ total validators (for example, at $n = 1024$ we have $f = 341$, so reconstruction needs $342$ chunks). In downloading chunks, validators prefer the $f+1$ systemic chunks over the non-systemic chunks, and prefer fetching from validators who already voted valid, like backing checkers. It follows that some validators should receive credit for more than one chunk per candidate.


    We expect a validator $v$ has actually performed more approval checks $\omega_v$ than the median $\alpha_v$ for which they actually received credit. In fact, approval checkers even ignore some of their own approval checks, meaning $\alpha_{v,v} \le \omega_v$ too.

Alongside the approvals count for epoch $e$, approval checker $v$ computes the counts $\beta_{u,v}$ of the number of chunks they downloaded from each availability provider $u$, excluding themselves, for which they perceive the approval check turned out useful, meaning their own approval counts in $\alpha_{v,v}$. Approval checkers publish $\beta_{u,v}$ alongside $\alpha_{u,v}$ in the approvals tally message ApprovalsTallyMessage. We originally proposed including the self availability usage $\beta_{v,v}$ here, but this should not matter, and excluding it simplifies the code.

Symmetrically, availability provider $u$ computes the counts $\gamma_{u,v}$ of the number of chunks they uploaded to each approval checker $v$, again including themselves, and again only those for which they perceive the approval check turned out useful. Availability provider $u$ never reveals its $\gamma_{u,v}$ however.


    At this point, $\alpha_v$, $\alpha_{v,v}$, and $\alpha_{u,v}$ all potentially differ. We established consensus upon $\alpha_v$ above however, with which we avoid approval checkers printing unearned availability provider rewards:

After receiving "all" pairs $(\alpha_{u,v},\beta_{u,v})$, validator $w$ re-weights the $\beta_{u,v}$ and their own $\gamma_{w,v}$:

$$
\begin{aligned}
\beta'_{w,v} &= {(f+1) \alpha_v \over \sum_u \beta_{u,v}} \beta_{w,v} \\
\gamma'_{w,v} &= {(f+1) \alpha_w \over \sum_v \gamma_{w,v}} \gamma_{w,v}
\end{aligned}
$$

At this point, we compute $\beta'_w = \sum_v \beta'_{w,v}$ on-chain for each $w$ and reward $w$ proportionally.


    Tit-for-tat

We employ a tit-for-tat strategy to punish validators who lie about from whom they obtain availability chunks. We only alter validators' future choices of from whom they obtain availability chunks, and never punish by lying ourselves, so nothing here breaks Polkadot, but not having roughly this strategy enables cheating.

An availability provider $w$ defines $\delta'_{w,v} := \gamma'_{w,v} - \beta'_{w,v}$ to be the re-weighted number of chunks by which $v$ stiffed $w$. Now $w$ increments their cumulative stiffing perception $\eta_{w,v}$ of $v$ by the value $\delta'_{w,v}$, so $\eta_{w,v} \mathrel{+}= \delta'_{w,v}$.

In future, anytime $w$ seeks chunks in reconstruction, $w$ skips $v$ proportional to $\eta_{w,v} / \sum_u \eta_{w,u}$, with each skip reducing $\eta_{w,v}$ by 1. We expect honest accidental availability stiffs have only small $\delta'_{w,v}$, so they clear out quickly, but intentional stiffing adds up more quickly.
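
A sketch of this skip rule follows; the floating-point $\eta$ and the randomness source are assumptions of the sketch, and an implementation would likely use fixed point.

/// Sketch: decide whether to skip provider `v` on this fetch. `rng_unit` is
/// a uniform draw in [0, 1); how it is sourced is outside this sketch.
fn should_skip(eta: &mut [f64], v: usize, rng_unit: f64) -> bool {
    let total: f64 = eta.iter().sum();
    if total <= 0.0 {
        return false;
    }
    if rng_unit < eta[v] / total {
        eta[v] = (eta[v] - 1.0).max(0.0); // each skip works off one unit
        true
    } else {
        false
    }
}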

We keep $\gamma_{w,v}$ and $\alpha_{u,u}$ secret so that approval checkers cannot really know others' stiffing perceptions, although $\alpha_{u,v}$ leaks some relevant information. We expect this secrecy keeps skips secret and thus prevents the tit-for-tat escalating beyond one round, which hopefully creates a desirable Nash equilibrium.

We favor fetching systematic chunks to reduce reconstruction costs, so we face costs when skipping them. We could however fetch systematic chunks from availability providers as well as backers, or even other approval checkers, so this might not become problematic in practice.


    Concerns: Drawbacks, Testing, Security, and Privacy

We do not pay backers individually for availability distribution per se. We could only do so by including this information in the availability bitfields, which complicates on-chain computation. Also, if one of the two backers does not distribute, then the availability core should remain occupied longer, meaning the lazy backer loses some rewards too. It's likely future protocol improvements change this, so we should monitor for lazy backers outside the rewards system.

Earlier drafts discussed having the tit-for-tat consider approvals too. An adversary who successfully manipulates the rewards median votes would've already violated Polkadot's security assumptions though, which requires a hard fork and correcting the DOT allocation. Incorrectly reported approval_usages remain interesting statistics though.

Adversarial validators could manipulate their availability votes though, even without being a supermajority. If they still download honestly, then this costs them more rewards than they earn. We do not prevent validators from preferentially obtaining their pieces from their friends though. We should analyze, or at least observe, the long-term consequences.

A priori, a whale nominator's validators could stiff other validators but then rotate their validators quickly enough that they never suffer being skipped back. We discuss several possible solutions, and their difficulties, under "Rob's nominator-wise skipping" in https://hackmd.io/@rgbPIkIdTwSICPuAq67Jbw/S1fHcvXSF but overall less seems like more here. Also, frequent validator rotation could be penalized elsewhere.


    Performance, Ergonomics, and Compatibility


    We operate off-chain except for final rewards votes and median tallies. We expect lower overhead rewards protocols would lack information, thereby admitting easier cheating.

Initially, we designed the ELVES approval gadget to allow on-chain operation, in part for rewards computation, but doing so looks expensive. Also, on-chain rewards computation remains only an approximation too, and could even be biased more easily than the off-chain protocol presented here.

We already teach validators about missed parachain blocks, but we'll teach approval checking more going forwards, because current efforts focus more upon backing.


    JAM's block exports should not complicate availability rewards, but could impact some alternative schemes.


    Prior Art and References


    None


    Unresolved Questions


    Provide specific questions to discuss and address before the RFC is voted on by the Fellowship. This should include, for example, alternatives to aspects of the proposed design where the appropriate trade-off to make is unclear.


    Synthetic parachain flag

Any rewards protocol could simply be "out voted" by too many slow validators: An increase in the number of parachain cores increases the workload, but this creates no-shows if too few validators can handle this workload.

We could add a synthetic parachain flag, only settable by governance, which treats no-shows as positive approval votes for that parachain, but without adding rewards. We should never enable this for real parachains, only for synthetic ones like gluttons. We should not enable the synthetic parachain flag long-term even for gluttons, because validators could easily modify their code. Yet, synthetic approval checks might enable pushing hardware upgrades more aggressively over the short term.


    Motivation

    The heap allocation of the runtime is currently controlled by the host using a memory allocator on the host side.

    The API of many host functions contains buffer allocations. For example, when calling ext_hashing_twox_256_version_1, the host allocates a 32-byte buffer using the host allocator, and returns a pointer to this buffer to the runtime. The runtime later has to call ext_allocator_free_version_1 on this pointer to free the buffer.

    Even though no benchmark has been done, it is pretty obvious that this design is very inefficient. To continue with the example of ext_hashing_twox_256_version_1, it would be more efficient to instead write the output hash to a buffer allocated by the runtime on its stack and passed by pointer to the function. Allocating a buffer on the stack, in the worst case, consists simply of decreasing a number; in the best case, it is free. Doing so would save many VM memory reads and writes by the allocator, and would save a function call to ext_allocator_free_version_1.
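
For illustration only, the pattern described above could look as follows from the runtime side; this exact version_2 name and signature are hypothetical, not a proposed interface.

extern "C" {
    // Hypothetical version-2 host function: `out_ptr` points at a 32-byte
    // buffer the runtime allocated itself (e.g. on its stack). The host
    // writes the hash in place, so no host-side allocation and no later
    // call to ext_allocator_free_version_1 are needed.
    fn ext_hashing_twox_256_version_2(data_ptr: *const u8, data_len: u32, out_ptr: *mut u8);
}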

    Furthermore, the existence of the host-side allocator has become questionable over time. It is implemented in a very naive way, and for determinism and backwards compatibility reasons, it needs to be implemented exactly identically in every client implementation. Runtimes make substantial use of heap memory allocations, and each allocation needs to go through the runtime <-> host boundary twice (once for allocating and once for freeing). Moving the allocator to the runtime side would be a good idea, although it would increase the runtime size. But before the host-side allocator can be deprecated, all the host functions that use it must be updated to avoid using it.


    Stakeholders

    No attempt was made to convince stakeholders.


    Explanation

    New definitions

    New Definition I: Runtime Optional Positive Integer

    The Runtime optional positive integer is a signed 64-bit value. Positive values in the range of [0..2³²) represent corresponding unsigned 32-bit values. The value of -1 represents a non-existing value (an absent value). All other values are invalid.
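
In code, the definition reads as below; this is a sketch, and the error handling is illustrative.

/// Decode a runtime optional positive integer: -1 is absent, values in
/// [0, 2^32) map to a u32, and anything else is invalid.
fn decode_optional_u32(raw: i64) -> Result<Option<u32>, ()> {
    match raw {
        -1 => Ok(None),
        v if (0..=u32::MAX as i64).contains(&v) => Ok(Some(v as u32)),
        _ => Err(()),
    }
}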

Authors: polka.dom (polkadotdom)


    Summary

    This RFC proposes changes to pallet-conviction-voting that allow for simultaneous voting and delegation. For example, Alice could delegate to Bob, then later vote on a referendum while keeping their delegation to Bob intact. It is a strict subset of Leemo's RFC 35.


    Motivation

    Backdrop

    Under our current voting system, a voter can either vote or delegate. To vote, they must first ensure they have no delegate, and to delegate, they must first clear their current votes.

    The Issue


    This RFC aims to solve the second and third issue and thus more accurately align governance to the true voter preferences.

    An Aside

    One may ask, could a voter not just undelegate, vote, then delegate again? Could this just be built into the user interface? Unfortunately, this does not work due to the need to clear their votes before redelegation. In practice the voter would undelegate, vote, wait until the referendum is closed, hope that there's no other referenda they would like to vote on, then redelegate. At best it's a temporally extended friction. At worst the voter goes unrepresented in voting for the duration of the vote clearing period.


    Stakeholders

    Runtime developers: If runtime developers are relying on the previous assumptions for their VotingHooks implementations, they will need to rethink their approach. In addition, a runtime migration is needed. Lastly, it is a serious change in governance that requires some consideration beyond the technical.

    App developers: Apps like Subsquare and Polkassembly would need to update their user interface logic. They will also need to handle the new error.

    Users: We will want users to be aware of the new functionality, though not required.

    Technical Writers: This change will require rewrites of documentation and tutorials.


    Explanation

    New Data & Runtime Logic

The Voting enum, which currently holds the user's vote data, would first be collapsed and its underlying fields consolidated, as there would no longer be a distinction between the enum's variants. A (poll index -> retracted votes count) field would then be added to the resulting structure; its role is to keep track of the per-poll balance that has been clawed back from the user by those delegating to them. See here for a potential implementation.

    The implementation must allow for the (poll index -> retracted votes) data to exist even if the user does not currently have a vote for that poll. A simple example that highlights the necessity is as follows: A delegator votes first, then the delegate does. If the delegator is not allowed to create the retracted votes data on the delegate, the tally count would be corrupted when the delegate votes.
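
A sketch of what the consolidated structure might look like; the field names here are hypothetical, and the linked implementation is authoritative.

use std::collections::BTreeMap;

/// Sketch of the consolidated state: direct votes and a delegation coexist,
/// and per-poll retracted votes survive even without a matching direct vote.
pub struct VotingState<AccountId, Balance, PollIndex: Ord, Vote> {
    /// The user's own direct votes.
    votes: Vec<(PollIndex, Vote)>,
    /// An optional standing delegation, kept intact while voting directly.
    delegation: Option<(AccountId, Balance)>,
    /// Poll index -> voting power clawed back by this user's delegators.
    retracted: BTreeMap<PollIndex, Balance>,
}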


    If a delegate's voting power can be stripped from them at any point, then there is necessarily a reduction in their power within the system. This provides less incentive to become a delegate. But again, there are more customizable ways to handle this if it proves necessary.

    Testing, Security, and Privacy

    This change would mean a more complicated STF for voting, which would increase difficulty of hardening. Though sufficient unit testing should handle this with ease.


    Performance, Ergonomics, and Compatibility

    Performance

    The proposed changes would increase both the compute and storage requirements by about 2x for all voting functions. No change in complexity.

    Ergonomics


    Compatibility

    Runtime developers will need to add the migration and ensure their hooks still work.

    App developers will need to update their user interfaces to accommodate the new functionality. They will need to handle the new error as well.


    Prior Art and References

    A current implementation can be found here.


    Unresolved Questions

    None


    It is possible we would like to add a system parameter for the rate of change of the voting/delegation system. This could prevent wild swings in the voter preferences function and motivate/shield delegates by solidifying their positions over some amount of time. However, it's unclear that this would be valuable or even desirable.

Authors: bhargavbh, burdges, AlistairStewart


    Summary

    This RFC proposes a modification to the AURA round-robin block production mechanism for system parachains (e.g. Polkadot Hub). The proposed change increases the number of consecutive block production slots assigned to each collator from the current single-slot allocation to a configurable value, initially set at four. This modification aims to enhance censorship resistance by mitigating data-withholding attacks.
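
The proposed schedule can be sketched as follows; this is a simplification of the real slot logic, not the exact Cumulus implementation.

/// Round-robin author selection with runs of consecutive slots. With
/// slots_per_author = 1 this is classic AURA; this RFC starts with 4.
fn author_index(slot: u64, num_authorities: u64, slots_per_author: u64) -> u64 {
    (slot / slots_per_author) % num_authorities
}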


    Motivation

The Polkadot Relay Chain guarantees the safety of parachain blocks, but it does not provide explicit guarantees for liveness or censorship resistance. With the planned migration of core Relay Chain functionalities, such as Balances, Staking, and Governance, to the Polkadot Hub system parachain in early November 2025, it becomes critical to establish a mechanism for achieving censorship resistance for these parachains without compromising throughput. For example, if governance functionality is migrated to Polkadot Hub, malicious collators could systematically censor aye votes for a Relay Chain runtime upgrade, potentially altering the referendum's outcome. This demonstrates that censorship attacks on a system parachain can have a direct and undesirable impact on the security of the Relay Chain. This proposal addresses such censorship vulnerabilities by modifying the AURA block production mechanism utilized by system parachain collators, with minimal honesty assumptions on the collators.


    Stakeholders

    Effective multi-slot collation requires that collators be able to prioritize transactions that have been targeted for censorship. The implementation should incorporate a framework for priority transactions (e.g., governance votes, election extrinsics) to ensure that such transactions are included in the uncensored blocks.


    Prior Art and References

This RFC is related to RFC-7, which details the selection mechanism for System Parachain Collators. In general, a more robust collator selection mechanism that reduces the proportion of malicious actors would directly benefit the effectiveness of the ideas presented in this RFC.

    Future Directions

A resilient mechanism is needed for prioritising transactions in block production for collators that are actively targeted for censorship. There are two potential approaches:

Authors: Gavin Wood


    Summary

    This proposes a periodic, sale-based method for assigning Polkadot Coretime, the analogue of "block space" within the Polkadot Network. The method takes into account the need for long-term capital expenditure planning for teams building on Polkadot, yet also provides a means to allow Polkadot to capture long-term value in the resource which it sells. It supports the possibility of building rich and dynamic secondary markets to optimize resource allocation and largely avoids the need for parameterization.


    Motivation

    Present System

    The Polkadot Ubiquitous Computer, or just Polkadot UC, represents the public service provided by the Polkadot Network. It is a trust-free, WebAssembly-based, multicore, internet-native omnipresent virtual machine which is highly resilient to interference and corruption.

    The present system of allocating the limited resources of the Polkadot Ubiquitous Computer is through a process known as parachain slot auctions. This is a parachain-centric paradigm whereby a single core is long-term allocated to a single parachain which itself implies a Substrate/Cumulus-based chain secured and connected via the Relay-chain. Slot auctions are on-chain candle auctions which proceed for several days and result in the core being assigned to the parachain for six months at a time up to 24 months in advance. Practically speaking, we only see two year periods being bid upon and leased.

  • The solution SHOULD avoid creating additional dependencies on functionality which the Relay-chain need not strictly provide for the delivery of the Polkadot UC.
  • Furthermore, the design SHOULD be implementable and deployable in a timely fashion; three months from the acceptance of this RFC should not be unreasonable.


    Stakeholders

    Primary stakeholder sets are:

    Socialization:

The essentials of this proposal were presented at Polkadot Decoded 2023 Copenhagen on the Main Stage. A small amount of socialization at the Parachain Summit preceded it and some substantial discussion followed it. The Parity Ecosystem team is currently soliciting views from ecosystem teams who would be key stakeholders.


    Explanation

    Overview

    Upon implementation of this proposal, the parachain-centric slot auctions and associated crowdloans cease. Instead, Coretime on the Polkadot UC is sold by the Polkadot System in two separate formats: Bulk Coretime and Instantaneous Coretime.

    When a Polkadot Core is utilized, we say it is dedicated to a Task rather than a "parachain". The Task to which a Core is dedicated may change at every Relay-chain block and while one predominant type of Task is to secure a Cumulus-based blockchain (i.e. a parachain), other types of Tasks are envisioned.

  • Governance upgrade proposal(s).
  • Monitoring of the upgrade process.

    Performance, Ergonomics and Compatibility

    No specific considerations.

    Parachains already deployed into the Polkadot UC must have a clear plan of action to migrate to an agile Coretime market.

    While this proposal does not introduce documentable features per se, adequate documentation must be provided to potential purchasers of Polkadot Coretime. This SHOULD include any alterations to the Polkadot-SDK software collection.


    A regular security review SHOULD be conducted prior to deployment through a review by the Web3 Foundation economic research group.

    Any final implementation MUST pass a professional external security audit.

    The proposal introduces no new privacy concerns.


    RFC-3 proposes a means of implementing the high-level allocations within the Relay-chain.

    RFC-5 proposes the API for interacting with Relay-chain.

    Additional work should specify the interface for the instantaneous market revenue so that the Coretime-chain can ensure Bulk Coretime placed in the instantaneous market is properly compensated.

  • The percentage of cores to be sold as Bulk Coretime.
  • The fate of revenue collected.

    Prior Art and References

Robert Habermeier initially wrote on the subject of a blockspace-centric Polkadot in the article Polkadot Blockspace over Blockchains. While not going into details, the article served as an early reframing piece for moving beyond one-slot-per-chain models and building out secondary market infrastructure for resource allocation.

Authors: Gavin Wood, Robert Habermeier


    Summary

    In the Agile Coretime model of the Polkadot Ubiquitous Computer, as proposed in RFC-1 and RFC-3, it is necessary for the allocating parachain (envisioned to be one or more pallets on a specialised Brokerage System Chain) to communicate the core assignments to the Relay-chain, which is responsible for ensuring those assignments are properly enacted.

    This is a proposal for the interface which will exist around the Relay-chain in order to communicate this information and instructions.


    Motivation

    The background motivation for this interface is splitting out coretime allocation functions and secondary markets from the Relay-chain onto System parachains. A well-understood and general interface is necessary for ensuring the Relay-chain receives coretime allocation instructions from one or more System chains without introducing dependencies on the implementation details of either side.

    Requirements


    Stakeholders

    Primary stakeholder sets are:

    Socialization:

The content of this RFC was discussed in the Polkadot Fellows channel.


    Explanation

The interface has two sections: the messages which the Relay-chain is able to receive from the allocating parachain (the UMP message types), and the messages which the Relay-chain is able to send to the allocating parachain (the DMP message types). These messages are expected to be implemented in a well-known pallet and called with the XCM Transact instruction.

    Future work may include these messages being introduced into the XCM standard.

    UMP Message Types


    Realistic Limits of the Usage

    For request_revenue_info, a successful request should be possible if when is no less than the Relay-chain block number on arrival of the message less 100,000.

    For assign_core, a successful request should be possible if begin is no less than the Relay-chain block number on arrival of the message plus 10 and workload contains no more than 100 items.
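
Those limits, restated as a sketch with hypothetical helper names:

// Limits stated above, as the receiving side might check them.
const REVENUE_HISTORY_BLOCKS: u32 = 100_000;
const MIN_ASSIGN_LEAD_BLOCKS: u32 = 10;
const MAX_WORKLOAD_ITEMS: usize = 100;

fn request_revenue_info_ok(when: u32, now: u32) -> bool {
    when >= now.saturating_sub(REVENUE_HISTORY_BLOCKS)
}

fn assign_core_ok(begin: u32, now: u32, workload_items: usize) -> bool {
    begin >= now + MIN_ASSIGN_LEAD_BLOCKS && workload_items <= MAX_WORKLOAD_ITEMS
}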


    Performance, Ergonomics and Compatibility

    No specific considerations.

    Testing, Security and Privacy

    Standard Polkadot testing and security auditing applies.

    The proposal introduces no new privacy concerns.


    RFC-1 proposes a means of determining allocation of Coretime using this interface.

    RFC-3 proposes a means of implementing the high-level allocations within the Relay-chain.

    Drawbacks, Alternatives and Unknowns

    None at present.


    Prior Art and References

    None.

Authors: Joe Petrowski


    Summary

As core functionality moves from the Relay Chain into system chains, so increases the reliance on the liveness of these chains for the use of the network. It is not economically scalable, nor necessary from a game-theoretic perspective, to pay collators large rewards. This RFC proposes a mechanism -- part technical and part social -- for ensuring reliable collator sets that are resilient to attempts to stop any subsystem of the Polkadot protocol.


    Motivation

In order to guarantee access to Polkadot's system, the collators on its system chains must propose blocks (provide liveness) and allow all transactions to eventually be included. That is, some collators may censor transactions, but there must exist one collator in the set who will include a given transaction.

  • Collators selected by governance SHOULD have a reasonable expectation that the Treasury will reimburse their operating costs.

    Stakeholders


    Explanation

This protocol builds on the existing Collator Selection pallet and its notion of Invulnerables. Invulnerables are collators (identified by their AccountIds) who are always included in the collator set.

The vast majority of cases can be covered by unit testing. Integration tests should ensure that the Collator Selection UpdateOrigin, which has permission to modify the Invulnerables and desired number of Candidates, can handle updates over XCM from the system's governance location.


    Performance, Ergonomics, and Compatibility

    This proposal has very little impact on most users of Polkadot, and should improve the performance of system chains by reducing the number of missed blocks.

    Performance


    Compatibility

    This RFC is compatible with the existing implementation and can be handled via upgrades and migration.

Prior Art and References

    Written Discussions

Unresolved Questions

    None at this time.

Future Directions and Related Material

    There may exist in the future system chains for which this model of collator selection is not appropriate. These chains should be evaluated on a case-by-case basis.

    (source)


Authors: Pierre Krieger

Summary

The full nodes of the Polkadot peer-to-peer network maintain a distributed hash table (DHT), which is currently used for full node discovery and validator discovery purposes.

    This RFC proposes to extend this DHT to be used to discover full nodes of the parachains of Polkadot.

Motivation

    The maintenance of bootnodes has long been an annoyance for everyone.

When a bootnode is newly-deployed or removed, every chain specification must be updated in order to take the update into account. This has led to various non-optimal solutions, such as pulling chain specifications from GitHub repositories. When it comes to RPC nodes, UX developers often have trouble finding up-to-date addresses of parachain RPC nodes. With the ongoing migration from RPC nodes to light clients, similar problems would happen with chain specifications as well.


Because the list of bootnodes in chain specifications is so annoying to modify, the consequence is that the number of bootnodes is rather low (typically between 2 and 15). In order to better resist downtime and DoS attacks, a better solution would be to use every node of a certain chain as a potential bootnode, rather than special-casing some specific nodes.

While this RFC doesn't solve these problems for relay chains, it aims at solving them for parachains by storing the list of all the full nodes of a parachain on the relay chain DHT.

    Assuming that this RFC is implemented, and that light clients are used, deploying a parachain wouldn't require more work than registering it onto the relay chain and starting the collators. There wouldn't be any need for special infrastructure nodes anymore.

Stakeholders

    This RFC has been opened on my own initiative because I think that this is a good technical solution to a usability problem that many people are encountering and that they don't realize can be solved.

Explanation

The content of this RFC only applies to parachains and parachain nodes that are "Substrate-compatible". It is in no way mandatory for parachains to comply with this RFC.

    Note that "Substrate-compatible" is very loosely defined as "implements the same mechanisms and networking protocols as Substrate". The author of this RFC believes that "Substrate-compatible" should be very precisely specified, but there is controversy on this topic.

    While a lot of this RFC concerns the implementation of parachain nodes, it makes use of the resources of the Polkadot chain, and as such it is important to describe them in the Polkadot specification.

Furthermore, when a large number of providers (here, a provider is a bootnode) are registered, only the providers closest to the key are kept, up to a certain implementation-defined limit.

    For this reason, an attacker can abuse this mechanism by randomly generating libp2p PeerIds until they find the 20 entries closest to the key representing the target parachain. They are then in control of the parachain bootnodes. Because the key changes periodically and isn't predictable, and assuming that the Polkadot DHT is sufficiently large, it is not realistic for an attack like this to be maintained in the long term.

    Furthermore, parachain clients are expected to cache a list of known good nodes on their disk. If the mechanism described in this RFC went down, it would only prevent new nodes from accessing the parachain, while clients that have connected before would not be affected.

Performance, Ergonomics, and Compatibility

    Performance

    The DHT mechanism generally has a low overhead, especially given that publishing providers is done only every 24 hours.

    Doing a Kademlia iterative query then sending a provider record shouldn't take more than around 50 kiB in total of bandwidth for the parachain bootnode.

If this ever becomes a problem, this value of 20 is an arbitrary constant that could be increased.

Ergonomics

    Irrelevant.

    Compatibility

    Irrelevant.

Prior Art and References

    None.

Unresolved Questions

    While it fundamentally doesn't change much to this RFC, using BabeApi_currentEpoch and BabeApi_nextEpoch might be inappropriate. I'm not familiar enough with good practices within the runtime to have an opinion here. Should it be an entirely new pallet?

Future Directions and Related Material

    It is possible that in the future a client could connect to a parachain without having to rely on a trusted parachain specification.

    (source)

Authors: Pierre Krieger

Summary

    Improve the networking messages that query storage items from the remote, in order to reduce the bandwidth usage and number of round trips of light clients.

Motivation

Clients on the Polkadot peer-to-peer network can be divided into two categories: full nodes and light clients. So-called full nodes are nodes that store the content of the chain locally on their disk, while light clients are nodes that don't. In order to access, for example, the balance of an account, a full node can do a disk read, while a light client needs to send a network message to a full node and wait for the full node to reply with the desired value. This reply is in the form of a Merkle proof, which makes it possible for the light client to verify the exactness of the value.

    Unfortunately, this network protocol is suffering from some issues:

Once Polkadot and Kusama have transitioned to state_version = 1, which modifies the format of the trie entries, it will be possible to generate Merkle proofs that contain only the hashes of values in the storage. Thanks to this, it is possible to prove the existence of a key without sending its entire value (only its hash), or to prove that a value has changed or not between two blocks (by sending just their hashes). Thus, the only reason why the aforementioned issues exist is because the existing networking messages don't give the possibility for the querier to query this. This is what this proposal aims at fixing.

Stakeholders

    This is the continuation of https://github.com/w3f/PPPs/pull/10, which itself is the continuation of https://github.com/w3f/PPPs/pull/5.

Explanation

    The protobuf schema of the networking protocol can be found here: https://github.com/paritytech/substrate/blob/5b6519a7ff4a2d3cc424d78bc4830688f3b184c0/client/network/light/src/schema/light.v1.proto

    The proposal is to modify this protocol in this way:

    @@ -11,6 +11,7 @@ message Request {
[…]

    The main security consideration concerns the size of replies and the resources necessary to generate them. It is for example easily possible to ask for all keys and values of the chain, which would take a very long time to generate. Since responses to this networking protocol have a maximum size, the replier should truncate proofs that would lead to the response being too large. Note that it is already possible to send a query that would lead to a very large reply with the existing network protocol. The only thing that this proposal changes is that it would make it less complicated to perform such an attack.

Implementers of the replier side should be careful to detect early on when a reply would exceed the maximum reply size, rather than unconditionally generating a reply, as this could consume a very large amount of CPU, disk I/O, and memory. Existing implementations might currently be accidentally protected from such an attack thanks to the fact that requests have a maximum size, and thus that the list of keys in a query is bounded. After this proposal, this accidental protection would no longer exist.
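To make that replier-side guidance concrete, here is a minimal Rust sketch of early truncation under an assumed maximum reply size; the constant and the function name are illustrative, not part of the protocol.

const MAX_REPLY_SIZE: usize = 16 * 1024 * 1024; // assumed limit, illustrative

/// Build a proof from trie entries, stopping early once the reply would
/// exceed the maximum size instead of generating everything and truncating.
fn build_truncated_proof(entries: impl Iterator<Item = Vec<u8>>) -> (Vec<Vec<u8>>, bool) {
    let mut proof = Vec::new();
    let mut total = 0usize;
    for entry in entries {
        if total + entry.len() > MAX_REPLY_SIZE {
            return (proof, true); // truncated; the client can issue a follow-up request
        }
        total += entry.len();
        proof.push(entry);
    }
    (proof, false) // complete proof
}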

    Malicious server nodes might truncate Merkle proofs even when they don't strictly need to, and it is not possible for the client to (easily) detect this situation. However, malicious server nodes can already do undesirable things such as throttle down their upload bandwidth or simply not respond. There is no need to handle unnecessarily truncated Merkle proofs any differently than a server simply not answering the request.

Performance, Ergonomics, and Compatibility

    Performance

    It is unclear to the author of the RFC what the performance implications are. Servers are supposed to have limits to the amount of resources they use to respond to requests, and as such the worst that can happen is that light client requests become a bit slower than they currently are.

    Ergonomics

    Irrelevant.

    Compatibility

The prior networking protocol is maintained for now. The older version of this protocol could be removed at some point in the distant future.

Prior Art and References

    None. This RFC is a clean-up of an existing mechanism.

Unresolved Questions

    None

Future Directions and Related Material

The current networking protocol could be deprecated at some point in the distant future. Additionally, the current "state requests" protocol (used for warp syncing) could also be deprecated in favor of this one.

    (source)

Authors: Jonas Gehrlein

Summary

The Polkadot UC will generate revenue from the sale of available Coretime. The question then arises: how should we handle these revenues? Broadly, there are two reasonable paths: burning the revenue and thereby removing it from total issuance, or diverting it to the Treasury. This Request for Comment (RFC) presents arguments favoring burning as the preferred mechanism for handling revenues from Coretime sales.

Motivation

How to handle the revenue accrued from Coretime sales is an important economic question that influences the value of DOT and should be properly discussed before deciding on either option. Now is the best time to start this discussion.

Stakeholders

    Polkadot DOT token holders.

Explanation

This RFC discusses the potential benefits of burning the revenue accrued from Coretime sales instead of diverting it to the Treasury. The arguments for this are as follows.

    It's in the interest of the Polkadot community to have a consistent and predictable Treasury income, because volatility in the inflow can be damaging, especially in situations when it is insufficient. As such, this RFC operates under the presumption of a steady and sustainable Treasury income flow, which is crucial for the Polkadot community's stability. The assurance of a predictable Treasury income, as outlined in a prior discussion here, or through other equally effective measures, serves as a baseline assumption for this argument.

    Consequently, we need not concern ourselves with this particular issue here. This naturally begs the question - why should we introduce additional volatility to the Treasury by aligning it with the variable Coretime sales? It's worth noting that Coretime revenues often exhibit an inverse relationship with periods when Treasury spending should ideally be ramped up. During periods of low Coretime utilization (indicated by lower revenue), Treasury should spend more on projects and endeavours to increase the demand for Coretime. This pattern underscores that Coretime sales, by their very nature, are an inconsistent and unpredictable source of funding for the Treasury. Given the importance of maintaining a steady and predictable inflow, it's unnecessary to rely on another volatile mechanism. Some might argue that we could have both: a steady inflow (from inflation) and some added bonus from Coretime sales, but burning the revenue would offer further benefits as described below.

Authors: Joe Petrowski

Summary

    Since the introduction of the Collectives parachain, many groups have expressed interest in forming new -- or migrating existing groups into -- on-chain collectives. While adding a new collective is relatively simple from a technical standpoint, the Fellowship will need to merge new pallets into the Collectives parachain for each new collective. This RFC proposes a means for the network to ratify a new collective, thus instructing the Fellowship to instate it in the runtime.

Motivation

    Many groups have expressed interest in representing collectives on-chain. Some of these include:

Prior Art and References

    This RFC builds extensively on the available ideas put forward in RFC-1.

    Additionally, I want to express a special thanks to Samuel Haefner, Shahar Dobzinski, and Alistair Stewart for fruitful discussions and helping me structure my thoughts.

    (source)

Authors: @brenzi for Encointer Association, 8000 Zurich, Switzerland

Summary

Encointer has been a system chain on Kusama since January 2022 and has been developed and maintained by the Encointer association. This RFC proposes to treat Encointer like any other system chain and include it in the fellowship repo with this PR.

Motivation

    Encointer does not seek to be in control of its runtime repository. As a decentralized system, the fellowship has a more suitable structure to maintain a system chain runtime repo than the Encointer association does.

    Also, Encointer aims to update its runtime in batches with other system chains in order to have consistency for interoperability across system chains.

Stakeholders

    • Fellowship: Will continue to take upon them the review and auditing work for the Encointer runtime, but the process is streamlined with other system chains and therefore less time-consuming compared to the separate repo and CI process we currently have.
    • Kusama Network: Tokenholders can easily see the changes of all system chains in one place.
• Encointer Association: Further decentralization of the Encointer Network's necessities, like devops.
    • Encointer devs: Being able to work directly in the Fellowship runtimes repo to streamline and synergize with other developers.
Explanation

    Our PR has all details about our runtime and how we would move it into the fellowship repo.

    Noteworthy: All Encointer-specific pallets will still be located in encointer's repo for the time being: https://github.com/encointer/pallets

    It will still be the duty of the Encointer team to keep its runtime up to date and provide adequate test fixtures. Frequent dependency bumps with Polkadot releases would be beneficial for interoperability and could be streamlined with other system chains but that will not be a duty of fellowship. Whenever possible, all system chains could be upgraded jointly (including Encointer) with a batch referendum.


Unlike all other system chains, development and maintenance of the Encointer Network is mainly financed by the KSM Treasury, and possibly the DOT Treasury in the future. Encointer is dedicated to maintaining its network and runtime code for as long as possible, but there is a dependency on funding which is not in the hands of the fellowship. The only risk in the context of funding, however, is that the Encointer runtime will see less frequent updates if there's less funding.

    Testing, Security, and Privacy

    No changes to the existing system are proposed. Only changes to how maintenance is organized.

Performance, Ergonomics, and Compatibility

    No changes

Prior Art and References

    Existing Encointer runtime repo

Unresolved Questions

    None identified

Future Directions and Related Material

    More info on Encointer: encointer.org

    (source)

Authors: Joe Petrowski, Gavin Wood

Summary

    The Relay Chain contains most of the core logic for the Polkadot network. While this was necessary prior to the launch of parachains and development of XCM, most of this logic can exist in parachains. This is a proposal to migrate several subsystems into system parachains.

Motivation

Polkadot's scaling approach allows many distinct state machines (known generally as parachains) to operate with common guarantees about the validity and security of their state transitions. Polkadot provides these common guarantees by executing the state transitions on a strict subset (a backing […] blockspace) to the network.

    By minimising state transition logic on the Relay Chain by migrating it into "system chains" -- a set of parachains that, with the Relay Chain, make up the Polkadot protocol -- the Polkadot Ubiquitous Computer can maximise its primary offering: secure blockspace.

Stakeholders

    • Parachains that interact with affected logic on the Relay Chain;
    • Core protocol and XCM format developers;
    • Tooling, block explorer, and UI developers.
Explanation

    The following pallets and subsystems are good candidates to migrate from the Relay Chain:

    • Identity
• […] may require some optimizations to deal with constraints.

      Testing, Security, and Privacy

Standard audit/review requirements apply. More powerful multi-chain integration test tools would be useful in development.

Performance, Ergonomics, and Compatibility

      Describe the impact of the proposal on the exposed functionality of Polkadot.

      Performance

This is an optimization. The removal of public/user transactions on the Relay Chain ensures that its […] runtimes to recognize the new locations in the network.

      Compatibility

      Implementing this proposal will require some changes to pallet APIs and/or a pub-sub protocol. Application developers will need to interact with multiple chains in the network.

Prior Art and References

Unresolved Questions

      There remain some implementation questions, like how to use balances for both Staking and Governance. See, for example, Moving Staking off the Relay Chain.

Future Directions and Related Material

      Ideally the Relay Chain becomes transactionless, such that not even balances are represented there. With Staking and Governance off the Relay Chain, this is not an unreasonable next step.

      With Identity on Polkadot, Kusama may opt to drop its People Chain.

Authors: Vedhavyas Singareddi

Summary

At the moment, we have a state_version field on RuntimeVersion that determines which state version is used for the storage. We have a use case where we want the extrinsics root to be derived using StateVersion::V1. Without defining a new field under RuntimeVersion, we would like to propose replacing state_version with system_version, which can be used to derive both the storage and extrinsic state versions.

Motivation

Since the extrinsic state version is always StateVersion::V0, deriving the extrinsics root requires full extrinsic data. This is problematic when we need to verify the extrinsics root and the extrinsics are large. This problem is further explored in https://github.com/polkadot-fellows/RFCs/issues/19

One of the main challenges here is that some extrinsics could be big enough that they cannot be included in the consensus block due to the block's weight restriction. If the extrinsics root is derived using StateVersion::V1, then we do not need to pass the full extrinsic data but rather, at maximum, 32 bytes of extrinsic data.

Stakeholders

      • Technical Fellowship, in its role of maintaining system runtimes.
Explanation

In order to use a project-specific StateVersion for extrinsic roots, we proposed an implementation that introduced a parameter to frame_system::Config, but that unfortunately did not feel correct. […] so that chains know which system_version to use.
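For illustration, a runtime declaration using the proposed field might look like the following sketch (assuming the usual sp-version imports). The surrounding fields are abridged placeholders, and the meaning of each system_version value is the one defined by the RFC; 2 is assumed here to mean that the extrinsics root is derived using StateVersion::V1.

pub const VERSION: RuntimeVersion = RuntimeVersion {
    spec_name: create_runtime_str!("example-chain"),
    impl_name: create_runtime_str!("example-chain"),
    authoring_version: 1,
    spec_version: 100,
    impl_version: 1,
    apis: RUNTIME_API_VERSIONS,
    transaction_version: 1,
    // Replaces `state_version`; derives both the storage state version and
    // the extrinsics-root state version (assumed: 2 => StateVersion::V1 for
    // the extrinsics root).
    system_version: 2,
};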

      Testing, Security, and Privacy

      AFAIK, should not have any impact on the security or privacy.

Performance, Ergonomics, and Compatibility

These changes should be compatible for existing chains if they use their state_version value for system_version.

      Performance

      I do not believe there is any performance hit with this change.

      @@ -3991,13 +4232,13 @@ so that chains know which system_version to use.

      This does not break any exposed Apis.

      Compatibility

      This change should not break any compatibility.

Prior Art and References

We previously proposed introducing a similar change via a parameter to frame_system::Config, but did not feel that was the correct way of introducing this change.

Unresolved Questions

      I do not have any specific questions about this change at the moment.

Future Directions and Related Material

      IMO, this change is pretty self-contained and there won't be any future work necessary.

      (source)

Authors: Sebastian Kunert

Summary

      This RFC proposes a new host function for parachains, storage_proof_size. It shall provide the size of the currently recorded storage proof to the runtime. Runtime authors can use the proof size to improve block utilization by retroactively reclaiming unused storage weight.

Motivation

      The number of extrinsics that are included in a parachain block is limited by two constraints: execution time and proof size. FRAME weights cover both concepts, and block-builders use them to decide how many extrinsics to include in a block. However, these weights are calculated ahead of time by benchmarking on a machine with reference hardware. The execution-time properties of the state-trie and its storage items are unknown at benchmarking time. Therefore, we make some assumptions about the state-trie:

      • Trie Depth: We assume a trie depth to account for intermediary nodes.
• […]

        These pessimistic assumptions lead to an overestimation of storage weight, negatively impacting block utilization on parachains.

In addition, the current model does not account for multiple accesses to the same storage items. While these repetitive accesses will not increase the storage-proof size, the runtime-side weight monitoring will account for them multiple times. Since the proof size is completely opaque to the runtime, we cannot implement retroactive storage weight correction.

        A solution must provide a way for the runtime to track the exact storage-proof size consumed on a per-extrinsic basis.

Stakeholders

        • Parachain Teams: They MUST include this host function in their runtime and node.
        • Light-client Implementors: They SHOULD include this host function in their runtime and node.
Explanation

        This RFC proposes a new host function that exposes the storage-proof size to the runtime. As a result, runtimes can implement storage weight reclaiming mechanisms that improve block utilization.

        This RFC proposes the following host function signature:

#![allow(unused)]
fn main() {
    fn ext_storage_proof_size_version_1() -> u64;
}

        The host function MUST return an unsigned 64-bit integer value representing the current proof size. In block-execution and block-import contexts, this function MUST return the current size of the proof. To achieve this, parachain node implementors need to enable proof recording for block imports. In other contexts, this function MUST return 18446744073709551615 (u64::MAX), which represents disabled proof recording.
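As a rough sketch of how a runtime might use this, the following (with illustrative names; only the host function's behaviour comes from this RFC) measures the proof size around a dispatch and computes how much benchmarked proof weight could be reclaimed.

const PROOF_RECORDING_DISABLED: u64 = u64::MAX;

/// Provided by the host via the function proposed above; stubbed here.
fn storage_proof_size() -> u64 {
    unimplemented!("host function")
}

/// Returns the proof weight that can be credited back after a dispatch,
/// given the proof size measured before it and the benchmarked estimate.
fn reclaimable_proof_weight(before: u64, benchmarked: u64) -> Option<u64> {
    let after = storage_proof_size();
    // u64::MAX signals that proof recording is disabled in this context.
    if before == PROOF_RECORDING_DISABLED || after == PROOF_RECORDING_DISABLED {
        return None;
    }
    let consumed = after.saturating_sub(before);
    // If less proof size was consumed than benchmarked, reclaim the excess.
    Some(benchmarked.saturating_sub(consumed))
}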

Performance, Ergonomics, and Compatibility

        Performance

        Parachain nodes need to enable proof recording during block import to correctly implement the proposed host function. Benchmarking conducted with balance transfers has shown a performance reduction of around 0.6% when proof recording is enabled.

        Ergonomics

        The host function proposed in this RFC allows parachain runtime developers to keep track of the proof size. Typical usage patterns would be to keep track of the overall proof size or the difference between subsequent calls to the host function.

        Compatibility

        Parachain teams will need to include this host function to upgrade.

Prior Art and References

        • Pull Request including proposed host function: PoV Reclaim (Clawback) Node Side.
        • Issue with discussion: [FRAME core] Clawback PoV Weights For Dispatchables

Authors: Aurora Poppyseed, Just_Luuuu, Viki Val, Joe Petrowski

Summary

This RFC proposes changing the current deposit requirements on the Polkadot and Kusama Asset Hub for creating an NFT collection and minting an individual NFT, and lowering the corresponding metadata and attribute deposits. The objective is to lower the barrier to entry for NFT creators, fostering a more inclusive and vibrant ecosystem while maintaining network integrity and preventing spam.

Motivation

The current deposit of 10 DOT for collection creation (along with 0.01 DOT for the item deposit and 0.2 DOT for the metadata and attribute deposits) on the Polkadot Asset Hub, and 0.1 KSM on the Kusama Asset Hub, presents a significant financial barrier for many NFT creators. By lowering the deposit […] low.

• Deposits SHOULD be derived from the deposit function, adjusted by the corresponding pricing mechanism.
Stakeholders

          • NFT Creators: Primary beneficiaries of the proposed change, particularly those who found the current deposit requirements prohibitive.
• […] collections, enhancing the overall ecosystem.

            Previous discussions have been held within the Polkadot Forum, with artists expressing their concerns about the deposit amounts.

Explanation

            This RFC proposes a revision of the deposit constants in the configuration of the NFTs pallet on the Polkadot Asset Hub. The new deposit amounts would be determined by a standard deposit formula.
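As an illustration of the shape of such a formula (the constants below are placeholders, not the actual Asset Hub values), a standard deposit function charges per storage item plus per byte stored, so the deposit scales with the actual state footprint of a collection, item, or attribute.

type Balance = u128;

const UNITS: Balance = 10_000_000_000; // illustrative: 1 DOT in plancks
const CENTS: Balance = UNITS / 100;

/// Deposit scales with the state footprint: a per-item charge plus a
/// per-byte charge (placeholder rates).
pub const fn deposit(items: u32, bytes: u32) -> Balance {
    items as Balance * 20 * CENTS + bytes as Balance * CENTS / 10
}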

As of v1.1.1, the Collection Deposit is 10 DOT and the Item Deposit is 0.01 DOT (see […]). […] Polkadot Asset Hub and 191 on Kusama Asset Hub, with a relatively low volume.

            Security concerns

            As noted above, state bloat is a security concern. In the case of abuse, governance could adapt by increasing deposit rates and/or using forceDestroy on collections agreed to be spam.

Performance, Ergonomics, and Compatibility

            Performance

The primary performance consideration stems from the potential for state bloat due to increased activity from lower deposit requirements. It's vital to monitor and manage this to avoid any […]; efficient data management and periodic reviews of storage requirements will be […]

Ergonomics

[…] the Kusama and Polkadot Asset Hubs, making Polkadot and Kusama more accessible and user-friendly.

            Compatibility

            The change does not impact compatibility as a redeposit function is already implemented.

Unresolved Questions

            If this RFC is accepted, there should not be any unresolved questions regarding how to adapt the implementation of deposits for NFT collections.

            Addendum

[…]

Authors: Alin Dima

Summary

            Propose a way of permuting the availability chunk indices assigned to validators, in the context of recovering available data from systematic chunks, with the purpose of fairly distributing network bandwidth usage.

Motivation

            Currently, the ValidatorIndex is always identical to the ChunkIndex. Since the validator array is only shuffled once per session, naively using the ValidatorIndex as the ChunkIndex would pose an unreasonable stress on the first N/3 validators during an entire session, when favouring availability recovery from systematic chunks.

[…] systematic availability chunks to different validators, based on the relay chain block and core. The main purpose is to ensure fair distribution of network bandwidth usage for availability recovery in general, and in particular for systematic chunk holders.

Stakeholders

            Relay chain node core developers.

Explanation

            Systematic erasure codes

An erasure coding algorithm is considered systematic if it preserves the original unencoded data as part of the resulting code. […] mitigate this problem and will likely be needed in the future for CoreJam and/or […]
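To make the shuffling idea concrete, here is a minimal Rust sketch of a per-block, per-core rotation of chunk indices. The offset function is illustrative, not the normative assignment algorithm this RFC specifies.

type ValidatorIndex = u32;
type ChunkIndex = u32;

/// Rotate chunk assignments so that the validators holding the systematic
/// chunks (the first indices) change with every relay-chain block and core,
/// spreading recovery bandwidth fairly across the whole validator set.
fn chunk_index_for(
    n_validators: u32,
    validator_index: ValidatorIndex,
    block_number: u32,
    core_index: u32,
) -> ChunkIndex {
    // Any deterministic, per-(block, core) offset achieves the fair
    // bandwidth distribution described above.
    let offset = block_number.wrapping_add(core_index) % n_validators;
    (validator_index + offset) % n_validators
}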

            Testing, Security, and Privacy

            Extensive testing will be conducted - both automated and manual. This proposal doesn't affect security or privacy.

Performance, Ergonomics, and Compatibility

            Performance

This is a necessary data availability optimisation, as Reed-Solomon erasure coding has proven to be a top consumer of CPU time in Polkadot as we scale up the parachain block size and the number of availability cores.

[…] halved and total POV recovery time decreased by 80% for large POVs. See more […]

            This is a breaking change. See upgrade path section above. All validators and collators need to have upgraded their node versions before the feature will be enabled via a governance call.

Prior Art and References

            See comments on the tracking issue and the in-progress PR

Unresolved Questions

            Not applicable.

Future Directions and Related Material

            This enables future optimisations for the performance of availability recovery, such as retrieving batched systematic chunks from backers/approval-checkers.

            Appendix A

[…]

Authors: Bastian Köcher

Summary

This RFC proposes changes to the SessionKeys::generate_session_keys runtime api interface. This runtime api is used by validator operators to generate new session keys on a node. The public session keys are then registered manually on chain by the validator operator. Before this RFC it was not possible for the on chain logic to ensure that the account setting the public session keys is also in possession of the private session keys. To solve this, the RFC proposes to pass the account doing the registration on chain to generate_session_keys. Further, this RFC proposes to change the return value of the generate_session_keys function to not only return the public session keys, but also a proof of ownership for the private session keys. The validator operator will then need to send the public session keys and the proof together when registering new session keys on chain.
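A minimal sketch of the changed interface shape, with illustrative concrete types; the normative definition, including the actual proof format, is given in the RFC itself.

type AccountId = [u8; 32];

/// Returned by the changed runtime API: the public keys to register on
/// chain, plus a proof that the node holds the matching private keys.
pub struct GeneratedSessionKeys {
    pub public_keys: Vec<u8>, // SCALE-encoded public session keys
    pub proof: Vec<u8>,       // ownership proof, bound to the owner account
}

pub trait SessionKeysApi {
    /// `owner` is the account that will register the keys on chain; binding
    /// the proof to it lets the chain verify possession of the private keys.
    fn generate_session_keys(owner: AccountId) -> GeneratedSessionKeys;
}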

Motivation

When submitting the new public session keys to the on chain logic there doesn't exist any verification of possession of the private session keys. This means that users can basically register any kind of public session keys on chain. While the on chain logic ensures that there are no duplicate keys, someone could try to prevent others from registering new session keys by setting them first. While this wouldn't bring the "attacker" any kind of advantage, more like disadvantages (potential […]), it could prevent the legitimate owner from, e.g., changing its session key in the event of a private session key leak.

            After this RFC this kind of attack would not be possible anymore, because the on chain logic can verify that the sending account is in ownership of the private session keys.

Stakeholders

            • Polkadot runtime implementors
            • Polkadot node implementors
            • Validator operators
Explanation

            We are first going to explain the proof format being used:

#![allow(unused)]
fn main() {
    // […] (proof format definition elided)
}

[…] This will require updating some high level docs and making users familiar with the changes.

            Testing, Security, and Privacy

            Testing of the new changes only requires passing an appropriate owner for the current testing context. The changes to the proof generation and verification got audited to ensure they are correct.

Performance, Ergonomics, and Compatibility

            Performance

The session key generation is an offchain process and thus doesn't influence the performance of the chain. Verifying the proof is done on chain as part of the transaction logic for setting the session keys. […] a runtime is enacted that contains these changes, otherwise they will fail to generate […]. The RPC that exists around this runtime api needs to be updated to support passing the account id and returning the ownership proof alongside the public session keys.

            UIs would need to be updated to support the new RPC and the changed on chain logic.

Prior Art and References

            None.

Unresolved Questions

            None.

Future Directions and Related Material

            Substrate implementation of the RFC.

            (source)

Authors: Joe Petrowski, Gavin Wood

Summary

            The Fellowship Manifesto states that members should receive a monthly allowance on par with gross income in OECD countries. This RFC proposes concrete amounts.

Motivation

            One motivation for the Technical Fellowship is to provide an incentive mechanism that can induct and retain technical talent for the continued progress of the network.

In order for members to uphold their commitment to the network, they should receive support to […] on par with a full-time job. Providing a livable wage to those making such contributions makes it pragmatic to work full-time on Polkadot.

            Note: Goals of the Fellowship, expectations for each Dan, and conditions for promotion and demotion are all explained in the Manifesto. This RFC is only to propose concrete values for allowances.

Stakeholders

            • Fellowship members
            • Polkadot Treasury
Explanation

This RFC proposes agreeing on salaries relative to a single level, the III Dan. As such, changes to the amount or asset used would only be on a single value, and all others would adjust relatively. A III Dan is someone whose contributions match the expectations of a full-time individual contributor. […]

[…] to acquire them. However, the asset of choice can be changed in the future.
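To illustrate the "relative to III Dan" structure (the per-rank ratios below are placeholders, not the values proposed by this RFC), each rank's allowance can be expressed as a fixed percentage of the III Dan allowance, so changing the base value rescales every rank at once:

type Balance = u128;

/// Each rank's monthly allowance is a fixed percentage of the III Dan
/// allowance (placeholder ratios, for illustration only).
fn monthly_allowance(dan: u8, iii_dan_allowance: Balance) -> Balance {
    let percent: Balance = match dan {
        1 => 10,
        2 => 50,
        3 => 100, // the reference level
        4 => 150,
        5 => 200,
        6 => 250,
        _ => 300,
    };
    iii_dan_allowance * percent / 100
}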

            Testing, Security, and Privacy

            N/A.

Performance, Ergonomics, and Compatibility

            Performance

            N/A

            Ergonomics

            N/A

            Compatibility

            N/A

Prior Art and References

Unresolved Questions

            None at present.

            (source)

Authors: Pierre Krieger

Summary

            When two peers connect to each other, they open (amongst other things) a so-called "notifications protocol" substream dedicated to gossiping transactions to each other.

Each notification on this substream currently consists of a SCALE-encoded Vec<Transaction>, where Transaction is defined in the runtime.

            This RFC proposes to modify the format of the notification to become (Compact(1), Transaction). This maintains backwards compatibility, as this new format decodes as a Vec of length equal to 1.

Motivation

There are three motivations behind this change:

• […]
• It makes the implementation much more straightforward by not having to repeat code related to back-pressure. See the explanations below.

Stakeholders

            Low-level developers.

Explanation

            To give an example, if you send one notification with three transactions, the bytes that are sent on the wire are:

concat(
    leb128(total-size-in-bytes-of-the-rest),
    […]
)

[…]

This is equivalent to forcing the Vec<Transaction> to always have a length of 1.
             

            An alternative could be to introduce a new version of the transactions notifications protocol that sends one Transaction per notification, but this is significantly more complicated to implement and can always be done later in case the Compact(1) is bothersome.
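A small sketch (using the parity-scale-codec crate; the helper name is illustrative) of why the new format is backwards compatible: SCALE encodes a Vec as a compact length prefix followed by its items, so Compact(1) followed by one transaction is byte-identical to a one-element Vec<Transaction>.

use parity_scale_codec::{Compact, Encode};

type Transaction = Vec<u8>; // opaque here; defined by the runtime in reality

/// Encode a notification in the new format: always exactly one transaction.
fn encode_notification(tx: &Transaction) -> Vec<u8> {
    (Compact(1u32), tx).encode()
}

fn main() {
    let tx: Transaction = vec![1, 2, 3];
    // The old format carrying a single-element batch produces the same bytes,
    // so receivers decoding a Vec<Transaction> keep working unchanged.
    assert_eq!(vec![tx.clone()].encode(), encode_notification(&tx));
}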

            Testing, Security, and Privacy

            Irrelevant.

Performance, Ergonomics, and Compatibility

            Performance

            Irrelevant.

            Ergonomics

            Irrelevant.

            Compatibility

            The change is backwards compatible if done in two steps: modify the sender to always send one transaction per notification, then, after a while, modify the receiver to enforce the new format.

Prior Art and References

            Irrelevant.

Unresolved Questions

            None.

Future Directions and Related Material

            None. This is a simple isolated change.

            (source)

Authors: Pierre Krieger

Summary

            This RFC proposes to make the mechanism of RFC #8 more generic by introducing the concept of "capabilities".

            Implementations can implement certain "capabilities", such as serving old block headers or being a parachain bootnode.

            The discovery mechanism of RFC #8 is extended to be able to discover nodes of specific capabilities.

Motivation

            The Polkadot peer-to-peer network is made of nodes. Not all these nodes are equal. Some nodes store only the headers of recent blocks, some nodes store all the block headers and bodies since the genesis, some nodes store the storage of all blocks since the genesis, and so on.

            It is currently not possible to know ahead of time (without connecting to it and asking) which nodes have which data available, and it is not easily possible to build a list of nodes that have a specific piece of data available.

            If you want to download for example the header of block 500, you have to connect to a randomly-chosen node, ask it for block 500, and if it says that it doesn't have the block, disconnect and try another randomly-chosen node. In certain situations such as downloading the storage of old blocks, nodes that have the information are relatively rare, and finding through trial and error a node that has the data can take a long time.

            This RFC attempts to solve this problem by giving the possibility to build a list of nodes that are capable of serving specific data.

Stakeholders

            Low-level client developers. People interested in accessing the archive of the chain.

Explanation

            Reading RFC #8 first might help with comprehension, as this RFC is very similar.

Please keep in mind while reading that everything below applies to both relay chains and parachains, unless mentioned otherwise.

            Capabilities

If block pruning is enabled and the chain is a relay chain, then Substrate unfortunately […]. Furthermore, when a large number of providers are registered, only the providers closest to the key are kept, up to a certain implementation-defined limit.

            For this reason, an attacker can abuse this mechanism by randomly generating libp2p PeerIds until they find the 20 entries closest to the key representing the target capability. They are then in control of the list of nodes with that capability. While doing this can in no way be actually harmful, it could lead to eclipse attacks.

            Because the key changes periodically and isn't predictable, and assuming that the Polkadot DHT is sufficiently large, it is not realistic for an attack like this to be maintained in the long term.

Performance, Ergonomics, and Compatibility

            Performance

            The DHT mechanism generally has a low overhead, especially given that publishing providers is done only every 24 hours.

            Doing a Kademlia iterative query then sending a provider record shouldn't take more than around 50 kiB in total of bandwidth for the parachain bootnode.

[…]

Ergonomics

            Irrelevant.

            Compatibility

            Irrelevant.

Prior Art and References

            Unknown.

Unresolved Questions

            While it fundamentally doesn't change much to this RFC, using BabeApi_currentEpoch and BabeApi_nextEpoch might be inappropriate. I'm not familiar enough with good practices within the runtime to have an opinion here. Should it be an entirely new pallet?

Future Directions and Related Material

This RFC would make it possible to reliably discover archive nodes, which would make it possible to reliably send archive node requests, something that isn't currently possible. This could solve the problem of finding archive RPC node providers by migrating archive-related requests to the native peer-to-peer protocol rather than JSON-RPC.

            If we ever decide to break backwards compatibility, we could divide the "history" and "archive" capabilities in two, between nodes capable of serving older blocks and nodes capable of serving newer blocks. We could even add to the peer-to-peer network nodes that are only capable of serving older blocks (by reading from a database) but do not participate in the head of the chain, and that just exist for historical purposes.

Authors: Zondax AG, Parity Technologies

Summary

            To interact with chains in the Polkadot ecosystem it is required to know how transactions are encoded and how to read state. For doing this, Polkadot-SDK, the framework used by most of the chains in the Polkadot ecosystem, exposes metadata about the runtime to the outside. UIs, wallets, and others can use this metadata to interact with these chains. This makes the metadata a crucial piece of the transaction encoding as users are relying on the interacting software to encode the transactions in the correct format.

It gets even more important when the user signs the transaction on an offline wallet, as the device by its nature cannot get access to the metadata without relying on the online wallet to provide it. The offline wallet thus needs to trust an online party, rendering the security assumptions of offline devices moot.

            This RFC proposes a way for offline wallets to leverage metadata, within the constraints of these. The design idea is that the metadata is chunked and these chunks are put into a merkle tree. The root hash of this merkle tree represents the metadata. The offline wallets can use the root hash to decode transactions by getting proofs for the individual chunks of the metadata. This root hash is also included in the signed data of the transaction (but not sent as part of the transaction). The runtime is then including its known metadata root hash when verifying the transaction. If the metadata root hash known by the runtime differs from the one that the offline wallet used, it very likely means that the online wallet provided some fake data and the verification of the transaction fails.
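A toy sketch of the chunk-and-merkleize idea, using the blake3 crate; the pairing scheme here is simplified and is not the RFC's normative tree construction.

/// Fold a list of chunk hashes pairwise into a single root hash.
fn merkle_root(mut nodes: Vec<[u8; 32]>) -> [u8; 32] {
    assert!(!nodes.is_empty());
    while nodes.len() > 1 {
        nodes = nodes
            .chunks(2)
            .map(|pair| match pair {
                [left, right] => {
                    let mut h = blake3::Hasher::new();
                    h.update(left);
                    h.update(right);
                    *h.finalize().as_bytes()
                }
                // A leftover odd node is carried up unchanged in this sketch.
                [single] => *single,
                _ => unreachable!(),
            })
            .collect();
    }
    nodes[0]
}

fn main() {
    // Hash some placeholder metadata chunks into leaves, then compute the
    // root that would be included in the transaction's signed data.
    let leaves: Vec<[u8; 32]> = [b"chunk-a".as_slice(), b"chunk-b".as_slice(), b"chunk-c".as_slice()]
        .iter()
        .map(|c| *blake3::hash(c).as_bytes())
        .collect();
    println!("metadata root: {:02x?}", merkle_root(leaves));
}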

            Users depend on offline wallets to correctly display decoded transactions before signing. With merkleized metadata, they can be assured of the transaction's legitimacy, as incorrect transactions will be rejected by the runtime.

Motivation

Polkadot's innovative design (both the relay chain and parachains) gives developers the ability to upgrade their network as frequently as they need. These systems manage to keep integrations working after upgrades with the help of FRAME Metadata. This metadata, which is on the order of half a MiB for most Polkadot-SDK chains, completely describes chain interfaces and properties. Securing this metadata is key for users to be able to interact with the Polkadot-SDK chain in the expected way.

On the other hand, offline wallets provide a secure way for blockchain users to hold their own keys (some do a better job than others). These devices seldom get upgraded, usually account for one particular network, and have very small internal memories. Currently in the Polkadot ecosystem there is no secure way for these offline devices to know the latest metadata of the Polkadot-SDK chain they are interacting with. This results in a plethora of similar yet slightly different offline wallets for all the different Polkadot-SDK chains, as well as the impediment of keeping these regularly updated, thus not fully leveraging Polkadot-SDK's unique forkless upgrade feature.

            The two main reasons why this is not possible today are:

[…]
          • Chunks handling mechanism SHOULD support chunks being sent in any order without memory utilization overhead;
          • Unused enum variants MUST be stripped (this has great impact on transmitted metadata size; examples: era enum, enum with all calls for call batching).
Stakeholders

            • Runtime implementors
            • UI/wallet implementors
            • Offline wallet implementors

            The idea for this RFC was brought up by runtime implementors and was extensively discussed with offline wallet implementors. It was designed in such a way that it can work easily with the existing offline wallet solutions in the Polkadot ecosystem.

Explanation

            The FRAME metadata provides a wide range of information about a FRAME based runtime. It contains information about the pallets, the calls per pallet, the storage entries per pallet, runtime APIs, and type information about most of the types that are used in the runtime. For decoding extrinsics on an offline wallet, what is mainly required is type information. Most of the other information in the FRAME metadata is actually not required for decoding extrinsics and thus it can be removed. Therefore, the following is a proposal on a custom representation of the metadata and how this custom metadata is chunked, ensuring that only the needed chunks required for decoding a particular extrinsic are sent to the offline wallet. The necessary information to transform the FRAME metadata type information into the type information presented in this RFC will be provided. However, not every single detail on how to convert from FRAME metadata into the RFC type information is described.

First, the MetadataDigest is introduced. After that, ExtrinsicMetadata is covered, and finally the actual format of the type information. Then pruning of unrelated type information is covered, as well as how to generate the TypeRefs. In the last step, the merkle tree calculation is explained.

            Metadata digest

[…]

nodes: [[[2, 3], [4, 5]], [0, 1]]

            All implementations are required to strictly follow the RFC to generate the metadata hash. This includes which hash function to use and how to construct the metadata types tree. So, all implementations are following the same security criteria. As the chains will calculate the metadata hash at compile time, the build process needs to be trusted. However, this is already a solved problem in the Polkadot ecosystem by using reproducible builds. So, anyone can rebuild a chain runtime to ensure that a proposal is actually containing the changes as advertised.

            Implementations can also be tested easily against each other by taking some metadata and ensuring that they all come to the same metadata hash.

            Privacy of users should also not be impacted. This assumes that wallets will generate the metadata hash locally and don't leak any information to third party services about which chunks a user will send to their offline wallet. Besides that, there is no leak of private information as getting the raw metadata from the chain is an operation that is done by almost everyone.

Performance, Ergonomics, and Compatibility

            Performance

            There should be no measurable impact on performance to Polkadot or any other chain using this feature. The metadata root hash is calculated at compile time and at runtime it is optionally used when checking the signature of a transaction. This means that at runtime no performance heavy operations are done.

            Ergonomics & Compatibility

            The proposal alters the way a transaction is built, signed, and verified. So, this imposes some required changes to any kind of developer who wants to construct transactions for Polkadot or any chain using this feature. As the developer can pass 0 for disabling the verification of the metadata root hash, it can be easily ignored.

Prior Art and References

            RFC 46 produced by the Alzymologist team is a previous work reference that goes in this direction as well.

In other ecosystems, there are other solutions to the problem of trusted signing. Cosmos, for example, has a standardized way of transforming a transaction into a textual representation, and this textual representation is included in the signed data. This basically achieves the same as what this RFC proposes, but it requires that, for every transaction applied in a block, every node in the network always generate this textual representation to ensure the transaction signature is valid.

Unresolved Questions

            None.

Future Directions and Related Material
• Does it work with all kinds of offline wallets?
• Generic types currently appear multiple times in the metadata, once per instantiation. It may be useful to have each generic type only once in the metadata and declare the generic parameters at their instantiation.
Authors: George Pisaltu

Summary

              This RFC proposes a change to the extrinsic format to incorporate a new transaction type, the "general" transaction.


Motivation

"General" transactions, a new type of transaction that this RFC aims to support, are transactions which obey the runtime's extensions and carry the corresponding extension data, yet do not have hard-coded signatures. They are first described in Extrinsic Horizon and supported in 3685. They enable users to authorize origins in new, more flexible ways (e.g. ZK proofs, mutations over pre-authenticated origins). As of now, all transactions are limited to the account signing model for origin authorization, and any additional origin changes happen in extrinsic logic, which cannot leverage the validation process of extensions.

An example of a use case for such an extension would be sponsoring the transaction fee for some other user. A new extension would be put in place to verify that a part of the initial payload was signed by the author under whom the extrinsic should run and to change the origin accordingly, while the payment for the whole transaction would be handled by a sponsor's account. A POC for this can be found in 3712.

              The new "general" transaction type would coexist with both current transaction types for a while and, therefore, the current number of supported transaction types, capped at 2, is insufficient. A new extrinsic type must be introduced alongside the current signed and unsigned types. Currently, an encoded extrinsic's first byte indicate the type of extrinsic using the most significant bit - 0 for unsigned, 1 for signed - and the 7 following bits indicate the extrinsic format version, which has been equal to 4 for a long time.

              By taking one bit from the extrinsic format version encoding, we can support 2 additional extrinsic types while also having a minimal impact on our capability to extend and change the extrinsic format in the future.


              Stakeholders

              • Runtime users
              • Runtime devs
              • Wallet devs

              Explanation

              An extrinsic is currently encoded as one byte to identify the extrinsic type and version. This RFC aims to change the interpretation of this byte regarding the reserved bits for the extrinsic type and version. In the following explanation, bits represented using T make up the extrinsic type and bits represented using V make up the extrinsic version.

              Currently, the bit allocation within the leading encoded byte is 0bTVVV_VVVV. In practice in the Polkadot ecosystem, the leading byte would be 0bT000_0100 as the version has been equal to 4 for a long time.

This RFC proposes changing the bit allocation to 0bTTVV_VVVV. As a result, the extrinsic format version will be bumped to 5, and the extrinsic type bit representation would change as follows:
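
As an illustration, a minimal sketch of how a client could split the leading byte under the proposed 0bTTVV_VVVV layout; the mask names are made up for this example, and the concrete type-bit assignments are defined by the RFC:

// Masks for the proposed leading byte 0bTTVV_VVVV (names are illustrative).
const TYPE_MASK: u8 = 0b1100_0000;
const VERSION_MASK: u8 = 0b0011_1111;

// Returns (extrinsic type bits, extrinsic format version).
fn split_leading_byte(first: u8) -> (u8, u8) {
    ((first & TYPE_MASK) >> 6, first & VERSION_MASK)
}

// Example: version 5 with type bits 0b00 encodes as 0b0000_0101,
// so split_leading_byte(0b0000_0101) == (0b00, 5).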

              This change would reduce the maximum possible transaction version from the current 127 to 63. In order to bypass the new, lower limit, the extrinsic format would have to change again.

              Testing, Security, and Privacy

              There is no impact on testing, security or privacy.


              Performance, Ergonomics, and Compatibility

              This change would allow Polkadot to support new types of transactions, with the specific "general" transaction type in mind at the time of writing this proposal.

              Performance

              There is no performance impact.

Ergonomics

              The impact to developers and end-users is minimal as it would just be a bitmask update on their part for parsing the extrinsic type along with the version.

              Compatibility

This change breaks backwards compatibility because any transaction that is neither signed nor unsigned, but of a new transaction type, would be interpreted as having a future extrinsic format version.


              Prior Art and References

This design was originally proposed in the TransactionExtension PR, which is also the motivation behind this effort.


              Unresolved Questions

              None.

Future Directions and Related Material

              Following this change, the "general" transaction type will be introduced as part of the Extrinsic Horizon effort, which will shape future work.

              (source)

Authors: Alex Gheorghe (alexggh)

              Summary

              Extend the DHT authority discovery records with a signed creation time, so that nodes can determine which record is newer and always decide to prefer the newer records to the old ones.


              Motivation

Currently, we use the Kademlia DHT for storing records regarding the p2p address of an authority discovery key. The problem is that if a node decides to change its PeerId/network key, it will publish a new record; however, because of the distributed and replicated nature of the DHT, there is no way to tell which record is newer, so both the old and the new PeerId will live in the network until the old one expires (36h). That creates all sorts of problems and leads to the node that changed its address not being properly connected for up to 36h.

With this RFC, nodes are extended to keep the newer record and to propagate it to nodes that still store the old record, so in the end all nodes converge to the new record much faster (in the order of minutes, not 36h).

Implementation of the RFC: https://github.com/paritytech/polkadot-sdk/pull/3786.

Current issue without this enhancement: https://github.com/paritytech/polkadot-sdk/issues/3673


              Stakeholders

              Polkadot node developers.


              Explanation

              This RFC heavily relies on the functionalities of the Kademlia DHT already in use by Polkadot. You can find a link to the specification here.

In a nutshell, on a specific node the current authority-discovery protocol publishes Kademlia DHT records at startup and periodically. The records contain the full address of the node for each authority key it owns. The node also tries to find the full addresses of all authorities in the network by querying the DHT and picking up the first record it finds for each of the authority ids it found on chain.
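
A minimal sketch of the record-preference rule this RFC introduces; the record shape and field names are illustrative, assuming the creation time is an optional field so that records from old nodes can still be handled:

// Illustrative record: creation_time is optional because records published
// by nodes running the old protocol do not carry the field.
struct AuthorityRecord {
    addresses: Vec<String>,
    creation_time: Option<u64>,
}

// Prefer the newer record: a record with a creation time wins over one
// without, and otherwise the higher timestamp wins.
fn prefer<'a>(current: &'a AuthorityRecord, incoming: &'a AuthorityRecord) -> &'a AuthorityRecord {
    match (current.creation_time, incoming.creation_time) {
        (None, Some(_)) => incoming,
        (Some(c), Some(i)) if i > c => incoming,
        _ => current,
    }
}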

              Testing, Security, and Privacy

This RFC's implementation (https://github.com/paritytech/polkadot-sdk/pull/3786) has been tested on various local test networks and on Versi.

With regard to security, the creation time is wrapped inside SignedAuthorityRecord, so it will be signed with the authority id key; there is no way for malicious nodes to manipulate this field without the receiving node noticing.


              Performance, Ergonomics, and Compatibility

              Irrelevant.

              Performance

              Irrelevant.

              @@ -5581,11 +5822,11 @@ in order to speed up the time until all nodes have the newest record, nodes can

              Irrelevant.

              Compatibility

The changes are backwards compatible with the existing protocol, so nodes running the old protocol and nodes running the newer protocol can coexist in the network. This is achieved by using protobuf for serializing and deserializing the records: new fields are simply ignored when deserializing with the older protocol, and, vice versa, when deserializing an old record with the new protocol, the new field will be None and the new code accepts such a record as valid.


              Prior Art and References

The enhancements have been inspired by the algorithm specified here.


              Unresolved Questions

              N/A

Future Directions and Related Material

              N/A

              (source)

Authors: Jonas Gehrlein & Alistair Stewart

              Summary

              This RFC proposes a flexible unbonding mechanism for tokens that are locked from staking on the Relay Chain (DOT/KSM), aiming to enhance user convenience without compromising system security.

              Locking tokens for staking ensures that Polkadot is able to slash tokens backing misbehaving validators. With changing the locking period, we still need to make sure that Polkadot can slash enough tokens to deter misbehaviour. This means that not all tokens can be unbonded immediately, however we can still allow some tokens to be unbonded quickly.

The new mechanism leads to a significantly reduced unbonding time on average, by queuing up new unbonding requests and scaling their unbonding duration relative to the size of the queue. New requests are executed within a minimum of 2 days, when the queue is comparatively empty, up to the conventional 28 days, if the sum of requests (in terms of stake) exceeds some threshold. In scenarios between these two bounds, the unbonding duration scales proportionately. The new mechanism will never be worse than the current fixed 28 days.
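
As a sketch of the scaling rule just described (the linear form and the parameter names are assumptions for illustration; the RFC's empirical analysis informs the real threshold):

// Illustrative parameters: 2-day floor, 28-day ceiling, and a threshold on
// the total stake currently queued for unbonding.
const MIN_DAYS: f64 = 2.0;
const MAX_DAYS: f64 = 28.0;

// Linear interpolation between the bounds, clamped at the 28-day maximum.
fn unbond_duration_days(queued_stake: f64, threshold_stake: f64) -> f64 {
    (MIN_DAYS + (MAX_DAYS - MIN_DAYS) * (queued_stake / threshold_stake)).clamp(MIN_DAYS, MAX_DAYS)
}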

              In this document we also present an empirical analysis by retrospectively fitting the proposed mechanism to the historic unbonding timeline and show that the average unbonding duration would drastically reduce, while still being sensitive to large unbonding events. Additionally, we discuss implications for UI, UX, and conviction voting.

              Note: Our proposition solely focuses on the locks imposed from staking. Other locks, such as governance, remain unchanged. Also, this mechanism should not be confused with the already existing feature of FastUnstake, which lets users unstake tokens immediately that have not received rewards for 28 days or longer.

              As an initial step to gauge its effectiveness and stability, it is recommended to implement and test this model on Kusama before considering its integration into Polkadot, with appropriate adjustments to the parameters. In the following, however, we limit our discussion to Polkadot.


              Motivation

Polkadot has one of the longest unbonding periods among all Proof-of-Stake protocols, because security is the most important goal. Staking on Polkadot is still attractive compared to other protocols because of its above-average staking APY. However, the long unbonding period harms usability and deters potential participants who want to contribute to the security of the network.

              The current length of the unbonding period imposes significant costs for any entity that even wants to perform basic tasks such as a reorganization / consolidation of their stashes, or updating their private key infrastructure. It also limits participation of users that have a large preference for liquidity.

The combination of long unbonding periods and high returns has led to the proliferation of liquid staking, where parachains or centralised exchanges offer users their staked tokens before the 28-day unbonding period is over, either in original DOT/KSM form or as derivative tokens. Liquid staking is harmless if few tokens are involved, but it could result in many validators being selected by a few entities if a large fraction of DOTs were involved. This may lead to centralization (see here for more discussion on threats of liquid staking) and an opportunity for attacks.

              The new mechanism greatly increases the competitiveness of Polkadot, while maintaining sufficient security.


              Stakeholders

              • Every DOT/KSM token holder

              Explanation

Before diving into the details of how to implement the unbonding queue, we give readers context about why Polkadot has a 28-day unbonding period in the first place. The reason for it is to prevent long-range attacks (LRAs), which become theoretically possible if more than 1/3 of validators collude. In essence, an LRA describes the inability of users, who disconnect from the consensus at time t0 and reconnect later, to realize that validators which were legitimate at time t0 but dropped out in the meantime are not to be trusted anymore. That means, for example, a user syncing the state could be fooled into trusting validators that fell outside the active set after t0 and are building a competing, malicious chain (fork).

              LRAs of longer than 28 days are mitigated by the use of trusted checkpoints, which are assumed to be no more than 28 days old. A new node that syncs Polkadot will start at the checkpoint and look for proofs of finality of later blocks, signed by 2/3 of the validators. In an LRA fork, some of the validator sets may be different but only if 2/3 of some validator set in the last 28 days signed something incorrect.

If we detect an LRA of no more than 28 days with the current unbonding period, then we should be able to detect misbehaviour from over 1/3 of validators whose nominators are still bonded. The stake backing these validators is a considerable fraction of the total stake (empirically around 0.287). If we allowed more than this stake to unbond, without checking who it was backing, then the LRA attack might be free of cost for an attacker. The proposed mechanism allows up to half this stake to unbond within 28 days. This halves the amount of tokens that can be slashed, but this is still very high in absolute terms. For example, at the time of writing (19.06.2024) this would translate to around 120 million DOT.

Testing, Security, and Privacy

              NA


              Performance, Ergonomics, and Compatibility

              NA

              Performance

              The authors cannot see any potential impact on performance.

Compatibility

              The authors cannot see any potential impact on compatibility. This should be assessed by the technical fellows.

              -

              Prior Art and References

              +

              Prior Art and References

              • Ethereum proposed a similar solution
              • Alistair did some initial write-up

                Summary

                This RFC proposes a change to the extrinsic format to include a transaction extension version.


                Motivation

The extrinsic format supports being extended with transaction extensions. These transaction extensions are runtime specific and can differ per chain. Each transaction extension can add data to the extrinsic itself or extend the signed payload. This means that adding a transaction extension breaks the chain-specific extrinsic format. A recent example was the introduction of CheckMetadataHash to Polkadot and all its system chains. As the extension was adding one byte to the extrinsic, it broke a lot of tooling. By introducing an extra version for the transaction extensions, it will be possible to introduce changes to these transaction extensions while still being backwards compatible. Based on the version of the transaction extensions, each chain runtime could decode the extrinsic correctly and also create the correct signed payload.


                Stakeholders

                • Runtime users
                • Runtime devs
                • Wallet devs

                Explanation

                RFC84 introduced the extrinsic format 5. The idea is to piggyback onto this change of the extrinsic format to add the extra version for the transaction extensions. If required, this could also come as extrinsic format 6, but 5 is not yet deployed anywhere.

                The extrinsic format supports the following types of transactions:

• bare transactions
• signed transactions
• general transactions

                This adds one byte more to each signed transaction.
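
A minimal sketch of what this means for decoding, assuming the transaction-extension version is a single byte carried next to the other transaction data (the struct and field names are illustrative):

// Illustrative shape of a decoded version-5 signed transaction with the
// proposed one-byte transaction-extension version.
struct SignedTransaction {
    extension_version: u8,   // the new byte proposed by this RFC
    signature: Vec<u8>,
    extension_data: Vec<u8>, // interpreted according to extension_version
    call: Vec<u8>,
}

// The runtime picks the decoder for extension_data based on
// extension_version, keeping older extension sets decodable.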

                Testing, Security, and Privacy

                There is no impact on testing, security or privacy.


                Performance, Ergonomics, and Compatibility

                This will ensure that changes to the transactions extensions can be done in a backwards compatible way.

                Performance

                There is no performance impact.

                Compatibility

When introduced together with extrinsic format version 5 from RFC84, it can be implemented in a backwards compatible way. So, transactions can still be sent using the old extrinsic format and decoded by the runtime.


                Prior Art and References

                None.


                Unresolved Questions

                None.

Future Directions and Related Material

                None.

                (source)

Authors: Adrian Catangiu

                Summary

                This RFC proposes a new instruction that provides a way to initiate on remote chains, asset transfers which transfer multiple types (teleports, local-reserve, destination-reserve) of assets, using XCM alone.

The currently existing instructions are too opinionated and force each XCM asset transfer into a single transfer type (teleport, local-reserve, destination-reserve). This results in the inability to combine different types of transfers in a single transfer, which results in overall poor UX when trying to move assets across chains.


                Motivation

XCM is the de-facto cross-chain messaging protocol within the Polkadot ecosystem, and cross-chain asset transfers are one of its main use-cases. Unfortunately, in its current spec, it does not support initiating, on a remote chain, one or more transfers that combine assets with different transfer types.

For example, a single XCM program execution that transfers multiple assets from Kusama Asset Hub, over the bridge, through Polkadot Asset Hub, with final destination ParaP on Polkadot, is not possible today.

With current XCM, we are limited to doing multiple independent transfers, one for each individual hop, in order to move both the "interesting" assets and the "supporting" assets (used to pay fees).


                Stakeholders

                • Runtime users
                • Runtime devs
                • Wallet devs
                • dApps devs

                Explanation

A new instruction InitiateAssetsTransfer is introduced that initiates an assets transfer from the chain it is executed on, to another chain. The executed transfer is point-to-point (chain-to-chain) with all of the transfer properties specified in the instruction parameters. The instruction also makes the required execution fee payment part of the instruction logic, through the remote_fees: Option<AssetTransferFilter> parameter, which will make sure the remote XCM starts with a single asset-holding-loading instruction, immediately followed by a BuyExecution using said asset, which minimizes the potential free/unpaid work that a receiving chain has to do.
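
A sketch of what the instruction and its per-asset transfer-type filter could look like, with field names and variants assumed from the description above rather than quoted from a normative definition:

// Illustrative shape of the new instruction.
InitiateAssetsTransfer {
    destination: Location,
    assets: Vec<AssetTransferFilter>,         // each entry picks its own transfer type
    remote_fees: Option<AssetTransferFilter>, // loaded first, then used by BuyExecution
    remote_xcm: Xcm<()>,                      // program to run on `destination`
}

// One transfer type per filtered set of assets, so a single instruction can
// combine teleports with reserve-based transfers.
enum AssetTransferFilter {
    Teleport(AssetFilter),
    ReserveDeposit(AssetFilter),
    ReserveWithdraw(AssetFilter),
}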


                Performance, Ergonomics, and Compatibility

This brings no impact to the rest of the XCM spec. It is a new, independent instruction; no changes to existing instructions are required.

                Enhances the exposed functionality of Polkadot. Will allow multi-chain transfers that are currently forced to happen in multiple programs per asset per "hop", to be possible in a single XCM program.

A program where the new instruction is used to initiate multiple types of asset transfers cannot be downgraded to older XCM versions, because there is no equivalent capability there. Such conversion attempts will explicitly fail.


                Prior Art and References

                None.


                Unresolved Questions

                None.

Future Directions and Related Material

                None.

                (source)

Authors: Adrian Catangiu

                Summary

                The Transact XCM instruction currently forces the user to set a specific maximum weight allowed to the inner call and then also pay for that much weight regardless of how much the call actually needs in practice.

                This RFC proposes improving the usability of Transact by removing that parameter and instead get and charge the actual weight of the inner call from its dispatch info on the remote chain.


                Motivation

                The UX of using Transact is poor because of having to guess/estimate the require_weight_at_most weight used by the inner call on the target.

                We've seen multiple Transact on-chain failures caused by guessing wrong values for this require_weight_at_most even though the rest of the XCM program would have worked.

                In practice, this parameter only adds UX overhead with no real practical value. Use cases fall in one of two categories:

We've had multiple OpenGov root/whitelisted_caller proposals initiated by core-devs completely or partially fail because of incorrect configuration of the require_weight_at_most parameter. This is a strong indication that the instruction is hard to use.


                Stakeholders

                • Runtime Users,
                • Runtime Devs,
                • Wallets,
                • dApps,

                Explanation

The proposed enhancement is simple: remove the require_weight_at_most parameter from the instruction:

                - Transact { origin_kind: OriginKind, require_weight_at_most: Weight, call: DoubleEncoded<Call> },
                 + Transact { origin_kind: OriginKind, call: DoubleEncoded<Call> },
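
A sketch of how the executor could weigh the decoded inner call instead of trusting a sender-supplied limit; this assumes a FRAME-style GetDispatchInfo trait and is illustrative rather than the exact implementation:

// Illustrative: once the inner call is decoded on the remote chain, its
// weight can be read from its dispatch info.
fn transact_weight<Call: GetDispatchInfo>(call: &Call) -> Weight {
    // Field name `weight` assumed; it may differ across polkadot-sdk versions.
    call.get_dispatch_info().weight
}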

The security considerations around how much someone can execute for free are the same for both this new version and the old one. In both cases, an "attacker" can get the XCM decoding (including Transact inner calls) done for free by adding a large enough BuyExecution without actually having the funds available.

                In both cases, decoding is done for free, but in both cases execution fails early on BuyExecution.


                Performance, Ergonomics, and Compatibility

                Performance

                No performance change.

                Ergonomics

Ergonomics are slightly improved by simplifying the Transact API.

                Compatibility

                Compatible with previous XCM programs.


                Prior Art and References

                None.


                Unresolved Questions

                None.

Future Directions and Related Material

                None.

                (source)

Authors: Andrei Sandu

                Summary

                Elastic scaling is not resilient against griefing attacks without a way for a PoV (Proof of Validity) to commit to the particular core index it was intended for. This RFC proposes a way to include core index information in the candidate commitments and the CandidateDescriptor data structure in a backward compatible way. Additionally, it proposes the addition of a SessionIndex field in the CandidateDescriptor to make dispute resolution more secure and robust.


                Motivation

                This RFC proposes a way to solve two different problems:

1. For Elastic Scaling, it prevents anyone who has acquired a valid collation from DoSing the parachain.
2. For dispute resolution, it provides a session index: the dispute may concern a relay chain block not yet imported by a validator. In this case, validators can safely assume the session index refers to the session the candidate appeared in; otherwise, the chain would have rejected the candidate.

                Stakeholders

                • Polkadot core developers.
                • Cumulus node developers.
                • Tooling, block explorer developers.

                This approach and alternatives have been considered and discussed in this issue.


                Explanation

The approach proposed below was chosen primarily because it minimizes the number of breaking changes and the complexity, and takes less implementation and testing time. The proposal is to change the existing primitives while keeping binary compatibility with the older versions. We repurpose fields already present in the receipt.
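
A sketch of the direction this takes: the new information lives in descriptor space that old decoders ignore, gated by a version field. All names and sizes here are assumptions for illustration, not the normative layout:

// Illustrative v2 descriptor: new fields occupy bytes that previously held
// now-unused data, so old decoders still parse the structure.
struct CandidateDescriptorV2 {
    para_id: u32,
    relay_parent: Hash,
    version: u8,        // lets decoders branch on the descriptor layout
    core_index: u16,    // the core this PoV commits to (elastic scaling)
    session_index: u32, // pins the candidate to a session for disputes
    // remaining bytes reserved for future, non-breaking additions
}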

                Any tooling that decodes UMP XCM messages needs an update to support or ignore the new UMP messages, but they should be fine to decode the regular XCM messages that come before the separator.


                Prior Art and References

                Forum discussion about a new CandidateReceipt format: https://forum.polkadot.network/t/pre-rfc-discussion-candidate-receipt-format-v2/3738


                Unresolved Questions

                N/A

Future Directions and Related Material

The implementation is extensible and future-proof to some extent. With minimal or no breaking changes, additional fields can be added in the candidate descriptor until the reserved space is exhausted, by using the version field of the descriptor introduced in this RFC.

Authors: Francisco Aguirre

                Summary

XCM already handles execution fees in an effective and efficient manner using the BuyExecution instruction. However, other types of fees are not handled as effectively -- for example, delivery fees. Fees exist that can't be measured using Weight -- as execution fees can -- so a new method should be thought up for those cases. This RFC proposes making the fee handling system simpler and more general by doing the following:

              • Adding a fees register
              • Deprecating BuyExecution and adding a new instruction PayFees with new semantics to ultimately replace it.

              Motivation

Execution fees are handled correctly by XCM right now. However, the addition of extra fees, like those for message delivery, results in awkward ways of integrating them into the XCVM implementation. This is because these types of fees are not included in the language. The standard should have a way to correctly deal with these implementation-specific fees. The new instruction moves the specified amount of fees from the holding register to a dedicated fees register that the XCVM can use in flexible ways depending on its implementation. The XCVM implementation is free to use these fees to pay for execution fees, transport fees, or any other type of fee that might be necessary. This moves the specifics of fees further away from the XCM standard and more into the actual underlying XCVM implementation, which is a good thing.


              Stakeholders

              • Runtime Users
              • Runtime Devs
              • Wallets
              • dApps

              Explanation

The new instruction that will replace BuyExecution is a much simpler and more general version: PayFees. This instruction takes one Asset, removes it from the holding register, and puts it into a new fees register. The XCVM implementation can now use this Asset to make sure every necessary fee is paid for; this includes execution fees, delivery fees, and any other type of fee.

PayFees { asset }

              There needs to be an explicit change from BuyExecution to PayFees, most often accompanied by a reduction in the assets passed in.
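
As an illustration, a migrated program might look like the following sketch (amounts and trailing instructions are arbitrary examples):

// Before: fees paid via BuyExecution, which refunds leftovers immediately.
WithdrawAsset((Here, 100u128))
BuyExecution { fees: (Here, 100u128), weight_limit: Unlimited }
DepositAsset { assets: All, beneficiary }

// After: a (typically smaller) amount is set aside into the fees register.
WithdrawAsset((Here, 100u128))
PayFees { asset: (Here, 10u128) }
DepositAsset { assets: All, beneficiary }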

              Testing, Security, and Privacy

              It might become a security concern if leftover fees are trapped, since a lot of them are expected.


              Performance, Ergonomics, and Compatibility

              Performance

              There should be no performance downsides to this approach. The fees register is a simplification that may actually result in better performance, in the case an implementation is doing a workaround to achieve what this RFC proposes.

              This RFC can't just change the semantics of the BuyExecution instruction since that instruction accepts any funds, uses what it needs and returns the rest immediately. The new proposed instruction, PayFees, doesn't return the leftover immediately, it keeps it in the fees register. In practice, the deprecated BuyExecution needs to be slowly rolled out in favour of PayFees.


              Prior Art and References

              The closed RFC PR on the xcm-format repository, before XCM RFCs got moved to fellowship RFCs: https://github.com/polkadot-fellows/xcm-format/pull/53.


              Unresolved Questions

              None

Future Directions and Related Material

              This proposal would greatly benefit from an improved asset trapping system.

              CustomAssetClaimer is also related, as it directly improves the ergonomics of this proposal.

              LeftoverAssetsDestination execution hint would also similarly improve the ergonomics.

Authors: Francisco Aguirre

              Summary

A previous XCM RFC (https://github.com/polkadot-fellows/xcm-format/pull/37) introduced a SetAssetClaimer instruction. This idea of instructing the XCVM to change some implementation-specific behavior is useful. In order to generalize this mechanism, this RFC introduces a new instruction SetHints and makes SetAssetClaimer just one of many possible execution hints.


              Motivation

There is a need to specify how certain implementation-specific things should behave: things like who can claim the assets, or what can be done instead of trapping assets. Other ideas for hints:

            • AssetForFees: to signify to the executor what asset the user prefers to use for fees.
            • LeftoverAssetsDestination: for depositing leftover assets to a destination instead of trapping them

            Stakeholders

            • Runtime devs
            • Wallets
            • dApps

            Explanation

A new instruction, SetHints, will be added. This instruction will take a single parameter, hints, a bounded vector of items of type Hint, an enumeration. The first variant for this enum is AssetClaimer, which allows specifying a location that should be able to claim trapped assets.

type NumVariants = /* Number of variants of the `Hint` enum */;

            Hints are specified on a per-message basis, so they have to be specified at the beginning of a message. If they were to be specified at the end, hints like AssetClaimer would be useless if an error occurs beforehand and assets get trapped before ever reaching the hint.

            The instruction takes a bounded vector of hints so as to not force barriers to allow an arbitrary number of SetHint instructions.
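
A sketch of the instruction and hint enumeration as described above; the syntax is illustrative, with NumVariants bounding the vector as mentioned:

// Illustrative definition.
SetHints { hints: BoundedVec<Hint, NumVariants> }

enum Hint {
    // Allows specifying a location able to claim assets trapped by this program.
    AssetClaimer { location: Location },
    // Further variants (e.g. fee-asset or leftover-destination hints) can be
    // added later; NumVariants keeps the vector bound in sync.
}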


            Performance, Ergonomics, and Compatibility

            Performance

            None.

            Ergonomics

Also, this instruction would make it simpler to write XCM programs. You only need to specify the hints you want in one single instruction at the top of your program.

            Compatibility

            None.


            Prior Art and References

            The previous RFC PR in the xcm-format repository before XCM RFCs moved to fellowship RFCs: https://github.com/polkadot-fellows/xcm-format/pull/59.


            Unresolved Questions

            None.

Future Directions and Related Material

            None.

            (source)

            Summary

            This RFC aims to remove the NetworkIds of Westend and Rococo, arguing that testnets shouldn't go in the language.


            Motivation

We've already seen the plans to phase out Rococo, and Paseo has appeared. Instead of constantly changing the testnets included in the language, we should favor specifying them via their genesis hash, using NetworkId::ByGenesis.


            Stakeholders

            • Runtime devs
            • Wallets
            • dApps

            Explanation

            Remove Westend and Rococo from the included NetworkIds in the language.

            Drawbacks

            This RFC will make it less convenient to specify a testnet, but not by a large amount.

            Testing, Security, and Privacy

            None.


            Performance, Ergonomics, and Compatibility

            Performance

            None.

            Ergonomics

            It will very slightly reduce the ergonomics of testnet developers but improve the stability of the language.

            Compatibility

            NetworkId::Rococo and NetworkId::Westend can just use NetworkId::ByGenesis, as can other testnets.
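
For example, a minimal sketch of the replacement (the genesis-hash constant is a placeholder, not the real value):

// Before: a built-in variant that has to live in the language forever.
NetworkId::Westend
// After: the same network identified by its genesis hash.
NetworkId::ByGenesis(WESTEND_GENESIS_HASH)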


            Prior Art and References

            A previous attempt to add NetworkId::Paseo: https://github.com/polkadot-fellows/xcm-format/pull/58.


            Unresolved Questions

            None.

Future Directions and Related Material

            None.

            (source)

Authors: Adrian Catangiu

            Summary

            XCM programs generated by the InitiateAssetTransfer instruction shall have the option to carry over the original origin all the way to the final destination. They shall do so by internally making use of AliasOrigin or ClearOrigin depending on given parameters.

            This allows asset transfers to retain their original origin even across multiple hops.

            Ecosystem chains would have to change their trusted aliasing rules to effectively make use of this feature.


            Motivation

            Currently, all XCM asset transfer instructions ultimately clear the origin in the remote XCM message by use of the ClearOrigin instruction. This is done for security considerations to ensure that subsequent (user-controlled) instructions cannot command the authority of the sending chain.

            The problem with this approach is that it limits what can be achieved on remote chains through XCM. Most XCM operations require having an origin, and following any asset transfer the origin is lost, meaning not much can be done other than depositing the transferred assets to some local account or transferring them onward to another chain.

            For example, we cannot transfer some funds for buying execution, then do a Transact (all in the same XCM message).

            Transact XCM programs today require a two step process:

[Figure: Transact Today]

            And we want to be able to do it using a single XCM program.


            Stakeholders

            Runtime Users, Runtime Devs, wallets, cross-chain dApps.


            Explanation

            In the case of XCM programs going from source-chain directly to dest-chain without an intermediary hop, we can enable scenarios such as above by using the AliasOrigin instruction instead of the ClearOrigin instruction.

Instead of clearing the source-chain origin, the destination chain shall attempt to alias source-chain to the "original origin" on the source chain. The most common such origin aliasing would be X1(Parachain(source-chain)) -> X2(Parachain(source-chain), AccountId32(origin-account)) for the case of a single-hop transfer where the initiator is a (signed/pure/proxy) account origin-account on source-chain.
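
A sketch of the choice described above; the preserve_origin switch and the reanchored-origin argument are illustrative assumptions about how the selection between AliasOrigin and ClearOrigin could be wired into the transfer instruction:

// Illustrative: pick the origin-handling instruction for the remote XCM.
fn origin_instruction(preserve_origin: bool, origin_reanchored: Location) -> Instruction {
    if preserve_origin {
        // e.g. alias Parachain(source) -> (Parachain(source), AccountId32(who))
        AliasOrigin(origin_reanchored)
    } else {
        ClearOrigin
    }
}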

Normally, XCM program builders should audit their programs and eliminate assumptions of "no origin" on the remote side of this instruction. In this case, the InitiateAssetsTransfer instruction has not been released yet; it will be part of XCMv5, and we can make this change part of the same XCMv5, so that there isn't even the possibility of someone in the wild having built XCM programs using this instruction on those wrong assumptions.

            The working assumption going forward is that the origin on the remote side can either be cleared or it can be the local origin's reanchored location. This assumption is in line with the current behavior of remote XCM programs sent over using pallet_xcm::send.

The existing DepositReserveAsset, InitiateReserveWithdraw and InitiateTeleport cross-chain asset transfer instructions will not attempt to do origin aliasing and will always clear the origin, same as before, for compatibility reasons.


            Performance, Ergonomics, and Compatibility

            Performance

            No impact.

            Ergonomics

Compatibility

            For compatibility reasons, this RFC proposes this mechanism be added as an enhancement to the yet unreleased InitiateAssetsTransfer instruction, thus eliminating possibilities of XCM logic breakages in the wild. Following the same logic, the existing DepositReserveAsset, InitiateReserveWithdraw and InitiateTeleport cross chain asset transfer instructions will not attempt to do origin aliasing and will always clear the origin same as before for compatibility reasons.

Any one of the DepositReserveAsset, InitiateReserveWithdraw and InitiateTeleport instructions can be replaced with an InitiateAssetsTransfer instruction, with or without origin aliasing, thus providing a clean and clear upgrade path for opting in to this new feature.


            Prior Art and References


            Unresolved Questions

            None


            (source)

Authors: Bastian Köcher

              Summary

              The code of a runtime is stored in its own state, and when performing a runtime upgrade, this code is replaced. The new runtime can contain runtime migrations that adapt the state to the state layout as defined by the runtime code. This runtime migration is executed when building the first block with the new runtime code. Anything that interacts with the runtime state uses the state layout as defined by the runtime code. So, when trying to load something from the state in the block that applied the runtime upgrade, it will use the new state layout but will decode the data from the non-migrated state. In the worst case, the data is incorrectly decoded, which may lead to crashes or halting of the chain.

              This RFC proposes to store the new runtime code under a different storage key when applying a runtime upgrade. This way, all the off-chain logic can still load the old runtime code under the default storage key and decode the state correctly. The block producer is then required to use this new runtime code to build the next block. While building the next block, the runtime is executing the migrations and moves the new runtime code to the default runtime code location. So, the runtime code found under the default location is always the correct one to decode the state from which the runtime code was loaded.


              Motivation

              While the issue of having undecodable state only exists for the one block in which the runtime upgrade was applied, it still impacts anything that reads state data, like block explorers, UIs, nodes, etc. For block explorers, the issue mainly results in indexing invalid data and UIs may show invalid data to the user. For nodes, reading incorrect data may lead to a performance degradation of the network. There are also ways to prevent certain decoding issues from happening, but it requires that developers are aware of this issue and also requires introducing extra code, which could introduce further bugs down the line.

              So, this RFC tries to solve these issues by fixing the underlying problem of having temporary undecodable state.


              Stakeholders

              • Relay chain/Parachain node developers
              • Relay chain/Parachain node operators

              Explanation

The runtime code is stored under the special key :code in the state. Nodes and other tooling read the runtime code under this storage key when they want to interact with the runtime, e.g., for building/importing blocks or getting the metadata to read the state. To update the runtime code, the runtime overwrites the value at :code, and from the next block on, the new runtime will be loaded.

This RFC proposes to first store the new runtime code under :pending_code in the state for one block. When the next block is being built, the block builder first needs to check if :pending_code is set, and if so, it needs to load the runtime from this storage key. While building the block, the runtime will move :pending_code to :code to have the runtime code at the default location. Nodes importing the block will also need to load :pending_code if it exists to ensure that the correct runtime code is used. By doing it this way, the runtime code found at :code in the state of a block will always be able to decode the state.

Furthermore, this RFC proposes to introduce system_version: 3. The system_version was introduced in RFC42. Version 3 would then enable the usage of :pending_code when applying a runtime code upgrade. This way, the feature can be introduced first and enabled later, when the majority of the nodes have upgraded.
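
A minimal sketch of the lookup rule a block builder or importer would follow once system_version: 3 is enabled; the state-access helper is illustrative:

// Illustrative: pick the runtime code for executing the next block.
// `state_get` stands in for whatever state-query API the node uses.
fn runtime_code_for_next_block(state_get: impl Fn(&[u8]) -> Option<Vec<u8>>) -> Option<Vec<u8>> {
    // A pending upgrade (at :pending_code) takes precedence for exactly one
    // block; the runtime moves it to :code while that block is built.
    state_get(b":pending_code").or_else(|| state_get(b":code"))
}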

Drawbacks

There is still the possibility of having state that is not migrated even when following the proposal presented by this RFC. The issue is that if the amount of data to be migrated is too big, not all of it can be migrated in one block: either it would take more time than is assigned to a block, or, for parachains, it would exceed the fixed budget for their proof of validity. To solve this issue there already exist multi-block migrations that can chunk the migration across multiple blocks. Consensus-critical data needs to be migrated in the first block to ensure that block production etc. can continue. For the other data being migrated by multi-block migrations, the migrations could, for example, expose to the outside which keys are being migrated and should not be indexed until the migration is finished.

              Testing, Security, and Privacy

Testing should be straightforward, and most of the existing testing should already be good enough; it only needs extending with some checks that :pending_code is moved to :code.


              Performance, Ergonomics, and Compatibility

              Performance

              The performance should not be impacted besides requiring loading the runtime code in the first block being built with the new runtime code.

              Ergonomics

              Compatibility

The change will require that the nodes are upgraded before the runtime starts using this feature. Otherwise, they will fail to import the blocks built using :pending_code. For Polkadot/Kusama this means that the parachain nodes also need to be running with a relay chain node version that supports this new feature. Otherwise, the parachains will stop producing/finalizing blocks as they cannot sync the relay chain anymore.


              Prior Art and References

              The issue initially reported a bug that led to this RFC. It also discusses multiple solutions for the problem.


              Unresolved Questions

              None

Future Directions and Related Material

              • Solve the issue of requiring loading the entire runtime code to move it into a different location by introducing a low-level move function. When using the V1 trie layout every value bigger than 32 bytes is put into the db separately. This means a low level move function would only need to move the hash of the runtime code from :code to :pending_code.

Authors: Daniel Shiposha

              Summary

              This RFC proposes a metadata format for XCM-identifiable assets (i.e., for fungible/non-fungible collections and non-fungible tokens) and a set of instructions to communicate it across chains.


              Motivation

              Currently, there is no way to communicate metadata of an asset (or an asset instance) via XCM.

              The ability to query and modify the metadata is useful for two kinds of entities:

              Besides metadata modification, the ability to read it is also valuable. On-chain logic can interpret the NFT metadata, i.e., the metadata could have not only the media meaning but also a utility function within a consensus system. Currently, such a way of using NFT metadata is possible only within one consensus system. This RFC proposes making it possible between different systems via XCM so different chains can fetch and analyze the asset metadata from other chains.


              Stakeholders

              Runtime users, Runtime devs, Cross-chain dApps, Wallets.


              Explanation

              The Asset Metadata is information bound to an asset class (fungible or NFT collection) or an asset instance (an NFT). The Asset Metadata could be represented differently on different chains (or in other consensus entities). However, to communicate metadata between consensus entities via XCM, we need a general format so that any consensus entity can make sense of such information.

              In terms of performance and privacy, there will be no changes.

              Testing, Security, and Privacy

              The implementations must honor the contract for the new instructions. Namely, if the instance field has the value of AssetInstance::Undefined, the metadata must relate to the asset collection but not to a non-fungible token inside it.
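
A sketch of that contract; the MetadataTarget type is hypothetical and only shows how the AssetInstance::Undefined variant selects collection-level metadata:

// Illustrative: route a metadata query depending on the `instance` field.
fn metadata_target(asset: AssetId, instance: AssetInstance) -> MetadataTarget {
    match instance {
        // Undefined instance => the metadata concerns the collection itself.
        AssetInstance::Undefined => MetadataTarget::Collection(asset),
        // Any concrete instance => metadata of that specific NFT.
        other => MetadataTarget::Token(asset, other),
    }
}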


              Performance, Ergonomics, and Compatibility

              Performance

              No significant impact.

              Ergonomics

              Introducing a standard metadata format and a way of communicating it is a valuable addition to the XCM format that potentially increases cross-chain interoperability without the need to form ad-hoc chain-to-chain integrations via Transact.

              Compatibility

              This RFC proposes new functionality, so there are no compatibility issues.


              Prior Art and References

              RFC: XCM Asset Metadata

Future Directions and Related Material

              The original RFC draft contained additional metadata instructions. Though they could be useful, they're clearly outside the basic logic. So, this RFC version omits them to make the metadata discussion more focused on the core things. Nonetheless, there is hope that metadata approval instructions might be useful in the future, so they are mentioned here.

              You can read about the details in the original draft.

              (source)

Authors: Bryan Chen, Jiyuan Zheng

              Summary

              This proposal introduces PVQ (PolkaVM Query), a unified query interface that bridges different chain runtime implementations and client tools/UIs. PVQ provides an extension-based system where runtime developers can expose chain-specific functionality through standardized interfaces, while allowing client-side developers to perform custom computations on the data through PolkaVM programs. By abstracting away concrete implementations across chains and supporting both off-chain and cross-chain scenarios, PVQ aims to reduce code duplication and development complexity while maintaining flexibility for custom use cases.


              Motivation

In Substrate, runtime APIs facilitate off-chain clients in reading the state of the consensus system. However, the APIs defined and implemented by individual chains often fall short of meeting the diverse requirements of client-side developers. For example, client-side developers may want some aggregated data from multiple pallets, or to apply various custom transformations on the raw data. As a result, client-side developers frequently resort to directly accessing storage.


          Stakeholders

          • Runtime Developers
          • Tools/UI Developers

          Explanation

          The core idea of PVQ is to have a unified interface that meets the aforementioned requirements.
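
As a rough sketch of the idea (all names are hypothetical; the concrete extension and program APIs are specified later in this RFC), a PVQ program is a small PolkaVM binary that calls standardized extension APIs and applies client-defined computation to the results:

// Hypothetical PVQ program: aggregate balances over several accounts via an
// assumed fungibles extension, returning a single value to the client.
fn sum_balances(ext: &dyn FungiblesExtension, accounts: &[AccountId]) -> u128 {
    accounts.iter().map(|who| ext.balance(who)).sum()
}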

On the runtime side, an extension-based system is introduced to serve as a standardization layer across different chains. Each extension specification defines a set of cohesive APIs.

Testing, Security, and Privacy

N/A


        Performance, Ergonomics, and Compatibility

        Performance

        As a newly introduced feature, PVQ operates independently and does not impact or degrade the performance of existing runtime implementations.

        Ergonomics

        Compatibility

        For RuntimeAPI integration, the proposal defines new APIs, which do not break compatibility with existing interfaces. For XCM Integration, the proposal does not modify the existing XCM message format, which is backwards compatible.


        Prior Art and References

        There are several discussions related to the proposal, including:

        • Original discussion about having a mechanism to avoid code duplications between the runtime and front-ends/wallets. In the original design, the custom computations are compiled as a wasm function.
• View functions aim to provide view-only functions at the pallet level. Additionally, the Facade Project aims to gather and return commonly wanted information at the runtime level. PVQ does not conflict with them, and it can take advantage of these Pallet View Functions / Runtime APIs and allow people to build arbitrary PVQ programs to obtain more custom/complex data that is not otherwise expressed by these two proposals.

        Unresolved Questions

        • The specific conversion between gas and weight has not been finalized and will likely require development of a suitable benchmarking methodology.
Future Directions and Related Material

        Once PVQ and the aforementioned Facade Project are ready, there are opportunities to consolidate overlapping functionality between the two systems. For example, the metadata APIs could potentially be unified to provide a more cohesive interface for runtime information. This would help reduce duplication and improve maintainability while preserving the distinct benefits of each approach.

        (source)

Authors: s0me0ne-unkn0wn (13WGadgNgqSjiGQvfhimw9pX26mvGdYQ6XgrjPANSEDRoGMt)


        Summary

        This RFC proposes a change that makes it possible to identify types of compressed blobs stored on-chain, as well as used off-chain, without the need for decompression.


        Motivation

Currently, a compressed blob does not give any idea of what's inside, because the only thing that can be inside, according to the spec, is Wasm. In reality, other blob types are already being used, and more are to come. Apart from being error-prone by itself, the current approach does not allow the blob to be properly routed through the execution paths before its decompression, which will result in suboptimal implementations when more blob types are used. Thus, it is necessary to introduce a mechanism for identifying the blob type without decompressing it.

        This proposal is intended to support future work enabling Polkadot to execute PolkaVM and, more generally, other-than-Wasm parachain runtimes, and allow developers to introduce arbitrary compression methods seamlessly in the future.


        Stakeholders

        Node developers are the main stakeholders for this proposal. It also creates a foundation on which parachain runtime developers will build.


        Explanation

        Overview

The current approach to compressing binary blobs involves using zstd compression, and the resulting compressed blob is prefixed with a unique 64-bit magic value specified in the spec. The same procedure is used to compress both Wasm code blobs and proofs-of-validity. Currently, having solely a compressed blob, it's impossible to tell without decompression whether it contains a Wasm blob or a PoV. That doesn't cause problems in the current protocol, as Wasm blobs and PoV blobs take completely different execution paths in the code.

        The changes proposed below are intended to define the means for distinguishing compressed blob types in a backward-compatible and future-proof way.
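As a hedged illustration of the mechanism (the magic values and all names below are placeholders, not the values the spec assigns), identifying a blob type without decompression reduces to a prefix check:

const WASM_BLOB_MAGIC: [u8; 8] = *b"WASMBLOB"; // placeholder value
const POV_BLOB_MAGIC: [u8; 8] = *b"POVBLOB0"; // placeholder value

enum BlobType { Wasm, Pov, Unknown }

/// Identify a compressed blob by its magic prefix, without decompressing it.
fn blob_type(blob: &[u8]) -> BlobType {
    if blob.starts_with(&WASM_BLOB_MAGIC) {
        BlobType::Wasm
    } else if blob.starts_with(&POV_BLOB_MAGIC) {
        BlobType::Pov
    } else {
        BlobType::Unknown
    }
}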


      Testing, Security, and Privacy

      As the change increases granularity, it will positively affect both testing possibilities and security, allowing developers to check what's inside a given compressed blob precisely. Testing the change itself is trivial. Privacy is not affected by this change.


      Performance, Ergonomics, and Compatibility

      Performance

      The current implementation's performance is not affected by this change. Future implementations allowing for the execution of other-than-Wasm parachain runtimes will benefit from this change performance-wise.

      Ergonomics

End-user ergonomics are not affected. Developer ergonomics will benefit from this change, as it enables exact checks and less guessing.

      Compatibility

      The change is designed to be backward-compatible.


      Prior Art and References

      SDK PR#6704 (WIP) introduces a mechanism similar to that described in this proposal and proves the necessity of such a change.


      Unresolved Questions

      None

Future Directions and Related Material

      This proposal creates a foundation for two future work directions:

      • Proposing to introduce other-than-Wasm code executors, including PolkaVM, allowing parachain runtime authors to seamlessly change execution platform using the existing mechanism of runtime upgrades;
• …

Authors: ordian


        Summary

        This RFC proposes changes to the erasure coding algorithm and the method for computing the erasure root on Polkadot to improve performance of both processes.


        Motivation

The Data Availability (DA) Layer in Polkadot provides a foundation for shared security, enabling Approval Checkers and Collators to download Proofs-of-Validity (PoV) for security and liveness purposes respectively.

The proposed change is orthogonal to RFC-47 and can be used in conjunction with it. Since activation requires a breaking change (affecting validator and collator nodes), we propose bundling another performance-enhancing breaking change that addresses the CPU bottleneck in the erasure coding process, but using a separate node feature (NodeFeatures, part of HostConfiguration) for its activation.


        Stakeholders

        • Infrastructure providers (operators of validator/collator nodes) will need to upgrade their client version in a timely manner

        Explanation

        We propose two specific changes:

1. Changing the erasure coding algorithm.
2. Changing the method for computing the erasure root.

      Compatibility

      This requires a breaking change that can be coordinated following the same approach as in RFC-47.


      Prior Art and References

      JAM already utilizes the same optimizations described in the Graypaper.


      Unresolved Questions

      None.

Future Directions and Related Material

      Future improvements could include:

      • Using ZK proofs to eliminate the need for re-encoding data to verify correct encoding

Authors: Jonas Gehrlein


        Summary

        This RFC proposes burning 80% of transaction fees accrued on Polkadot’s Relay Chain and, more significantly, on all its system parachains. The remaining 20% would continue to incentivize Validators (on the Relay Chain) and Collators (on system parachains) for including transactions. The 80:20 split is motivated by preserving the incentives for Validators, which are crucial for the security of the network, while establishing a consistent fee policy across the Relay Chain and all system parachains.
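As a minimal sketch of the proposed split (the function name is illustrative, not actual runtime code):

/// Split a transaction fee 80:20: burn 80%, keep 20% for the block producer.
fn split_fee(fee: u128) -> (u128, u128) {
    let to_producer = fee / 5;      // 20% still incentivizes inclusion
    let burned = fee - to_producer; // 80% is destroyed, reducing total issuance
    (burned, to_producer)
}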


This proposal extends the system's deflationary direction and enables direct value capture for DOT holders from overall increased activity on the network.


        Motivation

        Historically, transaction fees on both the Relay Chain and the system parachains (with a few exceptions) have been relatively low. This is by design—Polkadot is built to scale and offer low-cost transactions. While this principle remains unchanged, growing network activity could still result in a meaningful accumulation of fees over time.

Implementing this RFC ensures that potentially increasing activity, manifesting in more fees, is captured for all token holders. It further aligns how the network handles fees (such as those from transactions or for coretime usage). The arguments in support of this are close to those outlined in RFC0010. Specifically, burning transaction fees has the following benefits:

        Compensation for Coretime Usage


        Value Accrual and Deflationary Pressure

By burning the transaction fees, the system effectively reduces the token supply and thereby increases the scarcity of the native token. This deflationary pressure can increase the token's long-term value and ensures that the value captured is translated equally to all existing token holders.

        This proposal requires only minimal code changes, making it inexpensive to implement, yet it introduces a consistent policy for handling transaction fees across the network. Crucially, it positions Polkadot for a future where fee burning could serve as a counterweight to an otherwise inflationary token model, ensuring that value generated by network usage is returned to all DOT holders.


        Stakeholders

        • All DOT Token Holders: Benefit from reduced supply and direct value capture as network usage increases.


Authors: eskimor


          Summary

This RFC proposes an amendment to RFC-1 Agile Coretime: renewal prices will no longer be adjusted based only on a configurable renewal bump, but also raised to the lower end of the current sale price, if that turns out to be higher.

          An implementation can be found here.


          Motivation

In RFC-1, we strove for perfect predictability of renewal prices, but what we expected unfortunately got proven in practice: perfect predictability allows for core hoarding and cheap market manipulation, with the effect that both on … extend to elastic scaling and, in practice, even existing teams wanting to keep their core, because they forgot to renew in the interlude.

In a nutshell, the current situation severely hinders teams from deploying on Polkadot: we are essentially in a denial-of-service situation.


          Stakeholders

Stakeholders are existing teams that already have a core and new teams wanting to join the ecosystem.


          Explanation

This RFC proposes to fix this situation by limiting renewal price predictability to reasonable levels, introducing a weak coupling to the current market price: we ensure that the price for renewals is at least as high … tenants. Having them exposed at least with this 10x reduction seems a sensible valuation.

        • There are no privacy concerns.
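A minimal sketch of the amended renewal pricing rule described above, with hypothetical names (the actual logic lives in the broker pallet):

/// Renewal price: the configurable bump from RFC-1 still applies, but the
/// result is now floored by the lower end of the current sale.
fn renewal_price(old_price: u128, renewal_bump_percent: u128, sale_lower_end: u128) -> u128 {
    // Price adjusted by the configurable renewal bump, as before.
    let bumped = old_price + old_price * renewal_bump_percent / 100;
    // Amendment: renewals never undercut the lower end of the current sale.
    bumped.max(sale_lower_end)
}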


          Performance, Ergonomics, and Compatibility

          The proposed changes are backwards compatible. No interfaces are changed. Performance is not affected. Ergonomics should be greatly improved especially for new entrants, as cores will be available for sale again. A configured minimum price also ensures that the starting price of the Dutch auction stays reasonably high, deterring sniping all the cores at the beginning of a sale.
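For intuition, a sketch of a sale leadin price curve with a configured minimum (the linear decay and names here are illustrative, not the broker pallet's actual curve):

/// Price during the leadin of a sale: starts high and decays toward the
/// configured minimum, never dropping below it.
fn leadin_price(start_price: u128, min_price: u128, elapsed: u32, leadin_len: u32) -> u128 {
    if elapsed >= leadin_len {
        return min_price;
    }
    let span = start_price.saturating_sub(min_price);
    start_price - span * elapsed as u128 / leadin_len as u128
}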


          Prior Art and References

          This RFC is altering RFC-1 and taking ideas from RFC-17, mainly the introduction of a minimum price.

Future Directions and Related Material

          This RFC should solve the immediate problems we are seeing in production right now. Longer term, improvements to the market in terms of price discovery (RFC-17) should be considered, especially once demand grows.


Authors: Jeff Burdges, Alistair Stewart


          Summary

Availability (bitfield) votes gain a preferred_fork flag which expresses the validator's opinion upon relay chain equivocations and babe forks, while still sharing availability votes for all relay chain blocks. We make relay chain block production require a supermajority with preferred_fork set, so forks cannot advance if they split the honest validators, which creates an early soft consensus. We similarly defend ELVES from relay chain equivocation attacks and prevent redundant approvals across babe forks.


          Motivation

We've always known relay chain equivocations break the ELVES threat model. We originally envisioned ELVES having fallback pathways, but doing fallbacks requires dangerous, subtle debugging. We support more assignment schemes in ELVES this way too, including one novel post-quantum scheme, and very low CPU usage schemes.

We expect this early soft consensus creates back pressure that improves performance under babe forks.

          Alistair: TODO?


          Stakeholders

We modify the availability votes and restrict relay chain blocks, fork choice, and ELVES start conditions, so this mostly affects the parachain protocol. See the alternatives notes on the flag under sassafras chains like JAM.


          Explanation

          Availability voting

          At present, availability votes have a bitfield representing the cores, a relay_parent, and a signature. We process these on-chain in several steps: We first validate the signatures, zero any bits for cores included/enacted between the relay_parent and our predecessor, sum the set bits for each core, and finally include/enact the core if this exceeds 2/3rds of the validators.

          Availability votes gain a preferred_fork flag, which honest validators set for exactly one relay_parent on their availability votes in a block production slot. We say a validator prefers a fork given by chain head h if it provides an availability vote with relay_parent = h and preferred_fork set.
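As a hedged sketch (type and field names are illustrative, not the actual Polkadot primitives), the amended vote could look like:

type Hash = [u8; 32];
type Signature = [u8; 64];

struct AvailabilityVote {
    /// One bit per availability core: set if we hold our chunk for the
    /// candidate occupying that core.
    bitfield: Vec<u8>,
    /// The relay chain block these bits refer to.
    relay_parent: Hash,
    /// New in this RFC: set for exactly one relay_parent per block production
    /// slot, expressing which fork (or equivocation) this validator prefers.
    preferred_fork: bool,
    /// Validator signature over the fields above.
    signature: Signature,
}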


We sometimes obtain an even more preferable header during import, chunk distribution, and delays for our first tentatively preferred fork. Also, the first could simply turn out invalid. In either case, we loop to repeat this second step on our new tentative preferred fork. We repeat this process until an import succeeds and its timers run out, without receiving any more preferable header. Actual equivocations cannot be preferable over one another, so this loop terminates reasonably quickly.

          Next, we broadcast our availability vote with its relay_parent set to our tentatively preferred fork, and with its preferred_fork set.

Finally, if 2f+1 other validators have a different preference from us, then we download and import their preferred relay chain block, fetch chunks for it, and provide availability votes with preferred_fork zero. It's possible this occurs before our own preference finishes, in which case we probably still send out our preference, if only for forensic evidence.


          Concerns: Drawbacks, Testing, Security, and Privacy

Adds subtle timing constraints, which could entrench existing performance obstacles. We might explore variations that ignore wall clock time.

We've always known relay chain equivocations break the ELVES threat model. We originally envisioned ELVES having fallback pathways, but these were complex and demanded unused code paths, which cannot realistically be debugged. Although complex, the early soft consensus scheme feels less complex overall. We know timing sucks to optimise in a distributed system, but at least doing so uses everyday code paths.


          Performance, Ergonomics, and Compatibility

We expect early soft consensus introduces back pressure that radically alters performance. We no longer run approval checks upon all forks. As primary slots occur once every other slot in expectation, one might expect a 25% reduction in CPU load, but this depends upon diverse factors.

We apply back pressure by dropping some whole relay chain blocks though, so this shall increase the expected parachain blocktime somewhat, but how much depends upon future optimisation work.

          Compatibility

          Major upgrade


          Prior Art and References

          ...


          Unresolved Questions

          We halt the chain when less than 2/3 of validators are online. We consider this reasonable since governance now runs on a parachain, ELVES would not secure, and nothing can be finalized anyways. We could perhaps add some "recovery mode" where the relay chain embeds entire system parachain blocks, but doing so might not warrant the effort required.

Future Directions and Related Material

          Sassafras

Arguably, a sassafras RC like JAM could avoid the preferred_fork flag, by only releasing availability votes for at most one sassafras equivocation. We wanted availability for babe forks, but sassafras has only equivocations, so those blocks can simply be dropped.

          In principle, a sassafras equivocation could still enter the valid chain, assuming 2/3rd of validators provide availability votes for the same equivocations. If JAM lacks the preferred_fork flag then enactment proceeds slower in this case, but this should almost never occur.


We know parachains could base their slots upon relay chain slots, instead of wall clock time (RFC ToDo). After this happens, we could avoid or minimize wall clock timing in the relay chain too, so that relay chain slots could have a floating duration based upon workload.

          Partial relay chain blocks

Above, we only discuss abandoning relay chain blocks which fail early soft consensus. We could alternatively treat them as partial blocks and build extension partial blocks that complete them, with ELVES probably using randomness from the final partial block.


          (source)


          RFC-0000: Validator Rewards

Start Date: Date of initial proposal
Description: Rewards protocol for Polkadot validators
Authors: Jeff Burdges, ...

          Summary

An off-chain approximation protocol should assign rewards based upon the approvals and availability work done by validators.

          All validators track which approval votes they actually use, reporting the aggregate, after which an on-chain median computation gives a good approximation under byzantine assumptions. Approval checkers report aggregate information about which availability chunks they use too, but in availability we need a tit-for-tat game to enforce honesty, because approval committees could often bias results thanks to their small size.

Motivation

We want all or most polkadot subsystems to be profitable for validators, because otherwise operators might profit from running modified code. In particular, almost all rewards in Kusama/Polkadot should come from work done securing parachains, primarily approval checking, but also backing, availability, and support of XCMP.

Among these tasks, our highest priorities must be approval checks, which ensure soundness, and sending availability chunks to approval checkers. We prove backers must be paid strictly less than approval checkers.

At present though, validators' rewards have relatively little relationship to validators' operating costs, in terms of bandwidth and CPU time. Worse, polkadot's scaling makes us particularly vulnerable to "no-shows" caused by validators skipping their approval checks.

We're particularly concerned about hardware specs' impact upon the number of parachain cores. We've requested relatively low spec machines so far, only four physical CPU cores, although some run even lower specs like only two physical CPU cores. Alone, rewards cannot fix our low-specced validator problem, but rewards and outreach together should have far more impact than either alone.

In future, we'll further increase validator spec requirements, which directly improves polkadot's throughput, and which repeats this dynamic of purging underspecced nodes, except outreach becomes more important because de facto too many slow validators can "out vote" the faster ones.

Stakeholders

We alter the validators' rewards protocol, but with negligible impact upon rewards for honest validators who comply with hardware and bandwidth recommendations.

We shall still reward participation in relay chain consensus of course, which de facto means block production but not finality, but these current reward levels shall wind up greatly reduced. Any validators who manipulate block rewards now could lose rewards here, simply because of rewards being shifted from block production to availability, but this sounds desirable.

We've discussed roughly this rewards protocol in https://hackmd.io/@rgbPIkIdTwSICPuAq67Jbw/S1fHcvXSF and https://github.com/paritytech/polkadot-sdk/issues/1811 as well as related topics like https://github.com/paritytech/polkadot-sdk/issues/5122

          Logic

Categories

We alter the current rewards scheme by reducing it to roughly these proportions of total rewards:

• 15-20% - Relay chain block production and uncle logic
• 5% - Anything else related to relay chain finality, primarily beefy proving, but maybe other tasks exist.
• Any existing rewards for on-chain validity statements would only cover backers, so those rewards must be removed.

          We add roughly these proportions of total rewards covering parachain work:

• 70-75% - approval and backing validity checks, with the backing rewards being required to be less than approval rewards.
• 5-10% - Availability redistribution from availability providers to approval checkers. We do not reward for availability distribution from backers to availability providers.

          Observation

We track this data for each candidate during the approvals process:

/// Our subjective record of our availability transfers for this candidate.
struct CandidateRewards {
    /// Anyone who backed this parablock
    backers: [AuthorityId; NumBackers],
    /// Anyone whom we think no-showed, even only briefly.
    noshows: HashSet<AuthorityId>,
    /// Anyone who sent us chunks for this candidate
    downloaded_from: HashMap<AuthorityId, u16>,
    /// Anyone to whom we sent chunks for this candidate
    uploaded_to: HashMap<AuthorityId, u16>,
}

          We no longer require this data during disputes.


          After we approve a relay chain block, then we collect all its CandidateRewards into an ApprovalsTally, with one ApprovalTallyLine for each validator. In this, we compute approval_usages from the final run of the approvals loop, plus 0.8 for each backer.

As discussed below, we say a validator 𝑢 uses an approval vote by a validator 𝑣 on a candidate 𝑐 if the final approving run of the elves approval loop by 𝑢 counted the vote by 𝑣 towards approving the candidate 𝑐. We only count votes that actually get used.

/// Our subjective record of what we used from, and provided to, all other validators on the finalized chain
pub struct ApprovalsTally(Vec<ApprovalTallyLine>);

/// Our subjective record of what we used from, and provided to, one other validator on the finalized chain
pub struct ApprovalTallyLine {
    /// Approvals by this validator which our approvals gadget used in marking candidates approved.
    approval_usages: u32,
    /// How many times we think this validator no-showed, even only briefly.
    noshows: u32,
    /// Availability chunks we downloaded from this validator for our approval checks we used.
    used_downloads: u32,
    /// Availability chunks we uploaded to this validator whose approval checks we used.
    used_uploads: u32,
}

At finality, we sum these ApprovalsTally values into one ApprovalsTally covering the whole epoch so far. We can optionally sum them earlier at chain heads, but this requires mutability.
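Concretely, the per-block sums are field-wise, as in this sketch using the types above:

/// Fold one block's tally into the epoch-so-far tally, line by line.
fn accumulate(epoch: &mut ApprovalsTally, block: &ApprovalsTally) {
    for (e, b) in epoch.0.iter_mut().zip(&block.0) {
        e.approval_usages += b.approval_usages;
        e.noshows += b.noshows;
        e.used_downloads += b.used_downloads;
        e.used_uploads += b.used_uploads;
    }
}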

Messages

After the epoch is finalized, we share the first three fields of each ApprovalTallyLine in its ApprovalsTally.

/// Our subjective record of what we used from some other validator on the finalized chain
pub struct ApprovalTallyMessageLine {
    /// Approvals by this validator which our approvals gadget used in marking candidates approved.
    approval_usages: u32,
    /// How many times we think this validator no-showed, even only briefly.
    noshows: u32,
    /// Availability chunks we downloaded from this validator for our approval checks we used.
    used_downloads: u32,
}

/// Our subjective record of what we used from all other validators on the finalized chain
pub struct ApprovalsTallyMessage(Vec<ApprovalTallyMessageLine>);

          Actual ApprovalsTallyMessages sent over the wire must be signed of course, likely by the grandpa ed25519 key.

Rewards computation

We compute the approvals rewards for each validator by taking the median of the approval_usages fields for each validator across all validators' ApprovalsTallyMessages. We compute some noshows_percentiles for each validator similarly, but using a 2/3 percentile instead of the median.

let mut approval_usages_medians = Vec::new();
let mut noshows_percentiles = Vec::new();
for i in 0..num_validators {
    // Median of the approval_usages reported for validator i across all messages.
    let mut v: Vec<u32> = approvals_tally_messages.iter().map(|atm| atm.0[i].approval_usages).collect();
    v.sort();
    approval_usages_medians.push(v[v.len() / 2]);
    // 2/3 level for noshows: the largest count that 2/3 of reporters attest to.
    let mut v: Vec<u32> = approvals_tally_messages.iter().map(|atm| atm.0[i].noshows).collect();
    v.sort();
    noshows_percentiles.push(v[v.len() / 3]);
}

Assuming more than 50% honesty, these medians tell us how many approval votes came from each validator.

We re-weight the used_downloads from the ith validator by their median, times the expected f+1 chunks, divided by how many chunk downloads they claimed, and sum them:

#[cfg(offchain)]
let mut my_missing_uploads: Vec<u64> =
    my_approvals_tally.0.iter().map(|l| l.used_uploads as u64).collect();
let mut reweighted_total_used_downloads = vec![0u64; num_validators];
// The j-th message comes from approval checker j; d is its total claimed downloads.
for (j, (mmu, atm)) in my_missing_uploads.iter_mut().zip(&approvals_tally_messages).enumerate() {
    let d: u64 = atm.0.iter().map(|l| l.used_downloads as u64).sum::<u64>().max(1);
    for i in 0..num_validators {
        // Re-weighted credit to provider i from checker j:
        // (f+1) * median_j * used_downloads_{i,j} / d
        let atm_from_i = (f as u64 + 1) * approval_usages_medians[j] as u64
            * atm.0[i].used_downloads as u64 / d;
        #[cfg(offchain)]
        if i == me { *mmu = (*mmu).saturating_sub(atm_from_i); }
        reweighted_total_used_downloads[i] += atm_from_i;
    }
}

We distribute rewards on-chain using approval_usages_medians and reweighted_total_used_downloads. Approval checkers could later change from whom they download chunks using my_missing_uploads.

We deduct a small amount of rewards using noshows_percentiles too, likely 1% of the rewards for an approval, but excuse some small number of noshows, a la noshows_percentiles[i].saturating_sub(MAX_NO_PENALTY_NOSHOWS).
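A minimal sketch of this deduction, treating MAX_NO_PENALTY_NOSHOWS and the 1% figure as placeholder values:

const MAX_NO_PENALTY_NOSHOWS: u32 = 2; // placeholder: noshows we excuse for free

/// Deduct roughly 1% of one approval's reward per non-excused noshow.
fn noshow_penalty(noshows_percentile: u32, approval_reward: u128) -> u128 {
    let penalized = noshows_percentile.saturating_sub(MAX_NO_PENALTY_NOSHOWS);
    (approval_reward / 100).saturating_mul(penalized as u128)
}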

Strategies

In theory, validators could adopt whatever strategy they like to penalize validators who stiff them on availability redistribution rewards, except they should not stiff back, only choose other availability providers. We discuss one good strategy below, but initially this could go unimplemented.

Consensus

We avoid placing rewards logic on the relay chain now, so we must either collect the signed ApprovalsTallyMessages and do the above computations somewhere sufficiently trusted, like a parachain, or via some distributed protocol with its own assumptions.

In-core

A dedicated rewards parachain could easily collect the ApprovalsTallyMessages and do the above computations. In this, we logically have two phases: first we build the on-chain Merkle tree M of ApprovalsTallyMessages, and second we process those into the rewards data.

          Any in-core approach risks enough malicious collators biasing the rewards by censoring the ApprovalsTallyMessages messages for some validators during the first phase. After this first phase completes, our second phase proceeds deterministically.

As an option, each validator could handle this second phase itself by creating a single heavy transaction with n state accesses in this Merkle tree M, and this transaction sends the era points.

A remark for future developments…

JAM-like non/sub-parachain accumulation could mitigate the risk of the rewards parachain being captured.

          JAM services all have either parachain accumulation or else non/sub-parachain accumulation.

          -
• A parachain should mean any service that tracks mutable state roots onto the relay chain, with its accumulation updating the state roots. Inherently, these state roots create some capture risk for the parachain, although how much depends upon numerous other factors.
• A non/sub-parachain means the service does not maintain state like a blockchain does, but could use some tiny state within the relay chain. Although seemingly less powerful than parachains, these non/sub-parachain accumulations could reduce the capture risk so that any validator could create a block for the service, without knowing any existing state.

In our case, each ApprovalsTallyMessage would become a block for the first phase rewards service, so then the accumulation tracks an MMR of the rewards service block hashes, which becomes the Merkle tree M from the in-core option above. At 1024 validators this requires 9 * 32 = 288 bytes for the MMR and 1024/8 = 128 bytes for a bitfield, so 416 bytes of relay chain state in total. Any validator could then add their ApprovalsTallyMessage in any order, but only one per relay chain block, so the submission timeframe should be long enough to prevent censorship.

Arguably after JAM, we should migrate critical functions to non/sub-parachain aka JAM services without mutable state, so this covers validator elections, DKGs, and rewards. Yet, non/sub-parachains cannot eliminate all censorship risks, so the near term benefits seem questionable.

Off-core

All validators could collect ApprovalsTallyMessages and independently compute rewards off-core. At that point, all validators have opinions about all other validators' rewards, but even among honest validators these opinions could differ if some lack some ApprovalsTallyMessages.

          We'd have the same in-core computation problem if we perform statistics like medians upon these opinions. We could however take an optimistic approach where each validator computes medians like above, but then shares their hash of the final rewards list. If 2/3rds voted for the same hash, then we distribute rewards as above. If not, then we distribute no rewards until governance selects the correct hash.

We never validate in-core the signatures on ApprovalsTallyMessages or the computation, so this approach permits more direct cheating by a malicious 2/3rds majority, but if that occurs then we've broken our security assumptions anyways. It's somewhat likely these hashes do diverge during some network disruptions though, which increases our "drama" factor considerably, which may be unacceptable.

Explanation

Backing

Polkadot's efficiency creates subtle liveness concerns: anytime one node cannot perform one of its approval checks, Polkadot loses in expectation 3.25 approval checks, or 0.10833 parablocks. This makes back pressure essential.

We cannot throttle approval checks securely either, so reactive off-chain back pressure only makes sense during or before the backing phase. In other words, if nodes feel overworked themselves, or perhaps believe others to be, then they should drop backing checks, never approval checks. It follows backing work must be rewarded less well and less reliably than approvals, as otherwise validators could benefit from behavior that harms the network.


          We propose that one backing statement be rewarded at 80% of one approval statement, so backers earn only 80% of what approval checkers earn. We omit rewards for availability distribution, so backers spend more on bandwidth too. Approval checkers always fetch chunks first from backers though, so good backers earn roughly 7% there, meaning backing checks earn roughly 13% less than approval checks. We should lower this 80% if we ever increase availability redistribution rewards.

Although imperfect, we believe this simplifies implementation, and provides robustness against mistakes elsewhere, including governance mistakes, but incurs minimal risk. In principle, a backer might not distribute systemic chunks, but approval checkers fetch systemic chunks from backers first anyways, so likely this yields negligible gains.

As always, we require that backers' rewards cover their operational costs plus some profit, but approval checks must be more profitable.

Approvals

In polkadot, all validators run the elves approval loop for each candidate, in which the validator listens to other approval checkers' assignments and approval statements/votes, and with which it marks checkers no-show or done, and marks candidates approved. Also, this loop determines and announces validators' own approval checker assignments.

Any validator should always conclude whatever approval checks it begins, but our approval assignment loop ignores some approval checks, either because they were announced too soon or because an earlier no-show delivered its approval vote before the final approval. We say a validator $u$ uses an approval vote by a validator $v$ on a candidate $c$ if the approval assignments loop by $u$ counted the vote by $v$ towards approving the candidate $c$. We actually rerun the elves approval loop quite frequently, but only the final run that marks the candidate approved determines the useful approval votes.

We should not reward votes announced too soon, so by only counting the final run we unavoidably omit rewards for some honest no-show replacements too. We expect the 80%-ish discount for backing covers these losses, so approval checks remain more profitable than backing.

We propose a simple approximate solution based upon computing medians across validators for used votes.

1. In an epoch $e$, each validator $u$ counts the number $\alpha_{u,v}$ of votes they used from each validator $v$, including themselves. Any time a validator marks a candidate approved, they increment these counts appropriately.

2. After epoch $e$'s last block gets finalized, all validators of epoch $e$ submit an approvals tally message ApprovalsTallyMessage that reveals the number $\alpha_{u,v}$ of useful approvals they saw from each validator $v$ on candidates that became available in epoch $e$. We do not send $\alpha_{u,u}$ for tit-for-tat reasons discussed below, not for bias concerns. We record these approvals tally messages on-chain.

3. After some delay, we compute on-chain the median $\alpha_v := \textrm{median}\{ \alpha_{u,v} : u \}$ of used approval statements for each validator $v$.

          As discussed in https://hackmd.io/@rgbPIkIdTwSICPuAq67Jbw/S1fHcvXSF we could compute these medians using the on-line algorithm if substrate had a nice priority queue.
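For reference, the classic on-line median keeps two heaps, a max-heap over the lower half and a min-heap over the upper half (a generic sketch, not Substrate code):

use std::cmp::Reverse;
use std::collections::BinaryHeap;

struct RunningMedian {
    lo: BinaryHeap<u32>,          // max-heap over the lower half
    hi: BinaryHeap<Reverse<u32>>, // min-heap over the upper half
}

impl RunningMedian {
    fn new() -> Self {
        Self { lo: BinaryHeap::new(), hi: BinaryHeap::new() }
    }
    fn push(&mut self, x: u32) {
        if self.lo.peek().map_or(false, |&top| x <= top) {
            self.lo.push(x);
        } else {
            self.hi.push(Reverse(x));
        }
        // Rebalance so the halves differ in size by at most one.
        if self.lo.len() > self.hi.len() + 1 {
            let v = self.lo.pop().unwrap();
            self.hi.push(Reverse(v));
        } else if self.hi.len() > self.lo.len() + 1 {
            let Reverse(v) = self.hi.pop().unwrap();
            self.lo.push(v);
        }
    }
    fn median(&self) -> Option<u32> {
        if self.lo.len() >= self.hi.len() {
            self.lo.peek().copied()
        } else {
            self.hi.peek().map(|r| r.0)
        }
    }
}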

We never achieve true consensus on approval checkers and their approval votes. Yet, our approval assignment loop gives a rough consensus, under our Byzantine assumption and some synchrony assumption. It then follows that misreporting by malicious validators should not appreciably alter the median $\alpha_v$ and hence rewards.

We never tally used approval assignments to candidate equivocations or other forks. Any validator should always conclude whatever approval checks it begins, even on other forks, but we expect relay chain equivocations should be vanishingly rare, and sassafras should make forks uncommon.

We account for noshows similarly, and deduct a much smaller amount of rewards, but require a 2/3 percentile level, not just a median.

Availability redistribution

As approval checkers could easily perform useless checks, we shall reward availability providers for the availability chunks they provide that resulted in useful approval checks. We enforce honesty using a tit-for-tat mechanism because chunk transfers are inherently subjective.

An approval checker reconstructs the full parachain block by downloading $f+1$ distinct chunks from other validators, where at most $f$ validators are byzantine, out of the $n \ge 3f+1$ total validators. For example, with $n = 1000$ validators we tolerate $f = 333$, so reconstruction needs 334 distinct chunks. In downloading chunks, validators prefer the $f+1$ systemic chunks over the non-systemic chunks, and prefer fetching from validators who already voted valid, like backing checkers. It follows some validators should receive credit for more than one chunk per candidate.

We expect a validator $v$ has actually performed more approval checks $\omega_v$ than the median $\alpha_v$ for which they actually received credit. In fact, approval checkers even ignore some of their own approval checks, meaning $\alpha_{v,v} \le \omega_v$ too.

Alongside the approvals count for epoch $e$, approval checker $v$ computes the counts $\beta_{u,v}$ of the number of chunks they downloaded from each availability provider $u$, excluding themselves, for which they perceive the approval check turned out useful, meaning their own approval counts in $\alpha_{v,v}$. Approval checkers publish $\beta_{u,v}$ alongside $\alpha_{u,v}$ in the approvals tally message ApprovalsTallyMessage. We originally proposed including the self availability usage $\beta_{v,v}$ here, but this should not matter, and excluding it simplifies the code.

Symmetrically, availability provider $u$ computes the counts $\gamma_{u,v}$ of the number of chunks they uploaded to each approval checker $v$, again including themselves, again for which they perceive the approval check turned out useful. Availability provider $u$ never reveals its $\gamma_{u,v}$ however.

At this point, $\alpha_v$, $\alpha_{v,v}$, and $\alpha_{u,v}$ all potentially differ. We established consensus upon $\alpha_v$ above however, with which we avoid approval checkers printing unearned availability provider rewards:

After receiving "all" pairs $(\alpha_{u,v},\beta_{u,v})$, validator $w$ re-weights the $\beta_{u,v}$ and their own $\gamma_{w,v}$:

$$
\begin{aligned}
\beta'_{w,v} &= \frac{(f+1)\,\alpha_v}{\sum_u \beta_{u,v}}\, \beta_{w,v} \\
\gamma'_{w,v} &= \frac{(f+1)\,\alpha_w}{\sum_v \gamma_{w,v}}\, \gamma_{w,v}
\end{aligned}
$$

At this point, we compute $\beta'_w = \sum_v \beta'_{w,v}$ on-chain for each $w$ and reward $w$ proportionally.

Tit-for-tat

We employ a tit-for-tat strategy to punish validators who lie about from whom they obtain availability chunks. We only alter validators' future choices in from whom they obtain availability chunks, and never punish by lying ourselves, so nothing here breaks polkadot, but not having roughly this strategy enables cheating.

An availability provider $w$ defines $\delta'_{w,v} := \gamma'_{w,v} - \beta'_{w,v}$ to be the re-weighted number of chunks by which $v$ stiffed $w$. Now $w$ increments their cumulative stiffing perception $\eta_{w,v}$ from $v$ by the value $\delta'_{w,v}$, so $\eta_{w,v} \mathrel{+}= \delta'_{w,v}$.

In future, anytime $w$ seeks chunks in reconstruction, $w$ skips $v$ with probability proportional to $\eta_{w,v} / \sum_u \eta_{w,u}$, with each skip reducing $\eta_{w,v}$ by 1. We expect honest accidental availability stiffs have only small $\delta'_{w,v}$, so they clear out quickly, but intentional stiffs add up more quickly.
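As a hedged sketch of this bookkeeping (names hypothetical, and the handling of negative $\delta'$ values is a choice the text leaves open):

/// Cumulative stiffing perception η_{w,v} that validator w holds about each v.
struct Stiffing {
    eta: Vec<f64>,
}

impl Stiffing {
    /// Fold one round's perceived stiffing δ′_{w,v} into η, floored at zero.
    fn record_round(&mut self, delta: &[f64]) {
        for (e, d) in self.eta.iter_mut().zip(delta) {
            *e = (*e + d).max(0.0);
        }
    }
    /// Probability of skipping validator v when fetching chunks.
    fn skip_probability(&self, v: usize) -> f64 {
        let total: f64 = self.eta.iter().sum();
        if total > 0.0 { self.eta[v] / total } else { 0.0 }
    }
    /// Each actual skip of v reduces η_{w,v} by one, floored at zero.
    fn record_skip(&mut self, v: usize) {
        self.eta[v] = (self.eta[v] - 1.0).max(0.0);
    }
}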

We keep $\gamma_{w,v}$ and $\alpha_{u,u}$ secret so that approval checkers cannot really know others' stiffing perceptions, although $\alpha_{u,v}$ leaks some relevant information. We expect this secrecy keeps skips secret and thus prevents the tit-for-tat escalating beyond one round, which hopefully creates a desirable Nash equilibrium.

We favor fetching systematic chunks to reduce reconstruction costs, so we face costs when skipping them. We could however fetch systematic chunks from availability providers as well as backers, or even other approval checkers, so this might not become problematic in practice.

Concerns: Drawbacks, Testing, Security, and Privacy

We do not pay backers individually for availability distribution per se. We could only do so by including this information into the availability bitfields, which complicates on-chain computation. Also, if one of the two backers does not distribute, then the availability core should remain occupied longer, meaning the lazy backer loses some rewards too. It's likely future protocol improvements change this, so we should monitor for lazy backers outside the rewards system.

We discussed approvals being considered by the tit-for-tat in earlier drafts. An adversary who successfully manipulates the rewards median votes would've already violated polkadot's security assumptions though, which requires a hard fork and correcting the dot allocation. Incorrectly reported approval_usages remain interesting statistics though.

Adversarial validators could manipulate their availability votes though, even without being a supermajority. If they still download honestly, then this costs them more rewards than they earn. We do not prevent validators from preferentially obtaining their pieces from their friends though. We should analyze, or at least observe, the long-term consequences.

A priori, a whale nominator's validators could stiff validators but then rotate their validators quickly enough that they never suffer being skipped back. We discuss several possible solutions, and their difficulties, under "Rob's nominator-wise skipping" in https://hackmd.io/@rgbPIkIdTwSICPuAq67Jbw/S1fHcvXSF but overall less seems like more here. Also, frequent validator rotation could be penalized elsewhere.

Performance, Ergonomics, and Compatibility

We operate off-chain except for final rewards votes and median tallies. We expect lower overhead rewards protocols would lack information, thereby admitting easier cheating.

Initially, we designed the ELVES approval gadget to allow on-chain operation, in part for rewards computation, but doing so looks expensive. Also, on-chain rewards computation remains only an approximation too, but could even be biased more easily than our off-chain protocol presented here.

We already teach validators about missed parachain blocks, but we'll teach approval checking more going forwards, because current efforts focus more upon backing.

JAM's block exports should not complicate availability rewards, but could impact some alternative schemes.

Prior Art and References

None

Unresolved Questions

Provide specific questions to discuss and address before the RFC is voted on by the Fellowship. This should include, for example, alternatives to aspects of the proposed design where the appropriate trade-off to make is unclear.

Synthetic parachain flag

Any rewards protocol could simply be "out voted" by too many slow validators: an increase in the number of parachain cores creates more workload, but this creates no-shows if too few validators can handle this workload.

We could add a synthetic parachain flag, only settable by governance, which treats no-shows as positive approval votes for that parachain, but without adding rewards. We should never enable this for real parachains, only for synthetic ones like gluttons. We should not enable the synthetic parachain flag long-term even for gluttons, because validators could easily modify their code. Yet, synthetic approval checks might enable pushing hardware upgrades more aggressively over the short-term.

          (source)
