diff --git a/404.html b/404.html index 5e6e3f0..edc20e5 100644 --- a/404.html +++ b/404.html @@ -91,7 +91,7 @@ diff --git a/approved/0001-agile-coretime.html b/approved/0001-agile-coretime.html index 1a6a310..8f87fca 100644 --- a/approved/0001-agile-coretime.html +++ b/approved/0001-agile-coretime.html @@ -90,7 +90,7 @@ diff --git a/approved/0005-coretime-interface.html b/approved/0005-coretime-interface.html index 4da25d9..b535d91 100644 --- a/approved/0005-coretime-interface.html +++ b/approved/0005-coretime-interface.html @@ -90,7 +90,7 @@ diff --git a/approved/0007-system-collator-selection.html b/approved/0007-system-collator-selection.html index 00dc451..057aa30 100644 --- a/approved/0007-system-collator-selection.html +++ b/approved/0007-system-collator-selection.html @@ -90,7 +90,7 @@ diff --git a/approved/0008-parachain-bootnodes-dht.html b/approved/0008-parachain-bootnodes-dht.html index ef2307e..2cba851 100644 --- a/approved/0008-parachain-bootnodes-dht.html +++ b/approved/0008-parachain-bootnodes-dht.html @@ -90,7 +90,7 @@ diff --git a/approved/0009-improved-net-light-client-requests.html b/approved/0009-improved-net-light-client-requests.html index ae87577..fc8f2bb 100644 --- a/approved/0009-improved-net-light-client-requests.html +++ b/approved/0009-improved-net-light-client-requests.html @@ -90,7 +90,7 @@ diff --git a/approved/0010-burn-coretime-revenue.html b/approved/0010-burn-coretime-revenue.html index 96bc0e6..cb516df 100644 --- a/approved/0010-burn-coretime-revenue.html +++ b/approved/0010-burn-coretime-revenue.html @@ -90,7 +90,7 @@ diff --git a/approved/0012-process-for-adding-new-collectives.html b/approved/0012-process-for-adding-new-collectives.html index 3f2d43c..c044dae 100644 --- a/approved/0012-process-for-adding-new-collectives.html +++ b/approved/0012-process-for-adding-new-collectives.html @@ -90,7 +90,7 @@ diff --git a/approved/0013-prepare-blockbuilder-and-core-runtime-apis-for-mbms.html b/approved/0013-prepare-blockbuilder-and-core-runtime-apis-for-mbms.html index 0fd0c09..88d0af7 100644 --- a/approved/0013-prepare-blockbuilder-and-core-runtime-apis-for-mbms.html +++ b/approved/0013-prepare-blockbuilder-and-core-runtime-apis-for-mbms.html @@ -90,7 +90,7 @@ diff --git a/approved/0014-improve-locking-mechanism-for-parachains.html b/approved/0014-improve-locking-mechanism-for-parachains.html index ae4ce38..3f41862 100644 --- a/approved/0014-improve-locking-mechanism-for-parachains.html +++ b/approved/0014-improve-locking-mechanism-for-parachains.html @@ -90,7 +90,7 @@ diff --git a/approved/0022-adopt-encointer-runtime.html b/approved/0022-adopt-encointer-runtime.html index db50892..6cf88d8 100644 --- a/approved/0022-adopt-encointer-runtime.html +++ b/approved/0022-adopt-encointer-runtime.html @@ -90,7 +90,7 @@ diff --git a/approved/0026-sassafras-consensus.html b/approved/0026-sassafras-consensus.html index 6ec8075..379f7f1 100644 --- a/approved/0026-sassafras-consensus.html +++ b/approved/0026-sassafras-consensus.html @@ -90,7 +90,7 @@ diff --git a/approved/0032-minimal-relay.html b/approved/0032-minimal-relay.html index de61224..ab7bedb 100644 --- a/approved/0032-minimal-relay.html +++ b/approved/0032-minimal-relay.html @@ -90,7 +90,7 @@ diff --git a/approved/0042-extrinsics-state-version.html b/approved/0042-extrinsics-state-version.html index bf8a0ad..cb7e3ea 100644 --- a/approved/0042-extrinsics-state-version.html +++ b/approved/0042-extrinsics-state-version.html @@ -90,7 +90,7 @@ diff --git a/approved/0043-storage-proof-size-hostfunction.html 
b/approved/0043-storage-proof-size-hostfunction.html index 25ee897..f46fa7a 100644 --- a/approved/0043-storage-proof-size-hostfunction.html +++ b/approved/0043-storage-proof-size-hostfunction.html @@ -90,7 +90,7 @@ diff --git a/approved/0045-nft-deposits-asset-hub.html b/approved/0045-nft-deposits-asset-hub.html index 74d96a8..8af7609 100644 --- a/approved/0045-nft-deposits-asset-hub.html +++ b/approved/0045-nft-deposits-asset-hub.html @@ -90,7 +90,7 @@ diff --git a/approved/0047-assignment-of-availability-chunks.html b/approved/0047-assignment-of-availability-chunks.html index 8dc60fd..c091537 100644 --- a/approved/0047-assignment-of-availability-chunks.html +++ b/approved/0047-assignment-of-availability-chunks.html @@ -90,7 +90,7 @@ diff --git a/approved/0048-session-keys-runtime-api.html b/approved/0048-session-keys-runtime-api.html index f5d18e7..57251b5 100644 --- a/approved/0048-session-keys-runtime-api.html +++ b/approved/0048-session-keys-runtime-api.html @@ -90,7 +90,7 @@ diff --git a/approved/0050-fellowship-salaries.html b/approved/0050-fellowship-salaries.html index 699c53a..974b281 100644 --- a/approved/0050-fellowship-salaries.html +++ b/approved/0050-fellowship-salaries.html @@ -90,7 +90,7 @@ diff --git a/approved/0056-one-transaction-per-notification.html b/approved/0056-one-transaction-per-notification.html index 77a0560..d58663e 100644 --- a/approved/0056-one-transaction-per-notification.html +++ b/approved/0056-one-transaction-per-notification.html @@ -90,7 +90,7 @@ diff --git a/approved/0059-nodes-capabilities-discovery.html b/approved/0059-nodes-capabilities-discovery.html index e20dc67..a172f49 100644 --- a/approved/0059-nodes-capabilities-discovery.html +++ b/approved/0059-nodes-capabilities-discovery.html @@ -90,7 +90,7 @@ diff --git a/approved/0078-merkleized-metadata.html b/approved/0078-merkleized-metadata.html index 43a6964..41f79db 100644 --- a/approved/0078-merkleized-metadata.html +++ b/approved/0078-merkleized-metadata.html @@ -90,7 +90,7 @@ diff --git a/approved/0084-general-transaction-extrinsic-format.html b/approved/0084-general-transaction-extrinsic-format.html index 94bcd95..91f9a08 100644 --- a/approved/0084-general-transaction-extrinsic-format.html +++ b/approved/0084-general-transaction-extrinsic-format.html @@ -90,7 +90,7 @@ diff --git a/approved/0091-dht-record-creation-time.html b/approved/0091-dht-record-creation-time.html index 07f2c09..15886f9 100644 --- a/approved/0091-dht-record-creation-time.html +++ b/approved/0091-dht-record-creation-time.html @@ -90,7 +90,7 @@ diff --git a/approved/0097-unbonding_queue.html b/approved/0097-unbonding_queue.html index 5fe5b23..446b6ee 100644 --- a/approved/0097-unbonding_queue.html +++ b/approved/0097-unbonding_queue.html @@ -90,7 +90,7 @@ diff --git a/approved/0099-transaction-extension-version.html b/approved/0099-transaction-extension-version.html index e6777d1..c46a51e 100644 --- a/approved/0099-transaction-extension-version.html +++ b/approved/0099-transaction-extension-version.html @@ -90,7 +90,7 @@ diff --git a/approved/0100-xcm-multi-type-asset-transfer.html b/approved/0100-xcm-multi-type-asset-transfer.html index 0c2fb3b..59ec599 100644 --- a/approved/0100-xcm-multi-type-asset-transfer.html +++ b/approved/0100-xcm-multi-type-asset-transfer.html @@ -90,7 +90,7 @@ diff --git a/approved/0101-xcm-transact-remove-max-weight-param.html b/approved/0101-xcm-transact-remove-max-weight-param.html index 64a6a2c..3f2a126 100644 --- a/approved/0101-xcm-transact-remove-max-weight-param.html +++ 
b/approved/0101-xcm-transact-remove-max-weight-param.html @@ -90,7 +90,7 @@ diff --git a/approved/0103-introduce-core-index-commitment.html b/approved/0103-introduce-core-index-commitment.html index 0498d53..6f0968c 100644 --- a/approved/0103-introduce-core-index-commitment.html +++ b/approved/0103-introduce-core-index-commitment.html @@ -90,7 +90,7 @@ diff --git a/approved/0105-xcm-improved-fee-mechanism.html b/approved/0105-xcm-improved-fee-mechanism.html index 11c5884..b084be4 100644 --- a/approved/0105-xcm-improved-fee-mechanism.html +++ b/approved/0105-xcm-improved-fee-mechanism.html @@ -90,7 +90,7 @@ diff --git a/approved/0107-xcm-execution-hints.html b/approved/0107-xcm-execution-hints.html index cf00956..aea9526 100644 --- a/approved/0107-xcm-execution-hints.html +++ b/approved/0107-xcm-execution-hints.html @@ -90,7 +90,7 @@ diff --git a/approved/0108-xcm-remove-testnet-ids.html b/approved/0108-xcm-remove-testnet-ids.html index 367b052..35bc607 100644 --- a/approved/0108-xcm-remove-testnet-ids.html +++ b/approved/0108-xcm-remove-testnet-ids.html @@ -90,7 +90,7 @@ diff --git a/approved/0122-alias-origin-on-asset-transfers.html b/approved/0122-alias-origin-on-asset-transfers.html index c6ecf4c..4d14c63 100644 --- a/approved/0122-alias-origin-on-asset-transfers.html +++ b/approved/0122-alias-origin-on-asset-transfers.html @@ -90,7 +90,7 @@ diff --git a/approved/0123-pending-code-as-storage-location-for-runtime-upgrades.html b/approved/0123-pending-code-as-storage-location-for-runtime-upgrades.html index 442e3a1..75f3f07 100644 --- a/approved/0123-pending-code-as-storage-location-for-runtime-upgrades.html +++ b/approved/0123-pending-code-as-storage-location-for-runtime-upgrades.html @@ -90,7 +90,7 @@ diff --git a/approved/0125-xcm-asset-metadata.html b/approved/0125-xcm-asset-metadata.html index 72983ee..3f573d3 100644 --- a/approved/0125-xcm-asset-metadata.html +++ b/approved/0125-xcm-asset-metadata.html @@ -90,7 +90,7 @@ diff --git a/approved/0126-introduce-pvq.html b/approved/0126-introduce-pvq.html index 13a86ee..236d1c9 100644 --- a/approved/0126-introduce-pvq.html +++ b/approved/0126-introduce-pvq.html @@ -90,7 +90,7 @@ diff --git a/approved/0135-compressed-blob-prefixes.html b/approved/0135-compressed-blob-prefixes.html index 36b0388..b3589d6 100644 --- a/approved/0135-compressed-blob-prefixes.html +++ b/approved/0135-compressed-blob-prefixes.html @@ -90,7 +90,7 @@ diff --git a/approved/0139-faster-erasure-coding.html b/approved/0139-faster-erasure-coding.html index 8f5db4c..25ee590 100644 --- a/approved/0139-faster-erasure-coding.html +++ b/approved/0139-faster-erasure-coding.html @@ -90,7 +90,7 @@ diff --git a/index.html b/index.html index 8d58ff7..82d8998 100644 --- a/index.html +++ b/index.html @@ -90,7 +90,7 @@ diff --git a/introduction.html b/introduction.html index 8d58ff7..82d8998 100644 --- a/introduction.html +++ b/introduction.html @@ -90,7 +90,7 @@ diff --git a/new/0000-pre-elves_soft.html b/new/0000-pre-elves_soft.html index 12ae5cc..1ed2432 100644 --- a/new/0000-pre-elves_soft.html +++ b/new/0000-pre-elves_soft.html @@ -90,7 +90,7 @@ @@ -187,6 +187,7 @@
  • Availability voting
  • Fork choice
  • Elves
  • +
  • Fast pruning
  • Concerns: Drawbacks, Testing, Security, and Privacy
  • @@ -199,7 +200,9 @@
  • Unresolved Questions
  • Future Directions and Related Material @@ -232,14 +235,22 @@

    Fork choice

    We require that relay chain block producers build upon forks preferred by 2 f + 1 validators. In other words, a relay chain block with parent p must contain availability bitfield votes from 2 f + 1 validators with relay_parent = p and preferred_fork set. It follows that our preferred fork votes override other fork choice priorities.

    A relay chain block producer could lack this 2 f + 1 threshold for a prospective parent block p, in which case they must build upon the parent of p instead. We know slow availability votes alone would cause this sometimes, in which case adding slightly more delay could save the relay chain slot. Alternatively though, two distinct relay chain blocks in the same slot could each wind up preferred by f+1 validators, in which case we must abandon the slot entirely.
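    For intuition only, a minimal sketch of the producer-side check follows; the vote structure, field names, and helper are hypothetical and not part of this RFC. Assuming n = 3f + 1 validators, a parent becomes buildable once at least 2f + 1 distinct validators prefer it.

    struct AvailabilityVote {
        validator_index: u32,
        relay_parent: [u8; 32],   // hash of the prospective parent block p
        preferred_fork: bool,
    }

    /// Sketch: may a producer build on `parent`, given the votes it has seen?
    fn can_build_on(parent: [u8; 32], votes: &[AvailabilityVote], f: usize) -> bool {
        let mut preferring: Vec<u32> = votes
            .iter()
            .filter(|v| v.relay_parent == parent && v.preferred_fork)
            .map(|v| v.validator_index)
            .collect();
        preferring.sort_unstable();
        preferring.dedup();                        // count each validator once
        preferring.len() >= 2 * f + 1
    }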

    -

    It's critical that honest validators carefully time when they judge their preferences. In babe, this adds complexity: We always prefer a primary slot over a secondary slot, so the validators should delay preferring a secondary slot, giving the primary slot enough time. We prefer the primary slot with the smallest VRF as well, so we need some delay even once we receive a primary.

    Elves

    -

    We launch the approvals process aka (machine) elves for a relay chain block p once 2 f + 1 validators prefer that block, aka 2 f + 1 validators provide availability votes with relay_parent = p and preferred_fork set. We could optionally delay this further until we have some valid descendant of p.

    +

    We only launch the approvals process aka (machine) elves for a relay chain block p once 2 f + 1 validators prefer that block, aka 2 f + 1 validators provide availability votes with relay_parent = p and preferred_fork set. We could optionally delay this further until we have some valid descendant of p.

    +

    Fast pruning

    +

    In fact, this new fork choice logic creates more short relay chain forks than exist currently: If the validators split their votes, then we create a new fork in a later slot. We no longer need to process every fork now though.

    +

    Instead, availability votes from honest validators must express the correct preferred fork, which requires that validators carefully time when they judge and announce their preference flags. In babe, we need primary slots to be preferred over secondary slots, so the validators need logic that delays sending availability votes for a secondary slot, giving the primary slot enough time. We also prefer the primary slot with the smallest VRF, so we need some delay even once we receive a primary.

    +

    We suggest roughly this approach:

    +

    First, we download only relay chain block headers, from which we determine our tentative preferred fork.

    +

    Second, we download and import only our currently tentatively preferred fork. We download our availability chunks as soon as we import a currently tentatively preferred relay chain block. We've no particular target for availability chunks other than simply some delay timer. In babe, we add some extra delay here for secondary slots, like perhaps 2 seconds minus the actual execution time, so that a fast secondary slot cannot beat a primary slot.

    +

    We sometimes obtain an even more preferable header during import, chunk distribution, and delays for our first tentatively preferred fork. Also, the first could simply turn out invalid. In either case, we loop to repeat this second step on our new tentatively preferred fork. We repeat this process until an import succeeds and its timers run out, without receiving any more preferable header. Actual equivocations cannot be preferable over one another, so this loop terminates reasonably quickly.

    +

    Next, we broadcast our availability vote with its relay_parent set to our tentatively preferred fork, and with its preferred_fork set.

    +

    Finally, if 2 f + 1 other validators have a different preference from us, then we download and import their preferred relay chain block, fetch chunks for it, and provide availability votes with preferred_fork zero. It's possible this occurs before our own preference process finishes, in which case we probably still send out our preference, if only for forensic evidence.

    Concerns: Drawbacks, Testing, Security, and Privacy

    Adds subtle timing constraints, which could entrench existing performance obstacles. We might explore variations that ignore wall clock time.

    We've always known relay chain equivocations break the ELVES threat model. We originally envisioned ELVES having fallback pathways, but these were complex and demanded unused code paths, which cannot realistically be debugged. Although complex, the early soft consensus scheme feels less complex overall. We know timing is an awkward way to optimise a distributed system, but at least doing so uses everyday code paths.

    Performance, Ergonomics, and Compatibility

    -

    We expect early soft consensus to introduce back pressure that radically alters performance. We no longer run approvals checks upon all forks. As primary slots occur once every other slot in expectation, one might expect a 30% reduction in CPU load, but this depends upon diverse factors.

    +

    We expect early soft consensus to introduce back pressure that radically alters performance. We no longer run approvals checks upon all forks. As primary slots occur once every other slot in expectation, one might expect a 25% reduction in CPU load, but this depends upon diverse factors.

    We apply back pressure by dropping some whole relay chain blocks though, so this shall increase the expected parachain blocktime somewhat, but how much depends upon future optimisation work.

    Compatibility

    Major upgrade

    @@ -248,14 +259,16 @@

    Unresolved Questions

    Provide specific questions to discuss and address before the RFC is voted on by the Fellowship. This should include, for example, alternatives to aspects of the proposed design where the appropriate trade-off to make is unclear.

    +

    Sassafras

    Arguably, a sassafras RC like JAM could avoid the preferred_fork flag, by only releasing availability votes for at most one sassafras equivocation. We wanted availability for babe forks, but sassafras has only equivocations, so those blocks can simply be dropped.

    In principle, a sassafras equivocation could still enter the valid chain, assuming 2/3rds of validators provide availability votes for the same equivocation. If JAM lacks the preferred_fork flag then enactment proceeds more slowly in this case, but this should almost never occur.

    Threshold randomness

    We think threshold randomness could reduce the tranche zero approval checker assignments by roughly 40%, meaning a fixed 15 vs the expected 25 in the elves paper (30 in production now).

    We do know threshold VRF based schemes that address relay chain equivocations directly, by using the relay chain block hash as input. We have many more options with early soft consensus though. TODO In particular, we only know two post-quantum approaches to elves, and the bandwidth efficient one needs early soft consensus.

    +

    Mid-strength consensus

    +

    In this RFC, we only require that each relay chain block contain preference votes for its parent from 2/3rds of validators. We could enforce the opposite direction too: Around y>2 seconds after a validator V has seen preference votes for a chain head X from 2/3rds of validators, V begins rejecting any relay chain block that does not build upon X. This is tricky because the y>2 second delay must be long enough so that most honest nodes learn both X and its preference votes. This strengthens MEV defenses that assume some honest nodes.

    Avoid wall clock time

    -

    Avoiding or minimizing wall clock time could provide an interesting development direction.

    -

    ...

    +

    We know parachains could base their slots upon relay chain slots, instead of wall clock time (RFC ToDo). After this happens, we could avoid or minimize wall clock timing in the relay chain too, so that relay chain slots could have a floating duration based upon workload.

    Partial relay chain blocks

    Above, we only discuss abandoning relay chain blocks which fail early soft consensus. We could alternatively treat them as partial blocks and build extension partial blocks that complete them, with elves probably using randomness from the final partial block.

    @@ -267,7 +280,7 @@ - @@ -281,7 +294,7 @@ - diff --git a/new/0145-remove-unnecessary-allocator-usage.html b/new/0145-remove-unnecessary-allocator-usage.html new file mode 100644 index 0000000..0d1a577 --- /dev/null +++ b/new/0145-remove-unnecessary-allocator-usage.html @@ -0,0 +1,500 @@ + + + + + + + RFC-0145: Remove the host-side runtime memory allocator - Polkadot Fellowship RFCs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + + + + + + + + + + + + + + + + + +
    + +
    + + + + + + + + +
    +
    +

    (source)

    +

    Table of Contents

    + +

    RFC-0145: Remove the host-side runtime memory allocator

    +
    + + + +
    Start Date: 2025-05-16
    Description: Update the runtime-host interface to no longer make use of a host-side allocator
    Authors: Pierre Krieger, Someone Unknown
    +
    +

    Summary

    +

    Update the runtime-host interface so that it no longer uses the host-side allocator.

    +

    Prior Art

    +

    The API of these new functions was heavily inspired by the API used by the C programming language.

    +

    This RFC is mainly based on RFC-4 by @tomaka, which has never been adopted, and supersedes it.

    +

    Changes

    +
      +
    • The original RFC required checking if an output buffer address provided to a host function is inside the VM address space range and to stop the runtime execution if that's not the case. That requirement has been removed in this version of the RFC, as in the general case, the host doesn't have exhaustive information about the VM's memory organization. Thus, attempting to write to an out-of-bound region will result in a "normal" runtime panic.
    • +
    • Function signatures introduced by PPP#7 have been used in this RFC, as the PPP has already been properly implemented and documented. However, it has never been officially adopted, nor have its functions been in use.
    • +
    • Added new versions of ext_misc_runtime_version and ext_offchain_random_seed.
    • +
    • Addressed discussions from the original RFC-4 discussion flow.
    • +
    +

    Motivation

    +

    The heap allocation of the runtime is currently controlled by the host using a memory allocator on the host side.

    +

    The API of many host functions contains buffer allocations. For example, when calling ext_hashing_twox_256_version_1, the host allocates a 32-byte buffer using the host allocator, and returns a pointer to this buffer to the runtime. The runtime later has to call ext_allocator_free_version_1 on this pointer to free the buffer.

    +

    Even though no benchmark has been done, it is pretty obvious that this design is very inefficient. To continue with the example of ext_hashing_twox_256_version_1, it would be more efficient to instead write the output hash to a buffer allocated by the runtime on its stack and passed by pointer to the function. Allocating a buffer on the stack in the worst-case scenario consists of simply decreasing a number; in the best-case scenario, it is free. Doing so would save many VM memory reads and writes by the allocator, and would save a function call to ext_allocator_free_version_1.

    +

    Furthermore, the existence of the host-side allocator has become questionable over time. It is implemented in a very naive way, and for determinism and backwards compatibility reasons, it needs to be implemented exactly identically in every client implementation. Runtimes make substantial use of heap memory allocations, and each allocation needs to go through the runtime <-> host boundary twice (once for allocating and once for freeing). Moving the allocator to the runtime side would be a good idea, although it would increase the runtime size. But before the host-side allocator can be deprecated, all the host functions that use it must be updated to avoid using it.

    +

    Stakeholders

    +

    No attempt was made to convince stakeholders.

    +

    Explanation

    +

    New host functions

    +

    This section contains a list of new host functions to introduce and amendments to the existing ones.

    +
    (func $ext_storage_read_version_2
    +    (param $key i64) (param $value_out i64) (param $offset i32) (result i64))
    +(func $ext_default_child_storage_read_version_2
    +    (param $child_storage_key i64) (param $key i64) (param $value_out i64)
    +    (param $offset i32) (result i64))
    +
    +

    The signature and behaviour of ext_storage_read_version_2 and ext_default_child_storage_read_version_2 are identical to their version 1 counterparts, but the return value has a different meaning.

    +

    The new functions directly return the number of bytes written into the value_out buffer. If the entry doesn't exist, -1 is returned. Given that the host must never write more bytes than the size of the buffer in value_out, and that the size of this buffer is expressed as a 32-bit number, the 64-bit value of -1 is not ambiguous.
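    As a non-normative illustration, a runtime-side wrapper over ext_storage_read_version_2 could look roughly like the Rust sketch below; the wrapper name, the extern declaration style, and the pointer-size packing helper are assumptions for illustration, not part of this RFC.

    extern "C" {
        fn ext_storage_read_version_2(key: u64, value_out: u64, offset: u32) -> i64;
    }

    fn pack_ptr_len(ptr: *const u8, len: usize) -> u64 {
        // Host API pointer-size convention: pointer in the low 32 bits, length in
        // the high 32 bits (32-bit wasm address space assumed).
        (len as u64) << 32 | ptr as usize as u64
    }

    /// Reads up to `buf.len()` bytes of the value under `key`, starting at `offset`.
    /// Returns None if the key does not exist, otherwise the number of bytes written.
    fn storage_read(key: &[u8], buf: &mut [u8], offset: u32) -> Option<usize> {
        let written = unsafe {
            ext_storage_read_version_2(
                pack_ptr_len(key.as_ptr(), key.len()),
                pack_ptr_len(buf.as_mut_ptr(), buf.len()),
                offset,
            )
        };
        if written < 0 { None } else { Some(written as usize) }
    }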

    +
    (func $ext_storage_next_key_version_2
    +    (param $key i64) (param $out i64) (return i32))
    +(func $ext_default_child_storage_next_key_version_2
    +    (param $child_storage_key i64) (param $key i64) (param $out i64) (return i32))
    +
    +

    The behaviour of these functions is identical to their version 1 counterparts.

    +

    Instead of allocating a buffer, writing the next key to it, and returning a pointer to it, the new version of these functions accepts an out parameter containing a pointer-size to the memory location where the host writes the output.

    +

    These functions return the size, in bytes, of the next key, or 0 if there is no next key. If the size of the next key is larger than the buffer in out, the bytes of the key that fit the buffer are written to out, and any extra bytes that don't fit are discarded.

    +

    Some notes:

    +
      +
    • It is never possible for the next key to be an empty buffer, because an empty key has no preceding key. For this reason, a return value of 0 can unambiguously be used to indicate the lack of the next key.
    • +
    • The ext_storage_next_key_version_2 and ext_default_child_storage_next_key_version_2 functions are typically used to enumerate keys that start with a certain prefix. Since storage keys are constructed by concatenating hashes, the runtime is expected to know the size of the next key and can allocate a buffer that fits said key. When the next key doesn't belong to the desired prefix, it might not fit the buffer, but given that the start of the key is written to the buffer anyway, this can be detected to avoid calling the function a second time with a larger buffer (see the sketch after this list).
    • +
    +
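    To make the enumeration pattern from the note above concrete, here is a hedged Rust sketch; the fixed key-size bound, the helper names, and the loop structure are illustrative assumptions only.

    extern "C" {
        fn ext_storage_next_key_version_2(key: u64, out: u64) -> u32;
    }

    fn pack_ptr_len(ptr: *const u8, len: usize) -> u64 {
        (len as u64) << 32 | ptr as usize as u64  // pointer low 32 bits, length high 32 bits
    }

    const MAX_KEY_LEN: usize = 128;               // assumed upper bound for keys under the prefix

    /// Visits every storage key starting with `prefix` (prefix.len() <= MAX_KEY_LEN assumed).
    fn for_each_key_with_prefix(prefix: &[u8], mut f: impl FnMut(&[u8])) {
        let mut key = [0u8; MAX_KEY_LEN];
        let mut key_len = prefix.len();
        key[..key_len].copy_from_slice(prefix);
        let mut out = [0u8; MAX_KEY_LEN];
        loop {
            let len = unsafe {
                ext_storage_next_key_version_2(
                    pack_ptr_len(key.as_ptr(), key_len),
                    pack_ptr_len(out.as_mut_ptr(), out.len()),
                )
            } as usize;
            if len == 0 {
                break;                            // no next key at all
            }
            // Even a truncated key contains its beginning, so a prefix mismatch (or a
            // key exceeding our bound) tells us the enumeration left the prefix.
            if len > out.len() || !out[..len].starts_with(prefix) {
                break;
            }
            f(&out[..len]);
            key[..len].copy_from_slice(&out[..len]);  // continue from the key just visited
            key_len = len;
        }
    }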
    (func $ext_hashing_keccak_256_version_2
    +    (param $data i64) (param $out i32))
    +(func $ext_hashing_keccak_512_version_2
    +    (param $data i64) (param $out i32))
    +(func $ext_hashing_sha2_256_version_2
    +    (param $data i64) (param $out i32))
    +(func $ext_hashing_blake2_128_version_2
    +    (param $data i64) (param $out i32))
    +(func $ext_hashing_blake2_256_version_2
    +    (param $data i64) (param $out i32))
    +(func $ext_hashing_twox_64_version_2
    +    (param $data i64) (param $out i32))
    +(func $ext_hashing_twox_128_version_2
    +    (param $data i64) (param $out i32))
    +(func $ext_hashing_twox_256_version_2
    +    (param $data i64) (param $out i32))
    +(func $ext_trie_blake2_256_root_version_3
    +    (param $data i64) (param $version i32) (param $out i32))
    +(func $ext_trie_blake2_256_ordered_root_version_3
    +    (param $data i64) (param $version i32) (param $out i32))
    +(func $ext_trie_keccak_256_root_version_3
    +    (param $data i64) (param $version i32) (param $out i32))
    +(func $ext_trie_keccak_256_ordered_root_version_3
    +    (param $data i64) (param $version i32) (param $out i32))
    +(func $ext_crypto_ed25519_generate_version_2
    +    (param $key_type_id i32) (param $seed i64) (param $out i32))
    +(func $ext_crypto_sr25519_generate_version_2
    +    (param $key_type_id i32) (param $seed i64) (param $out i32) (return i32))
    +(func $ext_crypto_ecdsa_generate_version_2
    +    (param $key_type_id i32) (param $seed i64) (param $out i32) (return i32))
    +
    +

    The behaviour of these functions is identical to their version 1 or version 2 counterparts. Instead of allocating a buffer, writing the output to it, and returning a pointer to it, the new version of these functions accepts an out parameter containing the memory location where the host writes the output. The output is always of a size known at compilation time.
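    For illustration, this is roughly how a runtime could call one of these functions with a stack-allocated output buffer, as argued in the Motivation; the wrapper and the packing helper are hypothetical.

    extern "C" {
        fn ext_hashing_twox_256_version_2(data: u64, out: u32);
    }

    fn pack_ptr_len(ptr: *const u8, len: usize) -> u64 {
        (len as u64) << 32 | ptr as usize as u64  // pointer low 32 bits, length high 32 bits
    }

    fn twox_256(data: &[u8]) -> [u8; 32] {
        let mut out = [0u8; 32];                  // lives on the runtime's stack
        unsafe {
            ext_hashing_twox_256_version_2(
                pack_ptr_len(data.as_ptr(), data.len()),
                out.as_mut_ptr() as u32,          // plain pointer: the output size is fixed
            )
        };
        out
    }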

    +
    (func $ext_default_child_storage_root_version_3
    +    (param $child_storage_key i64) (param $out i32))
    +(func $ext_storage_root_version_3
    +    (param $out i32))
    +
    +

    The behaviour of these functions is identical to their version 1 and version 2 counterparts. Instead of allocating a buffer, writing the output to it, and returning a pointer to it, the new versions of these functions accept an out parameter containing the memory location where the host writes the output. The output is always of a size known at compilation time.

    +

    The version 1 of these functions has been taken as a base rather than the version 2, as a PPP#6 deprecating the version 2 of these functions has previously been accepted.

    +
    (func $ext_storage_clear_prefix_version_3
    +    (param $maybe_prefix i64) (param $maybe_limit i64)
    +    (param $maybe_cursor_in_out i64) (param $backend_out i32)
    +    (param $unique_out i32) (param $loops_out i32) (return i32))
    +(func $ext_default_child_storage_clear_prefix_version_3
    +    (param $child_storage_key i64) (param $prefix i64) (param $maybe_limit i64)
    +    (param $maybe_cursor_in_out i64) (param $backend_out i32)
    +    (param $unique_out i32) (param $loops_out i32) (return i32))
    +(func $ext_default_child_storage_kill_version_4
    +    (param $child_storage_key i64) (param $maybe_limit i64)
    +    (param $maybe_cursor_in_out i64) (param $backend_out i32)
    +    (param $unique_out i32) (param $loops_out i32) (return i32))
    +
    +

    These functions amend already implemented but still unused functions introduced by PPP#7, hence there's no version number change. maybe_limit defines the limit of backend deletions, not counting keys in the current overlay. maybe_cursor_in_out may be used to pass a continuation cursor. The cursor is written into the same field if the limit was reached and not all the keys were cleared; otherwise, None is written. (CAVEAT: it's impossible to determine an appropriate buffer size in advance; this approach is open to discussion.) The backend_out, unique_out and loops_out parameters contain the memory locations where the output is written (respectively, the number of items removed from the backend DB; the number of unique keys removed, including the overlay; and the number of iterations done). Any of the output parameters may be -1, in which case no output is written. The functions return 0 to indicate success, or 1 if the maybe_cursor_in_out buffer length was not enough to write the new cursor; in the latter case, None is written to the buffer.

    +
    (func $ext_crypto_ed25519_sign_version_2
    +    (param $key_type_id i32) (param $key i32) (param $msg i64) (param $out i32) (return i32))
    +(func $ext_crypto_sr25519_sign_version_2
    +    (param $key_type_id i32) (param $key i32) (param $msg i64) (param $out i32) (return i32))
    +func $ext_crypto_ecdsa_sign_version_2
    +    (param $key_type_id i32) (param $key i32) (param $msg i64) (param $out i32) (return i32))
    +(func $ext_crypto_ecdsa_sign_prehashed_version_2
    +    (param $key_type_id i32) (param $key i32) (param $msg i64) (param $out i32) (return i64))
    +
    +

    The behaviour of these functions is identical to their version 1 counterparts. The new versions of these functions accept an out parameter containing the memory location where the host writes the signature. The signatures are always of a size known at compilation time. On success, these functions return 0. If the public key can't be found in the keystore, these functions return 1 and do not write anything to out.

    +

    Note that the return value is 0 on success and 1 on failure, while the previous version of these functions wrote 1 on success (as it represents a SCALE-encoded Some) and 0 on failure (as it represents a SCALE-encoded None). Returning 0 on success and non-zero on failure is consistent with standard practices in the C programming language and is less surprising than the opposite.
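    A hedged sketch of this convention using ext_crypto_sr25519_sign_version_2 follows; how the key type id and the public key pointer are passed is assumed to mirror the version 1 functions and is not specified here.

    extern "C" {
        fn ext_crypto_sr25519_sign_version_2(key_type_id: u32, key: u32, msg: u64, out: u32) -> u32;
    }

    fn pack_ptr_len(ptr: *const u8, len: usize) -> u64 {
        (len as u64) << 32 | ptr as usize as u64  // pointer low 32 bits, length high 32 bits
    }

    /// Signs `msg` with the sr25519 key identified by `public`, or returns None if
    /// the key is not in the keystore.
    fn sr25519_sign(key_type_id: u32, public: &[u8; 32], msg: &[u8]) -> Option<[u8; 64]> {
        let mut sig = [0u8; 64];                  // sr25519 signatures are 64 bytes
        let ret = unsafe {
            ext_crypto_sr25519_sign_version_2(
                key_type_id,                      // passed exactly as for version 1 (assumption)
                public.as_ptr() as u32,           // assumed: pointer to the 32-byte public key
                pack_ptr_len(msg.as_ptr(), msg.len()),
                sig.as_mut_ptr() as u32,
            )
        };
        if ret == 0 { Some(sig) } else { None }   // 0 = success, 1 = key not found
    }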

    +
    (func $ext_crypto_secp256k1_ecdsa_recover_version_3
    +    (param $sig i32) (param $msg i32) (param $out i32) (return i32))
    +(func $ext_crypto_secp256k1_ecdsa_recover_compressed_version_3
    +    (param $sig i32) (param $msg i32) (param $out i32) (return i32))
    +
    +

    The behaviour of these functions is identical to their version 2 counterparts. The new versions of these functions accept an out parameter containing the memory location where the host writes the signature. The signatures are always of a size known at compilation time. On success, these functions return 0. On failure, these functions return a non-zero value and do not write anything to out.

    +

    The non-zero value written on failure is:

    +
      +
    • 1: incorrect value of R or S
    • +
    • 2: incorrect value of V
    • +
    • 3: invalid signature
    • +
    +

    These values are equal to the values returned on error by the version 2 (see https://spec.polkadot.network/chap-host-api#defn-ecdsa-verify-error), but incremented by 1 to reserve 0 for success.

    +
    (func $ext_crypto_ed25519_num_public_keys_version_1
    +    (param $key_type_id i32) (return i32))
    +(func $ext_crypto_ed25519_public_key_version_2
    +    (param $key_type_id i32) (param $key_index i32) (param $out i32))
    +(func $ext_crypto_sr25519_num_public_keys_version_1
    +    (param $key_type_id i32) (return i32))
    +(func $ext_crypto_sr25519_public_key_version_2
    +    (param $key_type_id i32) (param $key_index i32) (param $out i32))
    +(func $ext_crypto_ecdsa_num_public_keys_version_1
    +    (param $key_type_id i32) (return i32))
    +(func $ext_crypto_ecdsa_public_key_version_2
    +    (param $key_type_id i32) (param $key_index i32) (param $out i32))
    +
    +

    The functions supersede the ext_crypto_ed25519_public_key_version_1, ext_crypto_sr25519_public_key_version_1, and ext_crypto_ecdsa_public_key_version_1 host functions.

    +

    Instead of calling ext_crypto_ed25519_public_key_version_1 to obtain the list of all the keys at once, the runtime should call ext_crypto_ed25519_num_public_keys_version_1 to get the number of public keys available, then ext_crypto_ed25519_public_key_version_2 repeatedly. The ext_crypto_ed25519_public_key_version_2 function writes the public key of the given key_index to the memory location designated by out. The key_index must be between 0 (included) and n (excluded), where n is the value returned by ext_crypto_ed25519_num_public_keys_version_1. Execution must trap if key_index is out of range.

    +

    The same explanations apply for ext_crypto_sr25519_public_key_version_1 and ext_crypto_ecdsa_public_key_version_1.

    +

    Host implementers should be aware that the list of public keys (including their ordering) must not change while the runtime is running. That is most likely done by copying the list of all available keys either at the start of the execution or the first time the list is accessed.
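    A non-normative sketch of the enumeration pattern described above follows; the wrapper name is illustrative, and the key type id is passed through untouched, exactly as for the deprecated version 1 function.

    extern "C" {
        fn ext_crypto_ed25519_num_public_keys_version_1(key_type_id: u32) -> u32;
        fn ext_crypto_ed25519_public_key_version_2(key_type_id: u32, key_index: u32, out: u32);
    }

    /// Collects all ed25519 public keys of the given key type from the keystore.
    fn ed25519_public_keys(key_type_id: u32) -> Vec<[u8; 32]> {
        let n = unsafe { ext_crypto_ed25519_num_public_keys_version_1(key_type_id) };
        let mut keys = Vec::with_capacity(n as usize);
        for index in 0..n {
            let mut key = [0u8; 32];
            // index is always < n here, so this call never traps.
            unsafe { ext_crypto_ed25519_public_key_version_2(key_type_id, index, key.as_mut_ptr() as u32) };
            keys.push(key);
        }
        keys
    }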

    +
    (func $ext_offchain_http_request_start_version_2
    +  (param $method i64) (param $uri i64) (param $meta i64) (result i32))
    +
    +

    The behaviour of this function is identical to its version 1 counterpart. Instead of allocating a buffer, writing the request identifier in it, and returning a pointer to it, version 2 of this function simply returns the newly-assigned identifier to the HTTP request. On failure, this function returns -1. An identifier of -1 is invalid and is reserved to indicate failure.

    +
    (func $ext_offchain_http_request_write_body_version_2
    +  (param $method i64) (param $uri i64) (param $meta i64) (result i32))
    +(func $ext_offchain_http_response_read_body_version_2
    +  (param $request_id i32) (param $buffer i64) (param $deadline i64) (result i64))
    +
    +

    The behaviour of these functions is identical to their version 1 counterparts. Instead of allocating a buffer, writing two bytes in it, and returning a pointer to it, the new versions of these functions simply indicate what happened:

    +
      +
    • For ext_offchain_http_request_write_body_version_2, 0 on success.
    • +
    • For ext_offchain_http_response_read_body_version_2, 0 or a non-zero number of bytes on success.
    • +
    • -1 if the deadline was reached.
    • +
    • -2 if there was an I/O error while processing the request.
    • +
    • -3 if the identifier of the request is invalid.
    • +
    +

    These values are equal to the values returned on error by version 1 (see https://spec.polkadot.network/chap-host-api#defn-http-error), but tweaked to reserve positive numbers for success.

    +

    When it comes to ext_offchain_http_response_read_body_version_2, the host implementers must not read too much data at once to avoid ambiguity in the returned value. Given that the buffer size is always less than or equal to 4 GiB, this is not a problem.

    +
    (func $ext_offchain_http_response_wait_version_2
    +    (param $ids i64) (param $deadline i64) (param $out i32))
    +
    +

    The behaviour of this function is identical to its version 1 counterpart. Instead of allocating a buffer, writing the output to it, and returning a pointer to it, the new version of this function accepts an out parameter containing the memory location where the host writes the output.

    +

    The encoding of the response code is also modified compared to its version 1 counterpart: each response code is now written as a 4-byte little-endian value, interpreted as described below:

    +
      +
    • 100-999: The request has finished with the given HTTP status code.
    • +
    • -1: The deadline was reached.
    • +
    • -2: There was an I/O error while processing the request.
    • +
    • -3: The identifier of the request is invalid.
    • +
    +

    The buffer passed to out must always have a size of 4 * n where n is the number of elements in the ids.
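    For illustration, decoding that out buffer might look as follows; the interpretation of each entry as a 4-byte little-endian signed value, and the local enum, are assumptions consistent with the list above.

    #[derive(Debug)]
    enum HttpRequestStatus {
        Finished(u16),   // HTTP status code 100-999
        DeadlineReached, // -1
        IoError,         // -2
        Invalid,         // -3 (any other value is also treated as invalid here)
    }

    fn decode_wait_output(out: &[u8]) -> Vec<HttpRequestStatus> {
        out.chunks_exact(4)
            .map(|chunk| {
                let code = i32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]);
                match code {
                    100..=999 => HttpRequestStatus::Finished(code as u16),
                    -1 => HttpRequestStatus::DeadlineReached,
                    -2 => HttpRequestStatus::IoError,
                    _ => HttpRequestStatus::Invalid,
                }
            })
            .collect()
    }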

    +
    (func $ext_offchain_http_response_header_name_version_1
    +    (param $request_id i32) (param $header_index i32) (param $out i64) (result i64))
    +(func $ext_offchain_http_response_header_value_version_1
    +    (param $request_id i32) (param $header_index i32) (param $out i64) (result i64))
    +
    +

    These functions supersede the ext_offchain_http_response_headers_version_1 host function.

    +

    Contrary to ext_offchain_http_response_headers_version_1, only one header indicated by header_index can be read at a time. Instead of calling ext_offchain_http_response_headers_version_1 once, the runtime should call ext_offchain_http_response_header_name_version_1 and ext_offchain_http_response_header_value_version_1 multiple times with an increasing header_index, until a value of -1 is returned.

    +

    These functions accept an out parameter containing a pointer-size to the memory location where the header name or value should be written.

    +

    These functions return the size, in bytes, of the header name or header value. If the request doesn't exist or is in an invalid state (as documented for ext_offchain_http_response_headers_version_1) or the header_index is out of range, a value of -1 is returned. Given that the host must never write more bytes than the size of the buffer in out, and that the size of this buffer is expressed as a 32-bit number, a 64-bit value of -1 is not ambiguous.

    +

    If the buffer in out is too small to fit the entire header name or value, only the bytes that fit are written, and the rest are discarded.
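    A possible enumeration loop, shown as a non-normative Rust sketch; the 256-byte buffers and helper names are arbitrary choices for illustration.

    extern "C" {
        fn ext_offchain_http_response_header_name_version_1(request_id: u32, header_index: u32, out: u64) -> i64;
        fn ext_offchain_http_response_header_value_version_1(request_id: u32, header_index: u32, out: u64) -> i64;
    }

    fn pack_ptr_len(ptr: *const u8, len: usize) -> u64 {
        (len as u64) << 32 | ptr as usize as u64  // pointer low 32 bits, length high 32 bits
    }

    /// Collects all (name, value) response headers, each truncated to 256 bytes.
    fn response_headers(request_id: u32) -> Vec<(Vec<u8>, Vec<u8>)> {
        let mut headers = Vec::new();
        let mut name = [0u8; 256];
        let mut value = [0u8; 256];
        for index in 0u32.. {
            let name_len = unsafe {
                ext_offchain_http_response_header_name_version_1(
                    request_id, index, pack_ptr_len(name.as_mut_ptr(), name.len()))
            };
            if name_len < 0 {
                break;                            // -1: no more headers, or invalid request
            }
            let value_len = unsafe {
                ext_offchain_http_response_header_value_version_1(
                    request_id, index, pack_ptr_len(value.as_mut_ptr(), value.len()))
            };
            if value_len < 0 {
                break;
            }
            // Only the bytes that fit the buffers were written; longer headers are truncated.
            headers.push((
                name[..(name_len as usize).min(name.len())].to_vec(),
                value[..(value_len as usize).min(value.len())].to_vec(),
            ));
        }
        headers
    }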

    +
    (func $ext_offchain_submit_transaction_version_2
    +    (param $data i64) (return i32))
    +(func $ext_offchain_http_request_add_header_version_2
    +    (param $request_id i32) (param $name i64) (param $value i64) (result i32))
    +
    +

    Instead of allocating a buffer, writing 1 or 0 in it, and returning a pointer to it, the version 2 of these functions returns 0 or 1, where 0 indicates success and 1 indicates failure. The runtime must interpret any non-0 value as failure, but the client must always return 1 in case of failure.

    +
    (func $ext_offchain_local_storage_read_version_1
    +    (param $kind i32) (param $key i64) (param $value_out i64) (param $offset i32) (result i64))
    +
    +

    This function supersedes the ext_offchain_local_storage_get_version_1 host function, and uses an API and logic similar to ext_storage_read_version_2.

    +

    It reads the offchain local storage key indicated by kind and key starting at the byte indicated by offset, and writes the value to the pointer-size indicated by value_out.

    +

    The function returns the number of bytes written into the value_out buffer. If the entry doesn't exist, the -1 value is returned. Given that the host must never write more bytes than the size of the buffer in value_out, and that the size of this buffer is expressed as a 32-bit number, a 64-bit value of -1 is not ambiguous.

    +
    (func $ext_offchain_network_peer_id_version_1
    +    (param $out i64))
    +
    +

    This function writes the PeerId of the local node to the memory location indicated by out. A PeerId is always 38 bytes long.

    +
    (func $ext_misc_runtime_version_version_2
    +    (param $wasm i64) (param $out i64))
    +
    +

    The behaviour of this function is identical to its version 1 counterpart. Instead of allocating a buffer, writing the output to it, and returning a pointer to it, the new version of this function accepts an out parameter containing pointer-size to the memory location where the host writes the output.

    +
    (func $ext_offchain_random_seed_version_2 (param $out i32))
    +
    +

    The behaviour of this function is identical to its version 1 counterpart. Instead of allocating a buffer, writing the output to it, and returning a pointer to it, the new version of this function accepts an out parameter containing the address of the memory location where the host writes the output. The output is always 32 bytes.

    +
    (func $ext_misc_input_read_version_1
    +    (param $offset i64) (param $out i64) (return i32))
    +
    +

    When a runtime function is called, the host uses the allocator to allocate memory within the runtime to write some input data. The new host function provides an alternative way to access the input that doesn't use the allocator.

    +

    The function copies some data from the input data to the runtime's memory. The offset parameter indicates the offset within the input data from which to start copying, and must not exceed the size of the input data. The out parameter is a pointer-size and contains the buffer where to write.

    +

    The runtime execution stops with an error if offset is strictly greater than the input data size.

    +

    The return value is the number of bytes written unless out has zero length, in which case the full length of input data in bytes is returned, and nothing is written into the output buffer.
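    This enables the following two-step pattern, sketched here with hypothetical helper names: first query the total input size with a zero-length out buffer, then read the whole input into a runtime-allocated buffer.

    extern "C" {
        fn ext_misc_input_read_version_1(offset: u64, out: u64) -> u32;
    }

    fn pack_ptr_len(ptr: *const u8, len: usize) -> u64 {
        (len as u64) << 32 | ptr as usize as u64  // pointer low 32 bits, length high 32 bits
    }

    fn read_whole_input() -> Vec<u8> {
        // A zero-length out buffer makes the host return the total input size
        // without writing anything.
        let empty: [u8; 0] = [];
        let total = unsafe { ext_misc_input_read_version_1(0, pack_ptr_len(empty.as_ptr(), 0)) };
        let mut input = vec![0u8; total as usize];
        let written = unsafe {
            ext_misc_input_read_version_1(0, pack_ptr_len(input.as_mut_ptr(), input.len()))
        };
        debug_assert_eq!(written as usize, input.len());
        input
    }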

    +

    Other changes

    +

    In addition to the new host functions, this RFC proposes two changes to the runtime-host interface:

    +
      +
    • The following function signature is now also accepted for runtime entry points: (func (result i64)).
    • +
    • Runtimes no longer need to expose a constant named __heap_base.
    • +
    +

    All the host functions superseded by new host functions are now considered deprecated and should no longer be used.

    +

    The following other host functions are also considered deprecated:

    +
      +
    • ext_storage_get_version_1
    • +
    • ext_storage_changes_root_version_1
    • +
    • ext_default_child_storage_get_version_1
    • +
    • ext_allocator_malloc_version_1
    • +
    • ext_allocator_free_version_1
    • +
    • ext_offchain_network_state_version_1
    • +
    +

    Unresolved Questions

    +

    The changes in this RFC would need to be benchmarked. That involves implementing the RFC and measuring the speed difference.

    +

    It is expected that most host functions are faster or equal in speed to their deprecated counterparts, with the following exceptions:

    +
      +
    • +

      ext_misc_input_read_version_1 is inherently slower than obtaining a buffer with the entire data due to the two extra function calls and the extra copying. However, given that this only happens once per runtime call, the cost is expected to be negligible.

      +
    • +
    • +

      The ext_crypto_*_public_keys, ext_offchain_network_state, and ext_offchain_http_* host functions are likely slightly slower than their deprecated counterparts, but given that they are used only in offchain workers, that is acceptable.

      +
    • +
    • +

      It is unclear how replacing ext_storage_get with ext_storage_read and ext_default_child_storage_get with ext_default_child_storage_read will impact performance.

      +
    • +
    • +

      It is unclear how the changes to ext_storage_next_key and ext_default_child_storage_next_key will impact performance.

      +
    • +
    + +
    + + +
    +
    + + + +
    + + + + + + + + + + + + + + + + + + +
    + + diff --git a/print.html b/print.html index 20bb2a8..702487f 100644 --- a/print.html +++ b/print.html @@ -91,7 +91,7 @@ @@ -193,6 +193,7 @@ detailing proposed changes to the technical implementation of the Polkadot netwo
  • Availability voting
  • Fork choice
  • Elves
  • +
  • Fast pruning
  • Concerns: Drawbacks, Testing, Security, and Privacy
  • @@ -205,7 +206,9 @@ detailing proposed changes to the technical implementation of the Polkadot netwo
  • Unresolved Questions
  • Future Directions and Related Material @@ -238,14 +241,22 @@ detailing proposed changes to the technical implementation of the Polkadot netwo

    Fork choice

    We require that relay chain block producers build upon forks preferred by 2 f + 1 validators. In other words, a relay chain block with parent p must contain availability bitfield votes from 2 f + 1 validators with relay_parent = p and preferred_fork set. It follows that our preferred fork votes override other fork choice priorities.

    A relay chain block producer could lack this 2 f + 1 threshold for a prospective parent block p, in which case they must build upon the parent of p instead. We know slow availability votes alone would cause this sometimes, in which case adding slightly more delay could save the relay chain slot. Alternatively though, two distinct relay chain blocks in the same slot could each wind up preferred by f+1 validators, in which case we must abandon the slot entirely.

    -

    It's critical that honest validators carefully time when they judge their preferences. In babe, this adds complexity: We always prefer a primary slot over a secondary slot, so the validators should delay preferring a secondary slot, giving the primary slot enough time. We prefer the primary slot with the smallest VRF as well, so we need some delay even once we receive a primary.

    Elves

    -

    We launch the approvals process aka (machine) elves for a relay chain block p once 2 f + 1 validators prefer that block, aka 2 f + 1 validators provide availability votes with relay_parent = p and preferred_fork set. We could optionally delay this further until we have some valid descendant of p.

    +

    We only launch the approvals process aka (machine) elves for a relay chain block p once 2 f + 1 validators prefer that block, aka 2 f + 1 validators provide availability votes with relay_parent = p and preferred_fork set. We could optionally delay this further until we have some valid descendant of p.

    +

    Fast pruning

    +

    In fact, this new fork choice logic creates more short relay chain forks than exist currently: If the validators split their votes, then we create a new fork in a later slot. We no longer need to process every fork now though.

    +

    Instead, availability votes from honest validators must express the correct preferred fork, which requires that validators carefully time when they judge and announce their preference flags. In babe, we need primary slots to be preferred over secondary slots, so the validators need logic that delays sending availability votes for a secondary slot, giving the primary slot enough time. We also prefer the primary slot with the smallest VRF, so we need some delay even once we receive a primary.

    +

    We suggest roughly this approach:

    +

    First, we download only relay chain block headers, from which we determine our tentative preferred fork.

    +

    Second, we download and import only our currently tentatively preferred fork. We download our availability chunks as soon as we import a currently tentatively preferred relay chain block. We've no particular target for availability chunks other than simply some delay timer. In babe, we add some extra delay here for secondary slots, like perhaps 2 seconds minus the actual execution time, so that a fast secondary slot cannot beat a primary slot.

    +

    We sometimes obtain an even more preferable header during import, chunk distribution, and delays for our first tentatively preferred fork. Also, the first could simply turn out invalid. In either case, we loop to repeat this second step on our new tentatively preferred fork. We repeat this process until an import succeeds and its timers run out, without receiving any more preferable header. Actual equivocations cannot be preferable over one another, so this loop terminates reasonably quickly.

    +

    Next, we broadcast our availability vote with its relay_parent set to our tentatively preferred fork, and with its preferred_fork set.

    +

    Finally, if 2 f + 1 other validators have a different preference from us, then we download and import their preferred relay chain block, fetch chunks for it, and provide availability votes with preferred_fork zero. It's possible this occurs before our own preference process finishes, in which case we probably still send out our preference, if only for forensic evidence.

    Concerns: Drawbacks, Testing, Security, and Privacy

    Adds subtle timing constraints, which could entrench existing performance obstacles. We might explore variations that ignore wall clock time.

    We've always known relay chain equivocations break the ELVES threat model. We originally envisioned ELVES having fallback pathways, but these were complex and demanded unused code paths, which cannot realistically be debugged. Although complex, the early soft consensus scheme feels less complex overall. We know timing is an awkward way to optimise a distributed system, but at least doing so uses everyday code paths.

    Performance, Ergonomics, and Compatibility

    -

    We expect early soft consensus to introduce back pressure that radically alters performance. We no longer run approvals checks upon all forks. As primary slots occur once every other slot in expectation, one might expect a 30% reduction in CPU load, but this depends upon diverse factors.

    +

    We expect early soft consensus to introduce back pressure that radically alters performance. We no longer run approvals checks upon all forks. As primary slots occur once every other slot in expectation, one might expect a 25% reduction in CPU load, but this depends upon diverse factors.

    We apply back pressure by dropping some whole relay chain blocks though, so this shall increase the expected parachain blocktime somewhat, but how much depends upon future optimisation work.

    Compatibility

    Major upgrade

    @@ -254,16 +265,290 @@ detailing proposed changes to the technical implementation of the Polkadot netwo

    Unresolved Questions

    Provide specific questions to discuss and address before the RFC is voted on by the Fellowship. This should include, for example, alternatives to aspects of the proposed design where the appropriate trade-off to make is unclear.

    +

    Sassafras

    Arguably, a sassafras RC like JAM could avoid the preferred_fork flag, by only releasing availability votes for at most one sassafras equivocation. We wanted availability for babe forks, but sassafras has only equivocations, so those blocks can simply be dropped.

    In principle, a sassafras equivocation could still enter the valid chain, assuming 2/3rds of validators provide availability votes for the same equivocation. If JAM lacks the preferred_fork flag then enactment proceeds more slowly in this case, but this should almost never occur.

    Threshold randomness

    We think threshold randomness could reduce the tranche zero approval checker assignments by roughly 40%, meaning a fixed 15 vs the expected 25 in the elves paper (30 in production now).

    We do know threshold VRF based schemes that address relay chain equivocations directly, by using the relay chain block hash as input. We have many more options with early soft consensus though. TODO In particular, we only know two post-quantum approaches to elves, and the bandwidth efficient one needs early soft consensus.

    +

    Mid-strength consensus

    +

    In this RFC, we only require that each relay chain block contain preference votes for its parent from 2/3rds of validators. We could enforce the opposite direction too: Around y>2 seconds after a validator V has seen preference votes for a chain head X from 2/3rds of validators, V begins rejecting any relay chain block that does not build upon X. This is tricky because the y>2 second delay must be long enough so that most honest nodes learn both X and its preference votes. This strengthens MEV defenses that assume some honest nodes.

    Avoid wall clock time

    -

    Avoiding or minimizing wall clock time could provide an interesting development direction.

    -

    ...

    +

    We know parachains could base their slots upon relay chain slots, instead of wall clock time (RFC ToDo). After this happens, we could avoid or minimize wall clock timing in the relay chain too, so that relay chain slots could have a floating duration based upon workload.

    Partial relay chain blocks

    Above, we only discuss abandoning relay chain blocks which fail early soft consensus. We could alternatively treat them as partial blocks and build extension partial blocks that complete them, with elves probably using randomness from the final partial block.

    +

    (source)

    +

    Table of Contents

    + +

    RFC-0145: Remove the host-side runtime memory allocator

    +
    + + + +
    Start Date: 2025-05-16
    Description: Update the runtime-host interface to no longer make use of a host-side allocator
    Authors: Pierre Krieger, Someone Unknown
    +
    +

    Summary

    +

    Update the runtime-host interface so that it no longer uses the host-side allocator.

    +

    Prior Art

    +

    The API of these new functions was heavily inspired by the API used by the C programming language.

    +

    This RFC is mainly based on RFC-4 by @tomaka, which has never been adopted, and supersedes it.

    +

    Changes

    + +

    Motivation

    +

    The heap allocation of the runtime is currently controlled by the host using a memory allocator on the host side.

    +

    The API of many host functions contains buffer allocations. For example, when calling ext_hashing_twox_256_version_1, the host allocates a 32-byte buffer using the host allocator, and returns a pointer to this buffer to the runtime. The runtime later has to call ext_allocator_free_version_1 on this pointer to free the buffer.

    +

    Even though no benchmark has been done, it is pretty obvious that this design is very inefficient. To continue with the example of ext_hashing_twox_256_version_1, it would be more efficient to instead write the output hash to a buffer allocated by the runtime on its stack and passed by pointer to the function. Allocating a buffer on the stack in the worst-case scenario consists of simply decreasing a number; in the best-case scenario, it is free. Doing so would save many VM memory reads and writes by the allocator, and would save a function call to ext_allocator_free_version_1.

    +

    Furthermore, the existence of the host-side allocator has become questionable over time. It is implemented in a very naive way, and for determinism and backwards compatibility reasons, it needs to be implemented exactly identically in every client implementation. Runtimes make substantial use of heap memory allocations, and each allocation needs to go through the runtime <-> host boundary twice (once for allocating and once for freeing). Moving the allocator to the runtime side would be a good idea, although it would increase the runtime size. But before the host-side allocator can be deprecated, all the host functions that use it must be updated to avoid using it.

    +

    Stakeholders

    +

    No attempt was made to convince stakeholders.

    +

    Explanation

    +

    New host functions

    +

    This section contains a list of new host functions to introduce and amendments to the existing ones.

    +
    (func $ext_storage_read_version_2
    +    (param $key i64) (param $value_out i64) (param $offset i32) (result i64))
    +(func $ext_default_child_storage_read_version_2
    +    (param $child_storage_key i64) (param $key i64) (param $value_out i64)
    +    (param $offset i32) (result i64))
    +
    +

    The signature and behaviour of ext_storage_read_version_2 and ext_default_child_storage_read_version_2 are identical to their version 1 counterparts, but the return value has a different meaning.

    +

    The new functions directly return the number of bytes written into the value_out buffer. If the entry doesn't exist, -1 is returned. Given that the host must never write more bytes than the size of the buffer in value_out, and that the size of this buffer is expressed as a 32-bit number, the 64-bit value of -1 is not ambiguous.
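
    As an illustration, the following is a minimal, hypothetical runtime-side wrapper around this function written in Rust. It assumes wasm32 pointers and the usual pointer-size convention of the host API (pointer in the lower 32 bits, length in the upper 32 bits); the binding and helper names are made up for this sketch.

    extern "C" {
        fn ext_storage_read_version_2(key: u64, value_out: u64, offset: u32) -> i64;
    }

    // Packs a pointer and a length into a pointer-size value (wasm32 pointers fit in 32 bits).
    fn pack(ptr: *const u8, len: usize) -> u64 {
        (ptr as u64) | ((len as u64) << 32)
    }

    /// Reads up to `buf.len()` bytes of the value at `key`, starting at `offset`.
    fn storage_read(key: &[u8], buf: &mut [u8], offset: u32) -> Option<usize> {
        let written = unsafe {
            ext_storage_read_version_2(
                pack(key.as_ptr(), key.len()),
                pack(buf.as_ptr(), buf.len()),
                offset,
            )
        };
        // -1 means the entry does not exist; otherwise the number of bytes written into `buf`.
        if written < 0 { None } else { Some(written as usize) }
    }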

    +
    (func $ext_storage_next_key_version_2
+    (param $key i64) (param $out i64) (result i32))
+(func $ext_default_child_storage_next_key_version_2
+    (param $child_storage_key i64) (param $key i64) (param $out i64) (result i32))
    +
    +

    The behaviour of these functions is identical to their version 1 counterparts.

    +

    Instead of allocating a buffer, writing the next key to it, and returning a pointer to it, the new version of these functions accepts an out parameter containing a pointer-size to the memory location where the host writes the output.

    +

    These functions return the size, in bytes, of the next key, or 0 if there is no next key. If the size of the next key is larger than the buffer in out, the bytes of the key that fit the buffer are written to out, and any extra bytes that don't fit are discarded.
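
    A hedged sketch of how the runtime might interpret that return value, reusing the same hypothetical Rust binding style and pointer-size packing as above:

    extern "C" {
        fn ext_storage_next_key_version_2(key: u64, out: u64) -> u32;
    }

    fn next_key(key: &[u8], out: &mut [u8]) -> Option<usize> {
        let size = unsafe {
            ext_storage_next_key_version_2(
                (key.as_ptr() as u64) | ((key.len() as u64) << 32),
                (out.as_ptr() as u64) | ((out.len() as u64) << 32),
            )
        } as usize;
        if size == 0 {
            None // there is no next key
        } else {
            // `size` is the full length of the next key; only the first
            // `min(size, out.len())` bytes of `out` are valid, any excess
            // bytes of the key were discarded by the host.
            Some(size)
        }
    }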

    +

    Some notes:

    + +
    (func $ext_hashing_keccak_256_version_2
    +    (param $data i64) (param $out i32))
    +(func $ext_hashing_keccak_512_version_2
    +    (param $data i64) (param $out i32))
    +(func $ext_hashing_sha2_256_version_2
    +    (param $data i64) (param $out i32))
    +(func $ext_hashing_blake2_128_version_2
    +    (param $data i64) (param $out i32))
    +(func $ext_hashing_blake2_256_version_2
    +    (param $data i64) (param $out i32))
    +(func $ext_hashing_twox_64_version_2
    +    (param $data i64) (param $out i32))
    +(func $ext_hashing_twox_128_version_2
    +    (param $data i64) (param $out i32))
    +(func $ext_hashing_twox_256_version_2
    +    (param $data i64) (param $out i32))
    +(func $ext_trie_blake2_256_root_version_3
    +    (param $data i64) (param $version i32) (param $out i32))
    +(func $ext_trie_blake2_256_ordered_root_version_3
    +    (param $data i64) (param $version i32) (param $out i32))
    +(func $ext_trie_keccak_256_root_version_3
    +    (param $data i64) (param $version i32) (param $out i32))
    +(func $ext_trie_keccak_256_ordered_root_version_3
    +    (param $data i64) (param $version i32) (param $out i32))
    +(func $ext_crypto_ed25519_generate_version_2
    +    (param $key_type_id i32) (param $seed i64) (param $out i32))
    +(func $ext_crypto_sr25519_generate_version_2
+    (param $key_type_id i32) (param $seed i64) (param $out i32) (result i32))
+(func $ext_crypto_ecdsa_generate_version_2
+    (param $key_type_id i32) (param $seed i64) (param $out i32) (result i32))
    +
    +

    The behaviour of these functions is identical to their version 1 or version 2 counterparts. Instead of allocating a buffer, writing the output to it, and returning a pointer to it, the new version of these functions accepts an out parameter containing the memory location where the host writes the output. The output is always of a size known at compilation time.
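
    For example, a runtime could hash into a fixed-size stack buffer along these lines (hypothetical Rust binding; wasm32 pointer casts and the pointer-size packing are assumptions of this sketch):

    extern "C" {
        fn ext_hashing_twox_256_version_2(data: u64, out: u32);
    }

    fn twox_256(data: &[u8]) -> [u8; 32] {
        // The output buffer lives on the runtime's stack; no allocator is involved.
        let mut out = [0u8; 32];
        unsafe {
            ext_hashing_twox_256_version_2(
                (data.as_ptr() as u64) | ((data.len() as u64) << 32),
                out.as_mut_ptr() as u32,
            );
        }
        out
    }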

    +
    (func $ext_default_child_storage_root_version_3
    +    (param $child_storage_key i64) (param $out i32))
    +(func $ext_storage_root_version_3
    +    (param $out i32))
    +
    +

    The behaviour of these functions is identical to their version 1 and version 2 counterparts. Instead of allocating a buffer, writing the output to it, and returning a pointer to it, the new versions of these functions accept an out parameter containing the memory location where the host writes the output. The output is always of a size known at compilation time.

    +

    Version 1 of these functions has been taken as a base rather than version 2, as PPP#6, which deprecates version 2 of these functions, has previously been accepted.

    +
    (func $ext_storage_clear_prefix_version_3
    +    (param $maybe_prefix i64) (param $maybe_limit i64)
    +    (param $maybe_cursor_in_out i64) (param $backend_out i32)
+    (param $unique_out i32) (param $loops_out i32) (result i32))
+(func $ext_default_child_storage_clear_prefix_version_3
+    (param $child_storage_key i64) (param $prefix i64) (param $maybe_limit i64)
+    (param $maybe_cursor_in_out i64) (param $backend_out i32)
+    (param $unique_out i32) (param $loops_out i32) (result i32))
+(func $ext_default_child_storage_kill_version_4
+    (param $child_storage_key i64) (param $maybe_limit i64)
+    (param $maybe_cursor_in_out i64) (param $backend_out i32)
+    (param $unique_out i32) (param $loops_out i32) (result i32))
    +
    +

    These functions amend already implemented but still unused functions introduced by PPP#7, hence there is no version number change. maybe_limit defines the limit of backend deletions, not counting keys in the current overlay. maybe_cursor_in_out may be used to pass a continuation cursor. The cursor is written into the same field if the limit was reached and not all the keys were cleared; otherwise, None is written. (CAVEAT: it is impossible to determine an appropriate buffer size in advance; this approach is open to discussion.) The backend_out, unique_out and loops_out parameters contain the memory locations where the output is written (respectively, the number of items removed from the backend DB; the number of unique keys removed, including the overlay; and the number of iterations done). Any of the output parameters may be -1, in which case no output is written. The functions return 0 to indicate success, or 1 if the maybe_cursor_in_out buffer was not large enough to hold the new cursor; in the latter case, None is written to the buffer.
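
    The following is a rough, non-normative sketch of a single invocation in Rust. It assumes that the prefix, limit and cursor are passed as already-encoded buffers and that each *_out pointer designates a 32-bit counter; both are assumptions of the sketch, not part of the proposal text.

    extern "C" {
        fn ext_storage_clear_prefix_version_3(
            maybe_prefix: u64,
            maybe_limit: u64,
            maybe_cursor_in_out: u64,
            backend_out: u32,
            unique_out: u32,
            loops_out: u32,
        ) -> u32;
    }

    fn pack(buf: &[u8]) -> u64 {
        (buf.as_ptr() as u64) | ((buf.len() as u64) << 32)
    }

    // Returns (backend_removed, unique_removed, loops_done, cursor_fitted).
    fn clear_prefix_once(
        prefix_encoded: &[u8],
        limit_encoded: &[u8],
        cursor_buf: &mut [u8],
    ) -> (u32, u32, u32, bool) {
        let mut backend: u32 = 0; // items removed from the backend DB
        let mut unique: u32 = 0;  // unique keys removed, including the overlay
        let mut loops: u32 = 0;   // iterations done
        let ret = unsafe {
            ext_storage_clear_prefix_version_3(
                pack(prefix_encoded),
                pack(limit_encoded),
                pack(cursor_buf),
                &mut backend as *mut u32 as u32,
                &mut unique as *mut u32 as u32,
                &mut loops as *mut u32 as u32,
            )
        };
        // 0 = success; 1 = the cursor buffer was too small to hold the new cursor.
        (backend, unique, loops, ret == 0)
    }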

    +
    (func $ext_crypto_ed25519_sign_version_2
+    (param $key_type_id i32) (param $key i32) (param $msg i64) (param $out i32) (result i32))
+(func $ext_crypto_sr25519_sign_version_2
+    (param $key_type_id i32) (param $key i32) (param $msg i64) (param $out i32) (result i32))
+(func $ext_crypto_ecdsa_sign_version_2
+    (param $key_type_id i32) (param $key i32) (param $msg i64) (param $out i32) (result i32))
+(func $ext_crypto_ecdsa_sign_prehashed_version_2
+    (param $key_type_id i32) (param $key i32) (param $msg i64) (param $out i32) (result i64))
    +
    +

    The behaviour of these functions is identical to their version 1 counterparts. The new versions of these functions accept an out parameter containing the memory location where the host writes the signature. The signatures are always of a size known at compilation time. On success, these functions return 0. If the public key can't be found in the keystore, these functions return 1 and do not write anything to out.

    +

    Note that the return value is 0 on success and 1 on failure, while the previous version of these functions wrote 1 on success (as it represents a SCALE-encoded Some) and 0 on failure (as it represents a SCALE-encoded None). Returning 0 on success and non-zero on failure is consistent with standard practices in the C programming language and is less surprising than the opposite.
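
    A minimal sketch of the calling convention, assuming a hypothetical Rust binding, that `key` points to the 32-byte public key as in version 1, and a 64-byte sr25519 signature buffer on the runtime's stack:

    extern "C" {
        fn ext_crypto_sr25519_sign_version_2(key_type_id: u32, key: u32, msg: u64, out: u32) -> u32;
    }

    fn sr25519_sign(key_type_id: u32, public: &[u8; 32], msg: &[u8]) -> Option<[u8; 64]> {
        let mut sig = [0u8; 64]; // sr25519 signatures are 64 bytes
        let ret = unsafe {
            ext_crypto_sr25519_sign_version_2(
                key_type_id,
                public.as_ptr() as u32,
                (msg.as_ptr() as u64) | ((msg.len() as u64) << 32),
                sig.as_mut_ptr() as u32,
            )
        };
        // 0 = success; 1 = the public key was not found in the keystore.
        if ret == 0 { Some(sig) } else { None }
    }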

    +
    (func $ext_crypto_secp256k1_ecdsa_recover_version_3
+    (param $sig i32) (param $msg i32) (param $out i32) (result i32))
+(func $ext_crypto_secp256k1_ecdsa_recover_compressed_version_3
+    (param $sig i32) (param $msg i32) (param $out i32) (result i32))
    +
    +

    The behaviour of these functions is identical to their version 2 counterparts. The new versions of these functions accept an out parameter containing the memory location where the host writes the signature. The signatures are always of a size known at compilation time. On success, these functions return 0. On failure, these functions return a non-zero value and do not write anything to out.

    +

    The non-zero value written on failure is:

    + +

    These values are equal to the values returned on error by the version 2 (see https://spec.polkadot.network/chap-host-api#defn-ecdsa-verify-error), but incremented by 1 to reserve 0 for success.

    +
    (func $ext_crypto_ed25519_num_public_keys_version_1
+    (param $key_type_id i32) (result i32))
+(func $ext_crypto_ed25519_public_key_version_2
+    (param $key_type_id i32) (param $key_index i32) (param $out i32))
+(func $ext_crypto_sr25519_num_public_keys_version_1
+    (param $key_type_id i32) (result i32))
+(func $ext_crypto_sr25519_public_key_version_2
+    (param $key_type_id i32) (param $key_index i32) (param $out i32))
+(func $ext_crypto_ecdsa_num_public_keys_version_1
+    (param $key_type_id i32) (result i32))
    +(func $ext_crypto_ecdsa_public_key_version_2
    +    (param $key_type_id i32) (param $key_index i32) (param $out i32))
    +
    +

    The functions supersede the ext_crypto_ed25519_public_key_version_1, ext_crypto_sr25519_public_key_version_1, and ext_crypto_ecdsa_public_key_version_1 host functions.

    +

    Instead of calling ext_crypto_ed25519_public_key_version_1 to obtain the list of all the keys at once, the runtime should instead call ext_crypto_ed25519_num_public_keys_version_1 to get the number of public keys available, then ext_crypto_ed25519_public_key_version_2 repeatedly. The ext_crypto_ed25519_public_key_version_2 function writes the public key of the given key_index to the memory location designated by out. The key_index must be between 0 (included) and n (excluded), where n is the value returned by ext_crypto_ed25519_num_public_keys_version_1. Execution must trap if key_index is out of range.

    +

    The same explanations apply for ext_crypto_sr25519_public_key_version_1 and ext_crypto_ecdsa_public_key_version_1.

    +

    Host implementers should be aware that the list of public keys (including their ordering) must not change while the runtime is running. That is most likely done by copying the list of all available keys either at the start of the execution or the first time the list is accessed.
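
    Putting the two functions together, key enumeration might look roughly like this (hypothetical Rust bindings; an ed25519 public key is 32 bytes):

    extern "C" {
        fn ext_crypto_ed25519_num_public_keys_version_1(key_type_id: u32) -> u32;
        fn ext_crypto_ed25519_public_key_version_2(key_type_id: u32, key_index: u32, out: u32);
    }

    fn ed25519_public_keys(key_type_id: u32) -> Vec<[u8; 32]> {
        let n = unsafe { ext_crypto_ed25519_num_public_keys_version_1(key_type_id) };
        let mut keys = Vec::with_capacity(n as usize);
        for key_index in 0..n {
            let mut key = [0u8; 32];
            unsafe {
                // Passing a key_index outside 0..n would trap.
                ext_crypto_ed25519_public_key_version_2(key_type_id, key_index, key.as_mut_ptr() as u32);
            }
            keys.push(key);
        }
        keys
    }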

    +
    (func $ext_offchain_http_request_start_version_2
    +  (param $method i64) (param $uri i64) (param $meta i64) (result i32))
    +
    +

    The behaviour of this function is identical to its version 1 counterpart. Instead of allocating a buffer, writing the request identifier in it, and returning a pointer to it, version 2 of this function simply returns the identifier newly assigned to the HTTP request. On failure, this function returns -1. An identifier of -1 is invalid and is reserved to indicate failure.

    +
    (func $ext_offchain_http_request_write_body_version_2
+  (param $request_id i32) (param $chunk i64) (param $deadline i64) (result i32))
    +(func $ext_offchain_http_response_read_body_version_2
    +  (param $request_id i32) (param $buffer i64) (param $deadline i64) (result i64))
    +
    +

    The behaviour of these functions is identical to their version 1 counterparts. Instead of allocating a buffer, writing two bytes in it, and returning a pointer to it, the new versions of these functions simply indicate what happened:

    + +

    These values are equal to the values returned on error by version 1 (see https://spec.polkadot.network/chap-host-api#defn-http-error), but tweaked to reserve positive numbers for success.

    +

    When it comes to ext_offchain_http_response_read_body_version_2, the host implementers must not read more data at once than can be unambiguously represented in the returned value. Given that the buffer size is always less than or equal to 4 GiB, this is not a problem.

    +
    (func $ext_offchain_http_response_wait_version_2
    +    (param $ids i64) (param $deadline i64) (param $out i32))
    +
    +

    The behaviour of this function is identical to its version 1 counterpart. Instead of allocating a buffer, writing the output to it, and returning a pointer to it, the new version of this function accepts an out parameter containing the memory location where the host writes the output.

    +

    The encoding of the response code is also modified compared to its version 1 counterpart, and each response code is now encoded as 4 little-endian bytes, as described below:

    + +

    The buffer passed to out must always have a size of 4 * n, where n is the number of elements in ids.
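
    As an illustration only, a caller could prepare and decode that buffer as follows, assuming that the ids and deadline are passed as already-encoded buffers (an assumption of this sketch, along with the hypothetical Rust binding):

    extern "C" {
        fn ext_offchain_http_response_wait_version_2(ids: u64, deadline: u64, out: u32);
    }

    fn wait_for_responses(ids_encoded: &[u8], deadline_encoded: &[u8], n: usize) -> Vec<u32> {
        let mut out = vec![0u8; 4 * n]; // 4 bytes per requested id
        unsafe {
            ext_offchain_http_response_wait_version_2(
                (ids_encoded.as_ptr() as u64) | ((ids_encoded.len() as u64) << 32),
                (deadline_encoded.as_ptr() as u64) | ((deadline_encoded.len() as u64) << 32),
                out.as_mut_ptr() as u32,
            );
        }
        // One little-endian 32-bit status word per request id, as described above.
        out.chunks_exact(4)
            .map(|c| u32::from_le_bytes([c[0], c[1], c[2], c[3]]))
            .collect()
    }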

    +
    (func $ext_offchain_http_response_header_name_version_1
    +    (param $request_id i32) (param $header_index i32) (param $out i64) (result i64))
    +(func $ext_offchain_http_response_header_value_version_1
    +    (param $request_id i32) (param $header_index i32) (param $out i64) (result i64))
    +
    +

    These functions supersede the ext_offchain_http_response_headers_version_1 host function.

    +

    Contrary to ext_offchain_http_response_headers_version_1, only one header indicated by header_index can be read at a time. Instead of calling ext_offchain_http_response_headers_version_1 once, the runtime should call ext_offchain_http_response_header_name_version_1 and ext_offchain_http_response_header_value_version_1 multiple times with an increasing header_index, until a value of -1 is returned.

    +

    These functions accept an out parameter containing a pointer-size to the memory location where the header name or value should be written.

    +

    These functions return the size, in bytes, of the header name or header value. If the request doesn't exist or is in an invalid state (as documented for ext_offchain_http_response_headers_version_1) or the header_index is out of range, a value of -1 is returned. Given that the host must never write more bytes than the size of the buffer in out, and that the size of this buffer is expressed as a 32-bit number, a 64-bit value of -1 is not ambiguous.

    +

    If the buffer in out is too small to fit the entire header name or value, only the bytes that fit are written, and the rest are discarded.
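
    The iteration described above might be driven like this; the buffer sizes are arbitrary assumptions of the sketch, the bindings are hypothetical, and truncated headers are simply cut short:

    extern "C" {
        fn ext_offchain_http_response_header_name_version_1(request_id: u32, header_index: u32, out: u64) -> i64;
        fn ext_offchain_http_response_header_value_version_1(request_id: u32, header_index: u32, out: u64) -> i64;
    }

    fn pack(buf: &[u8]) -> u64 {
        (buf.as_ptr() as u64) | ((buf.len() as u64) << 32)
    }

    fn response_headers(request_id: u32) -> Vec<(Vec<u8>, Vec<u8>)> {
        let mut headers = Vec::new();
        let name_buf = [0u8; 256];   // assumed upper bound for header names
        let value_buf = [0u8; 4096]; // assumed upper bound for header values
        for index in 0.. {
            let name_len = unsafe {
                ext_offchain_http_response_header_name_version_1(request_id, index, pack(&name_buf))
            };
            if name_len < 0 {
                break; // -1: no header at this index, or invalid request state
            }
            let value_len = unsafe {
                ext_offchain_http_response_header_value_version_1(request_id, index, pack(&value_buf))
            };
            if value_len < 0 {
                break;
            }
            // If a header is longer than the buffer, only the bytes that fit were written.
            headers.push((
                name_buf[..(name_len as usize).min(name_buf.len())].to_vec(),
                value_buf[..(value_len as usize).min(value_buf.len())].to_vec(),
            ));
        }
        headers
    }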

    +
    (func $ext_offchain_submit_transaction_version_2
+    (param $data i64) (result i32))
    +(func $ext_offchain_http_request_add_header_version_2
    +    (param $request_id i32) (param $name i64) (param $value i64) (result i32))
    +
    +

    Instead of allocating a buffer, writing 1 or 0 in it, and returning a pointer to it, the version 2 of these functions returns 0 or 1, where 0 indicates success and 1 indicates failure. The runtime must interpret any non-0 value as failure, but the client must always return 1 in case of failure.

    +
    (func $ext_offchain_local_storage_read_version_1
    +    (param $kind i32) (param $key i64) (param $value_out i64) (param $offset i32) (result i64))
    +
    +

    This function supersedes the ext_offchain_local_storage_get_version_1 host function, and uses an API and logic similar to ext_storage_read_version_2.

    +

    It reads the offchain local storage key indicated by kind and key starting at the byte indicated by offset, and writes the value to the pointer-size indicated by value_out.

    +

    The function returns the number of bytes written into the value_out buffer. If the entry doesn't exist, the -1 value is returned. Given that the host must never write more bytes than the size of the buffer in value_out, and that the size of this buffer is expressed as a 32-bit number, a 64-bit value of -1 is not ambiguous.

    +
    (func $ext_offchain_network_peer_id_version_1
    +    (param $out i64))
    +
    +

    This function writes the PeerId of the local node to the memory location indicated by out. A PeerId is always 38 bytes long.

    +
    (func $ext_misc_runtime_version_version_2
    +    (param $wasm i64) (param $out i64))
    +
    +

    The behaviour of this function is identical to its version 1 counterpart. Instead of allocating a buffer, writing the output to it, and returning a pointer to it, the new version of this function accepts an out parameter containing a pointer-size to the memory location where the host writes the output.

    +
    (func $ext_offchain_random_seed_version_2 (param $out i32))
    +
    +

    The behaviour of this function is identical to its version 1 counterpart. Instead of allocating a buffer, writing the output to it, and returning a pointer to it, the new version of this function accepts an out parameter containing the address of the memory location where the host writes the output. The output is always 32 bytes.

    +
    (func $ext_misc_input_read_version_1
+    (param $offset i64) (param $out i64) (result i32))
    +
    +

    When a runtime function is called, the host uses the allocator to allocate memory within the runtime to write some input data. The new host function provides an alternative way to access the input that doesn't use the allocator.

    +

    The function copies some data from the input data to the runtime's memory. The offset parameter indicates the offset within the input data from which to start copying, and must not exceed the size of the input data. The out parameter is a pointer-size and contains the buffer to write to.

    +

    The runtime execution stops with an error if offset is strictly greater than the input data size.

    +

    The return value is the number of bytes written unless out has zero length, in which case the full length of input data in bytes is returned, and nothing is written into the output buffer.
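
    A common pattern this enables is to query the total input length with a zero-length buffer and then copy the whole input in a second call, roughly as sketched below (hypothetical Rust binding; pointer-size packing as in the previous sketches):

    extern "C" {
        fn ext_misc_input_read_version_1(offset: u64, out: u64) -> u32;
    }

    fn read_input() -> Vec<u8> {
        // A zero-length `out` buffer makes the host return the full input length
        // without writing anything.
        let total = unsafe { ext_misc_input_read_version_1(0, 0) } as usize;
        let mut input = vec![0u8; total];
        let written = unsafe {
            ext_misc_input_read_version_1(
                0,
                (input.as_ptr() as u64) | ((input.len() as u64) << 32),
            )
        };
        debug_assert_eq!(written as usize, total);
        input
    }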

    +

    Other changes

    +

    In addition to the new host functions, this RFC proposes two changes to the runtime-host interface:

    + +

    All the host functions superseded by new host functions are now considered deprecated and should no longer be used.

    +

    The following other host functions are also considered deprecated:

    + +

    Unresolved Questions

    +

    The changes in this RFC would need to be benchmarked. That involves implementing the RFC and measuring the speed difference.

    +

    It is expected that most host functions are faster or equal in speed to their deprecated counterparts, with the following exceptions:

    +

    (source)

    Table of Contents

    -

    Stakeholders

    +

    Stakeholders

    -

    Explanation

    +

    Explanation

    This protocol builds on the existing Collator Selection pallet and its notion of Invulnerables. Invulnerables are collators (identified by their AccountIds) who @@ -1231,7 +1516,7 @@ migration.

  • SR Labs Auditors
  • Current collators including Paranodes, Stake Plus, Turboflakes, Peter Mensik, SIK, and many more.
  • -

    Unresolved Questions

    +

    Unresolved Questions

    None at this time.

    There may exist in the future system chains for which this model of collator selection is not @@ -1272,10 +1557,10 @@ appropriate. These chains should be evaluated on a case-by-case basis.

    AuthorsPierre Krieger -

    Summary

    +

    Summary

    The full nodes of the Polkadot peer-to-peer network maintain a distributed hash table (DHT), which is currently used for full nodes discovery and validators discovery purposes.

    This RFC proposes to extend this DHT to be used to discover full nodes of the parachains of Polkadot.

    -

    Motivation

    +

    Motivation

    The maintenance of bootnodes has long been an annoyance for everyone.

    When a bootnode is newly-deployed or removed, every chain specification must be updated in order to take the update into account. This has led to various non-optimal solutions, such as pulling chain specifications from GitHub repositories. When it comes to RPC nodes, UX developers often have trouble finding up-to-date addresses of parachain RPC nodes. With the ongoing migration from RPC nodes to light clients, similar problems would happen with chain specifications as well.

    @@ -1284,9 +1569,9 @@ When it comes to RPC nodes, UX developers often have trouble finding up-to-date

    Because the list of bootnodes in chain specifications is so annoying to modify, the consequence is that the number of bootnodes is rather low (typically between 2 and 15). In order to better resist downtimes and DoS attacks, a better solution would be to use every node of a certain chain as potential bootnode, rather than special-casing some specific nodes.

    While this RFC doesn't solve these problems for relay chains, it aims at solving it for parachains by storing the list of all the full nodes of a parachain on the relay chain DHT.

    Assuming that this RFC is implemented, and that light clients are used, deploying a parachain wouldn't require more work than registering it onto the relay chain and starting the collators. There wouldn't be any need for special infrastructure nodes anymore.

    -

    Stakeholders

    +

    Stakeholders

    This RFC has been opened on my own initiative because I think that this is a good technical solution to a usability problem that many people are encountering and that they don't realize can be solved.

    -

    Explanation

    +

    Explanation

    The content of this RFC only applies for parachains and parachain nodes that are "Substrate-compatible". It is in no way mandatory for parachains to comply to this RFC.

    Note that "Substrate-compatible" is very loosely defined as "implements the same mechanisms and networking protocols as Substrate". The author of this RFC believes that "Substrate-compatible" should be very precisely specified, but there is controversy on this topic.

    While a lot of this RFC concerns the implementation of parachain nodes, it makes use of the resources of the Polkadot chain, and as such it is important to describe them in the Polkadot specification.

    @@ -1348,7 +1633,7 @@ If this ever becomes a problem, this value of 20 is an arbitrary constant that

    Irrelevant.

    Prior Art and References

    None.

    -

    Unresolved Questions

    +

    Unresolved Questions

    While it fundamentally doesn't change much to this RFC, using BabeApi_currentEpoch and BabeApi_nextEpoch might be inappropriate. I'm not familiar enough with good practices within the runtime to have an opinion here. Should it be an entirely new pallet?

    It is possible that in the future a client could connect to a parachain without having to rely on a trusted parachain specification.

    @@ -1383,9 +1668,9 @@ If this ever becomes a problem, this value of 20 is an arbitrary constant that AuthorsPierre Krieger -

    Summary

    +

    Summary

    Improve the networking messages that query storage items from the remote, in order to reduce the bandwidth usage and number of round trips of light clients.

    -

    Motivation

    +

    Motivation

    Clients on the Polkadot peer-to-peer network can be divided into two categories: full nodes and light clients. So-called full nodes are nodes that store the content of the chain locally on their disk, while light clients are nodes that don't. In order to access for example the balance of an account, a full node can do a disk read, while a light client needs to send a network message to a full node and wait for the full node to reply with the desired value. This reply is in the form of a Merkle proof, which makes it possible for the light client to verify the exactness of the value.

    Unfortunately, this network protocol is suffering from some issues:

    Once Polkadot and Kusama will have transitioned to state_version = 1, which modifies the format of the trie entries, it will be possible to generate Merkle proofs that contain only the hashes of values in the storage. Thanks to this, it is already possible to prove the existence of a key without sending its entire value (only its hash), or to prove that a value has changed or not between two blocks (by sending just their hashes). Thus, the only reason why aforementioned issues exist is because the existing networking messages don't give the possibility for the querier to query this. This is what this proposal aims at fixing.

    -

    Stakeholders

    +

    Stakeholders

    This is the continuation of https://github.com/w3f/PPPs/pull/10, which itself is the continuation of https://github.com/w3f/PPPs/pull/5.

    -

    Explanation

    +

    Explanation

    The protobuf schema of the networking protocol can be found here: https://github.com/paritytech/substrate/blob/5b6519a7ff4a2d3cc424d78bc4830688f3b184c0/client/network/light/src/schema/light.v1.proto

    The proposal is to modify this protocol in this way:

    @@ -11,6 +11,7 @@ message Request {
    @@ -1472,7 +1757,7 @@ Also note that child tries aren't considered as descendants of the main trie whe
     

    The prior networking protocol is maintained for now. The older version of this protocol could get removed in a long time.

    Prior Art and References

    None. This RFC is a clean-up of an existing mechanism.

    -

    Unresolved Questions

    +

    Unresolved Questions

    None

    The current networking protocol could be deprecated in a long time. Additionally, the current "state requests" protocol (used for warp syncing) could also be deprecated in favor of this one.

    @@ -1495,13 +1780,13 @@ Also note that child tries aren't considered as descendants of the main trie whe AuthorsJonas Gehrlein -

    Summary

    +

    Summary

    The Polkadot UC will generate revenue from the sale of available Coretime. The question then arises: how should we handle these revenues? Broadly, there are two reasonable paths – burning the revenue and thereby removing it from total issuance, or diverting it to the Treasury. This Request for Comment (RFC) presents arguments favoring burning as the preferred mechanism for handling revenues from Coretime sales.

    -

    Motivation

    +

    Motivation

    How to handle the revenue accrued from Coretime sales is an important economic question that influences the value of DOT and should be properly discussed before deciding for either of the options. Now is the best time to start this discussion.

    -

    Stakeholders

    +

    Stakeholders

    Polkadot DOT token holders.

    -

    Explanation

    +

    Explanation

    This RFC discusses potential benefits of burning the revenue accrued from Coretime sales instead of diverting them to Treasury. Here are the following arguments for it.

    It's in the interest of the Polkadot community to have a consistent and predictable Treasury income, because volatility in the inflow can be damaging, especially in situations when it is insufficient. As such, this RFC operates under the presumption of a steady and sustainable Treasury income flow, which is crucial for the Polkadot community's stability. The assurance of a predictable Treasury income, as outlined in a prior discussion here, or through other equally effective measures, serves as a baseline assumption for this argument.

    Consequently, we need not concern ourselves with this particular issue here. This naturally begs the question - why should we introduce additional volatility to the Treasury by aligning it with the variable Coretime sales? It's worth noting that Coretime revenues often exhibit an inverse relationship with periods when Treasury spending should ideally be ramped up. During periods of low Coretime utilization (indicated by lower revenue), Treasury should spend more on projects and endeavours to increase the demand for Coretime. This pattern underscores that Coretime sales, by their very nature, are an inconsistent and unpredictable source of funding for the Treasury. Given the importance of maintaining a steady and predictable inflow, it's unnecessary to rely on another volatile mechanism. Some might argue that we could have both: a steady inflow (from inflation) and some added bonus from Coretime sales, but burning the revenue would offer further benefits as described below.

    @@ -1544,13 +1829,13 @@ Also note that child tries aren't considered as descendants of the main trie whe AuthorsJoe Petrowski -

    Summary

    +

    Summary

    Since the introduction of the Collectives parachain, many groups have expressed interest in forming new -- or migrating existing groups into -- on-chain collectives. While adding a new collective is relatively simple from a technical standpoint, the Fellowship will need to merge new pallets into the Collectives parachain for each new collective. This RFC proposes a means for the network to ratify a new collective, thus instructing the Fellowship to instate it in the runtime.

    -

    Motivation

    +

    Motivation

    Many groups have expressed interest in representing collectives on-chain. Some of these include:

    -

    Stakeholders

    +

    Stakeholders

    • Parachain teams
    • Parachain users
    -

    Explanation

    +

    Explanation

    Status quo

    A parachain can either be locked or unlocked. With the parachain locked, the parachain manager does not have any privileges. With the parachain unlocked, the parachain manager can perform the following actions with the paras_registrar pallet:

      @@ -1855,7 +2140,7 @@ This can be unified and simplified by moving both parts into the runtime.

    • Allow parachain to renew lease without actually run another parachain: https://github.com/paritytech/polkadot/issues/6685
    • Always treat parachain that never produced block for a significant amount of time as unlocked: https://github.com/paritytech/polkadot/issues/7539
    -

    Unresolved Questions

    +

    Unresolved Questions

    None at this stage.

    This RFC is only intended to be a short-term solution. Slots will be removed in the future, and the lock mechanism is likely to be replaced with a more generalized parachain management & recovery system. Therefore, the long-term impacts of this RFC are not considered.

    @@ -1891,19 +2176,19 @@ This can be unified and simplified by moving both parts into the runtime.

    Authors@brenzi for Encointer Association, 8000 Zurich, Switzerland -

    Summary

    +

    Summary

    Encointer has been a system chain on Kusama since Jan 2022 and has been developed and maintained by the Encointer association. This RFC proposes to treat Encointer like any other system chain and include it in the fellowship repo with this PR.

    -

    Motivation

    +

    Motivation

    Encointer does not seek to be in control of its runtime repository. As a decentralized system, the fellowship has a more suitable structure to maintain a system chain runtime repo than the Encointer association does.

    Also, Encointer aims to update its runtime in batches with other system chains in order to have consistency for interoperability across system chains.

    -

    Stakeholders

    +

    Stakeholders

    • Fellowship: Will continue to take upon them the review and auditing work for the Encointer runtime, but the process is streamlined with other system chains and therefore less time-consuming compared to the separate repo and CI process we currently have.
    • Kusama Network: Tokenholders can easily see the changes of all system chains in one place.
    • Encointer Association: Further decentralization of the Encointer Network necessities like devops.
    • Encointer devs: Being able to work directly in the Fellowship runtimes repo to streamline and synergize with other developers.
    -

    Explanation

    +

    Explanation

    Our PR has all details about our runtime and how we would move it into the fellowship repo.

    Noteworthy: All Encointer-specific pallets will still be located in encointer's repo for the time being: https://github.com/encointer/pallets

    It will still be the duty of the Encointer team to keep its runtime up to date and provide adequate test fixtures. Frequent dependency bumps with Polkadot releases would be beneficial for interoperability and could be streamlined with other system chains but that will not be a duty of fellowship. Whenever possible, all system chains could be upgraded jointly (including Encointer) with a batch referendum.

    @@ -1920,7 +2205,7 @@ This can be unified and simplified by moving both parts into the runtime.

    No changes

    Prior Art and References

    Existing Encointer runtime repo

    -

    Unresolved Questions

    +

    Unresolved Questions

    None identified

    More info on Encointer: encointer.org

    @@ -2842,11 +3127,11 @@ other privacy-enhancing mechanisms to address this concern. AuthorsJoe Petrowski, Gavin Wood -

    Summary

    +

    Summary

    The Relay Chain contains most of the core logic for the Polkadot network. While this was necessary prior to the launch of parachains and development of XCM, most of this logic can exist in parachains. This is a proposal to migrate several subsystems into system parachains.

    -

    Motivation

    +

    Motivation

    Polkadot's scaling approach allows many distinct state machines (known generally as parachains) to operate with common guarantees about the validity and security of their state transitions. Polkadot provides these common guarantees by executing the state transitions on a strict subset (a backing @@ -2858,13 +3143,13 @@ blockspace) to the network.

    By minimising state transition logic on the Relay Chain by migrating it into "system chains" -- a set of parachains that, with the Relay Chain, make up the Polkadot protocol -- the Polkadot Ubiquitous Computer can maximise its primary offering: secure blockspace.

    -

    Stakeholders

    +

    Stakeholders

    • Parachains that interact with affected logic on the Relay Chain;
    • Core protocol and XCM format developers;
    • Tooling, block explorer, and UI developers.
    -

    Explanation

    +

    Explanation

    The following pallets and subsystems are good candidates to migrate from the Relay Chain:

    -

    Unresolved Questions

    +

    Unresolved Questions

    There remain some implementation questions, like how to use balances for both Staking and Governance. See, for example, Moving Staking off the Relay Chain.

    @@ -3074,13 +3359,13 @@ With Staking and Governance off the Relay Chain, this is not an unreasonable nex AuthorsVedhavyas Singareddi -

    Summary

    +

    Summary

    At the moment, the state_version field on RuntimeVersion determines which state version is used for the Storage. We have a use case where we want the extrinsics root to be derived using StateVersion::V1. Without defining a new field under RuntimeVersion, we would like to propose a system_version field that can be used to derive both the storage and extrinsic state versions.

    -

    Motivation

    +

    Motivation

    Since the extrinsic state version is always StateVersion::V0, deriving extrinsic root requires full extrinsic data. This would be problematic when we need to verify the extrinsics root if the extrinsic sizes are bigger. This problem is further explored in https://github.com/polkadot-fellows/RFCs/issues/19

    @@ -3092,11 +3377,11 @@ One of the main challenge here is some extrinsics could be big enough that this included in the Consensus block due to Block's weight restriction. If the extrinsic root is derived using StateVersion::V1, then we do not need to pass the full extrinsic data but rather at maximum, 32 byte of extrinsic data.

    -

    Stakeholders

    +

    Stakeholders

    • Technical Fellowship, in its role of maintaining system runtimes.
    -

    Explanation

    +

    Explanation

    In order to use a project-specific StateVersion for extrinsic roots, we proposed an implementation that introduced a parameter to frame_system::Config, but that unfortunately did not feel correct. @@ -3139,7 +3424,7 @@ so that chains know which system_version to use.

    We proposed introducing a similar change by introducing a parameter to frame_system::Config but did not feel that is the correct way of introducing this change.

    -

    Unresolved Questions

    +

    Unresolved Questions

    I do not have any specific questions about this change at the moment.

    IMO, this change is pretty self-contained and there won't be any future work necessary.

    @@ -3170,9 +3455,9 @@ is the correct way of introducing this change.

    AuthorsSebastian Kunert -

    Summary

    +

    Summary

    This RFC proposes a new host function for parachains, storage_proof_size. It shall provide the size of the currently recorded storage proof to the runtime. Runtime authors can use the proof size to improve block utilization by retroactively reclaiming unused storage weight.

    -

    Motivation

    +

    Motivation

    The number of extrinsics that are included in a parachain block is limited by two constraints: execution time and proof size. FRAME weights cover both concepts, and block-builders use them to decide how many extrinsics to include in a block. However, these weights are calculated ahead of time by benchmarking on a machine with reference hardware. The execution-time properties of the state-trie and its storage items are unknown at benchmarking time. Therefore, we make some assumptions about the state-trie:

    • Trie Depth: We assume a trie depth to account for intermediary nodes.
    • @@ -3181,12 +3466,12 @@ is the correct way of introducing this change.

      These pessimistic assumptions lead to an overestimation of storage weight, negatively impacting block utilization on parachains.

      In addition, the current model does not account for multiple accesses to the same storage items. While these repetitive accesses will not increase storage-proof size, the runtime-side weight monitoring will account for them multiple times. Since the proof size is completely opaque to the runtime, we can not implement retroactive storage weight correction.

      A solution must provide a way for the runtime to track the exact storage-proof size consumed on a per-extrinsic basis.

      -

      Stakeholders

      +

      Stakeholders

      • Parachain Teams: They MUST include this host function in their runtime and node.
      • Light-client Implementors: They SHOULD include this host function in their runtime and node.
      -

      Explanation

      +

      Explanation

      This RFC proposes a new host function that exposes the storage-proof size to the runtime. As a result, runtimes can implement storage weight reclaiming mechanisms that improve block utilization.

      This RFC proposes the following host function signature:

      #![allow(unused)]
      @@ -3255,12 +3540,12 @@ is the correct way of introducing this change.

      AuthorsAurora Poppyseed, Just_Luuuu, Viki Val, Joe Petrowski -

      Summary

      +

      Summary

      This RFC proposes changing the current deposit requirements on the Polkadot and Kusama Asset Hub for creating an NFT collection, minting an individual NFT, and lowering its corresponding metadata and attribute deposits. The objective is to lower the barrier to entry for NFT creators, fostering a more inclusive and vibrant ecosystem while maintaining network integrity and preventing spam.

      -

      Motivation

      +

      Motivation

      The current deposit of 10 DOT for collection creation (along with 0.01 DOT for item deposit and 0.2 DOT for metadata and attribute deposits) on the Polkadot Asset Hub and 0.1 KSM on Kusama Asset Hub presents a significant financial barrier for many NFT creators. By lowering the deposit @@ -3277,7 +3562,7 @@ low.

    • Deposits SHOULD be derived from the deposit function, adjusted by the corresponding pricing mechanism.
      -

      Stakeholders

      +

      Stakeholders

      • NFT Creators: Primary beneficiaries of the proposed change, particularly those who found the current deposit requirements prohibitive.
      • @@ -3291,7 +3576,7 @@ collections, enhancing the overall ecosystem.

        Previous discussions have been held within the Polkadot Forum, with artists expressing their concerns about the deposit amounts.

        -

        Explanation

        +

        Explanation

        This RFC proposes a revision of the deposit constants in the configuration of the NFTs pallet on the Polkadot Asset Hub. The new deposit amounts would be determined by a standard deposit formula.

        As of v1.1.1, the Collection Deposit is 10 DOT and the Item Deposit is 0.01 DOT (see @@ -3406,7 +3691,7 @@ efficient data management and periodic reviews of storage requirements, will be Kusama and Polkadot Asset Hubs, making Polkadot and Kusama more accessible and user-friendly.

        Compatibility

        The change does not impact compatibility as a redeposit function is already implemented.

        -

        Unresolved Questions

        +

        Unresolved Questions

        If this RFC is accepted, there should not be any unresolved questions regarding how to adapt the implementation of deposits for NFT collections.

        Addendum

        @@ -3494,11 +3779,11 @@ Polkadot and Kusama networks.

        AuthorsAlin Dima -

        Summary

        +

        Summary

        Propose a way of permuting the availability chunk indices assigned to validators, in the context of recovering available data from systematic chunks, with the purpose of fairly distributing network bandwidth usage.

        -

        Motivation

        +

        Motivation

        Currently, the ValidatorIndex is always identical to the ChunkIndex. Since the validator array is only shuffled once per session, naively using the ValidatorIndex as the ChunkIndex would pose an unreasonable stress on the first N/3 validators during an entire session, when favouring availability recovery from systematic chunks.

        @@ -3506,9 +3791,9 @@ validators during an entire session, when favouring availability recovery from s systematic availability chunks to different validators, based on the relay chain block and core. The main purpose is to ensure fair distribution of network bandwidth usage for availability recovery in general and in particular for systematic chunk holders.

        -

        Stakeholders

        +

        Stakeholders

        Relay chain node core developers.

        -

        Explanation

        +

        Explanation

        Systematic erasure codes

        An erasure coding algorithm is considered systematic if it preserves the original unencoded data as part of the resulting code. @@ -3691,7 +3976,7 @@ governance call.

        Prior Art and References

        See comments on the tracking issue and the in-progress PR

        -

        Unresolved Questions

        +

        Unresolved Questions

        Not applicable.

        This enables future optimisations for the performance of availability recovery, such as retrieving batched systematic @@ -3768,7 +4053,7 @@ dispute scenarios.

        AuthorsBastian Köcher -

        Summary

        +

        Summary

        This RFC proposes to change the SessionKeys::generate_session_keys runtime api interface. This runtime api is used by validator operators to generate new session keys on a node. The public session keys are then registered manually on chain by the validator operator. Before this RFC it was not possible for the on chain logic to ensure that the account setting the public session keys is also in @@ -3776,7 +4061,7 @@ possession of the private session keys. To solve this the RFC proposes to pass t registration on chain to generate_session_keys. Further, this RFC proposes to change the return value of the generate_session_keys function to not only return the public session keys, but also the proof of ownership for the private session keys. The validator operator will then need to send the public session keys and the proof together when registering new session keys on chain.

        -

        Motivation

        +

        Motivation

        When submitting the new public session keys to the on chain logic there doesn't exist any verification of possession of the private session keys. This means that users can basically register any kind of public session keys on chain. While the on chain logic ensures that there are no duplicate keys, someone could try to prevent others from registering new session keys by setting them first. While this wouldn't bring @@ -3784,13 +4069,13 @@ the "attacker" any kind of advantage, more like disadvantages (potenti e.g. changing its session key in the event of a private session key leak.

        After this RFC this kind of attack would not be possible anymore, because the on chain logic can verify that the sending account is in ownership of the private session keys.

        -

        Stakeholders

        +

        Stakeholders

        • Polkadot runtime implementors
        • Polkadot node implementors
        • Validator operators
        -

        Explanation

        +

        Explanation

        We are first going to explain the proof format being used:

        #![allow(unused)]
         fn main() {
        @@ -3846,7 +4131,7 @@ and for returning the ownership proof alongside the public session keys.

        UIs would need to be updated to support the new RPC and the changed on chain logic.

        Prior Art and References

        None.

        -

        Unresolved Questions

        +

        Unresolved Questions

        None.

        Substrate implementation of the RFC.

        @@ -3886,10 +4171,10 @@ and for returning the ownership proof alongside the public session keys.

        AuthorsJoe Petrowski, Gavin Wood -

        Summary

        +

        Summary

        The Fellowship Manifesto states that members should receive a monthly allowance on par with gross income in OECD countries. This RFC proposes concrete amounts.

        -

        Motivation

        +

        Motivation

        One motivation for the Technical Fellowship is to provide an incentive mechanism that can induct and retain technical talent for the continued progress of the network.

        In order for members to uphold their commitment to the network, they should receive support to @@ -3899,12 +4184,12 @@ on par with a full-time job. Providing a livable wage to those making such contr pragmatic to work full-time on Polkadot.

        Note: Goals of the Fellowship, expectations for each Dan, and conditions for promotion and demotion are all explained in the Manifesto. This RFC is only to propose concrete values for allowances.

        -

        Stakeholders

        +

        Stakeholders

        • Fellowship members
        • Polkadot Treasury
        -

        Explanation

        +

        Explanation

        This RFC proposes agreeing on salaries relative to a single level, the III Dan. As such, changes to the amount or asset used would only be on a single value, and all others would adjust relatively. A III Dan is someone whose contributions match the expectations of a full-time individual contributor. @@ -3984,7 +4269,7 @@ Manifesto

      • Indeed: Average Salary for Engineers, United States
      -

      Unresolved Questions

      +

      Unresolved Questions

      None at present.

      (source)

      Table of Contents

      @@ -4017,11 +4302,11 @@ States AuthorsPierre Krieger -

      Summary

      +

      Summary

      When two peers connect to each other, they open (amongst other things) a so-called "notifications protocol" substream dedicated to gossiping transactions to each other.

      Each notification on this substream currently consists of a SCALE-encoded Vec<Transaction> where Transaction is defined in the runtime.

      This RFC proposes to modify the format of the notification to become (Compact(1), Transaction). This maintains backwards compatibility, as this new format decodes as a Vec of length equal to 1.

      -

      Motivation

      +

      Motivation

      There exists three motivations behind this change:

      • @@ -4034,9 +4319,9 @@ States
      • It makes the implementation way more straight-forward by not having to repeat code related to back-pressure. See explanations below.

      -

      Stakeholders

      +

      Stakeholders

      Low-level developers.

      -

      Explanation

      +

      Explanation

      To give an example, if you send one notification with three transactions, the bytes that are sent on the wire are:

      concat(
           leb128(total-size-in-bytes-of-the-rest),
      @@ -4070,7 +4355,7 @@ This is equivalent to forcing the Vec<Transaction> to always
       

      The change is backwards compatible if done in two steps: modify the sender to always send one transaction per notification, then, after a while, modify the receiver to enforce the new format.

      Prior Art and References

      Irrelevant.

      -

      Unresolved Questions

      +

      Unresolved Questions

      None.

      None. This is a simple isolated change.

      @@ -4112,20 +4397,20 @@ This is equivalent to forcing the Vec<Transaction> to always AuthorsPierre Krieger -

      Summary

      +

      Summary

      This RFC proposes to make the mechanism of RFC #8 more generic by introducing the concept of "capabilities".

      Implementations can implement certain "capabilities", such as serving old block headers or being a parachain bootnode.

      The discovery mechanism of RFC #8 is extended to be able to discover nodes of specific capabilities.

      -

      Motivation

      +

      Motivation

      The Polkadot peer-to-peer network is made of nodes. Not all these nodes are equal. Some nodes store only the headers of recent blocks, some nodes store all the block headers and bodies since the genesis, some nodes store the storage of all blocks since the genesis, and so on.

      It is currently not possible to know ahead of time (without connecting to it and asking) which nodes have which data available, and it is not easily possible to build a list of nodes that have a specific piece of data available.

      If you want to download for example the header of block 500, you have to connect to a randomly-chosen node, ask it for block 500, and if it says that it doesn't have the block, disconnect and try another randomly-chosen node. In certain situations such as downloading the storage of old blocks, nodes that have the information are relatively rare, and finding through trial and error a node that has the data can take a long time.

      This RFC attempts to solve this problem by giving the possibility to build a list of nodes that are capable of serving specific data.

      -

      Stakeholders

      +

      Stakeholders

      Low-level client developers. People interested in accessing the archive of the chain.

      -

      Explanation

      +

      Explanation

      Reading RFC #8 first might help with comprehension, as this RFC is very similar.

      Please keep in mind while reading that everything below applies for both relay chains and parachains, except mentioned otherwise.

      Capabilities

      @@ -4182,7 +4467,7 @@ Furthermore, when a large number of providers are registered, only the providers

      Irrelevant.

      Prior Art and References

      Unknown.

      -

      Unresolved Questions

      +

      Unresolved Questions

      While it fundamentally doesn't change much to this RFC, using BabeApi_currentEpoch and BabeApi_nextEpoch might be inappropriate. I'm not familiar enough with good practices within the runtime to have an opinion here. Should it be an entirely new pallet?

      This RFC would make it possible to reliably discover archive nodes, which would make it possible to reliably send archive node requests, something that isn't currently possible. This could solve the problem of finding archive RPC node providers by migrating archive-related request to using the native peer-to-peer protocol rather than JSON-RPC.

      @@ -4233,12 +4518,12 @@ We could even add to the peer-to-peer network nodes that are only capable of ser AuthorsZondax AG, Parity Technologies -

      Summary

      +

      Summary

      To interact with chains in the Polkadot ecosystem it is required to know how transactions are encoded and how to read state. For doing this, Polkadot-SDK, the framework used by most of the chains in the Polkadot ecosystem, exposes metadata about the runtime to the outside. UIs, wallets, and others can use this metadata to interact with these chains. This makes the metadata a crucial piece of the transaction encoding as users are relying on the interacting software to encode the transactions in the correct format.

      It gets even more important when the user signs the transaction in an offline wallet, as the device by its nature cannot get access to the metadata without relying on the online wallet to provide it. This means that the offline wallet needs to trust an online party, rendering the security assumptions of the offline devices moot.

      This RFC proposes a way for offline wallets to leverage metadata, within the constraints of these. The design idea is that the metadata is chunked and these chunks are put into a merkle tree. The root hash of this merkle tree represents the metadata. The offline wallets can use the root hash to decode transactions by getting proofs for the individual chunks of the metadata. This root hash is also included in the signed data of the transaction (but not sent as part of the transaction). The runtime is then including its known metadata root hash when verifying the transaction. If the metadata root hash known by the runtime differs from the one that the offline wallet used, it very likely means that the online wallet provided some fake data and the verification of the transaction fails.

      Users depend on offline wallets to correctly display decoded transactions before signing. With merkleized metadata, they can be assured of the transaction's legitimacy, as incorrect transactions will be rejected by the runtime.

      -

      Motivation

      +

      Motivation

      Polkadot's innovative design (both relay chain and parachains) presents developers with the ability to upgrade their network as frequently as they need. These systems manage to have integrations working after the upgrades with the help of FRAME Metadata. This Metadata, which is on the order of half a MiB for most Polkadot-SDK chains, completely describes chain interfaces and properties. Securing this metadata is key for users to be able to interact with the Polkadot-SDK chain in the expected way.

      On the other hand, offline wallets provide a secure way for Blockchain users to hold their own keys (some do a better job than others). These devices seldom get upgraded, usually account for one particular network and hold very small internal memories. Currently in the Polkadot ecosystem there is no secure way of having these offline devices know the latest Metadata of the Polkadot-SDK chain they are interacting with. This results in a plethora of similar yet slightly different offline wallets for all different Polkadot-SDK chains, as well as the impediment of keeping these regularly updated, thus not fully leveraging Polkadot-SDK’s unique forkless upgrade feature.

      The two main reasons why this is not possible today are:

      @@ -4265,14 +4550,14 @@ We could even add to the peer-to-peer network nodes that are only capable of ser
    • Chunks handling mechanism SHOULD support chunks being sent in any order without memory utilization overhead;
    • Unused enum variants MUST be stripped (this has great impact on transmitted metadata size; examples: era enum, enum with all calls for call batching).
    • -

      Stakeholders

      +

      Stakeholders

      • Runtime implementors
      • UI/wallet implementors
      • Offline wallet implementors

      The idea for this RFC was brought up by runtime implementors and was extensively discussed with offline wallet implementors. It was designed in such a way that it can work easily with the existing offline wallet solutions in the Polkadot ecosystem.

      -

      Explanation

      +

      Explanation

      The FRAME metadata provides a wide range of information about a FRAME based runtime. It contains information about the pallets, the calls per pallet, the storage entries per pallet, runtime APIs, and type information about most of the types that are used in the runtime. For decoding extrinsics on an offline wallet, what is mainly required is type information. Most of the other information in the FRAME metadata is actually not required for decoding extrinsics and thus it can be removed. Therefore, the following is a proposal on a custom representation of the metadata and how this custom metadata is chunked, ensuring that only the needed chunks required for decoding a particular extrinsic are sent to the offline wallet. The necessary information to transform the FRAME metadata type information into the type information presented in this RFC will be provided. However, not every single detail on how to convert from FRAME metadata into the RFC type information is described.

First, the MetadataDigest is introduced. After that, ExtrinsicMetadata is covered and finally the actual format of the type information. Then pruning of unrelated type information is covered and how to generate the TypeRefs. In the last step, the merkle tree calculation is explained.
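As a rough, non-normative illustration of the chunking idea, a binary merkle root over already-hashed chunks could be computed as follows. The concrete hash function, chunk boundaries, and node layout are the ones defined later in this RFC; the placeholder hash parameter and the odd-node convention below are assumptions made only for this sketch.

// Illustrative only: binary merkle root over already-hashed metadata chunks.
// The real hash function and node layout are specified by this RFC, not here.
fn merkle_root(mut layer: Vec<[u8; 32]>, hash: impl Fn(&[u8]) -> [u8; 32]) -> [u8; 32] {
    assert!(!layer.is_empty());
    while layer.len() > 1 {
        layer = layer
            .chunks(2)
            .map(|pair| match pair {
                [left, right] => {
                    let mut buf = Vec::with_capacity(64);
                    buf.extend_from_slice(left);
                    buf.extend_from_slice(right);
                    hash(&buf)
                }
                // Odd node carried up unchanged: one possible convention.
                [single] => *single,
                _ => unreachable!(),
            })
            .collect();
    }
    layer[0]
}

An offline wallet then only needs the root plus merkle proofs for the few chunks used by the extrinsic at hand, rather than the full metadata.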

      Metadata digest


      Prior Art and References

      RFC 46 produced by the Alzymologist team is a previous work reference that goes in this direction as well.

In other ecosystems, there are other solutions to the problem of trusted signing. Cosmos, for example, has a standardized way of transforming a transaction into a textual representation, and this textual representation is included in the signed data. This basically achieves the same as what this RFC proposes, but it requires that, for every transaction applied in a block, every node in the network always generate this textual representation to ensure the transaction signature is valid.

Unresolved Questions

      None.

Authors: George Pisaltu

Summary

        This RFC proposes a change to the extrinsic format to incorporate a new transaction type, the "general" transaction.

Motivation

"General" transactions, a new type of transaction that this RFC aims to support, are transactions which obey the runtime's extensions and carry the corresponding extension data, yet do not have hard-coded signatures. They are first described in Extrinsic Horizon and supported in 3685. They enable users to authorize origins in new, more flexible ways (e.g. ZK proofs, mutations over pre-authenticated origins). As of now, all transactions are limited to the account signing model for origin authorization, and any additional origin changes happen in extrinsic logic, which cannot leverage the validation process of extensions.

An example of a use case for such an extension would be sponsoring the transaction fee for some other user. A new extension would be put in place to verify that a part of the initial payload was signed by the author under whom the extrinsic should run and to change the origin accordingly, while the payment for the whole transaction would be handled under a sponsor's account. A POC for this can be found in 3712.

The new "general" transaction type would coexist with both current transaction types for a while and, therefore, the current number of supported transaction types, capped at 2, is insufficient. A new extrinsic type must be introduced alongside the current signed and unsigned types. Currently, an encoded extrinsic's first byte indicates the type of extrinsic using the most significant bit - 0 for unsigned, 1 for signed - and the 7 following bits indicate the extrinsic format version, which has been equal to 4 for a long time.

        By taking one bit from the extrinsic format version encoding, we can support 2 additional extrinsic types while also having a minimal impact on our capability to extend and change the extrinsic format in the future.

Stakeholders

        • Runtime users
        • Runtime devs
        • Wallet devs

Explanation

        An extrinsic is currently encoded as one byte to identify the extrinsic type and version. This RFC aims to change the interpretation of this byte regarding the reserved bits for the extrinsic type and version. In the following explanation, bits represented using T make up the extrinsic type and bits represented using V make up the extrinsic version.

        Currently, the bit allocation within the leading encoded byte is 0bTVVV_VVVV. In practice in the Polkadot ecosystem, the leading byte would be 0bT000_0100 as the version has been equal to 4 for a long time.

        This RFC proposes for the bit allocation to change to 0bTTVV_VVVV. As a result, the extrinsic format version will be bumped to 5 and the extrinsic type bit representation would change as follows:
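A minimal sketch of packing and unpacking the leading byte under the proposed 0bTTVV_VVVV layout. The concrete bit patterns assigned to each extrinsic type are whatever the RFC text specifies; the type value used in main below is a placeholder.

// Two high bits: extrinsic type. Six low bits: extrinsic format version.
const VERSION_MASK: u8 = 0b0011_1111;

fn encode_leading_byte(extrinsic_type: u8, version: u8) -> u8 {
    ((extrinsic_type & 0b11) << 6) | (version & VERSION_MASK)
}

fn decode_leading_byte(byte: u8) -> (u8, u8) {
    (byte >> 6, byte & VERSION_MASK)
}

fn main() {
    // Extrinsic format version 5 with a hypothetical type value of 0b01.
    let byte = encode_leading_byte(0b01, 5);
    assert_eq!(byte, 0b0100_0101);
    assert_eq!(decode_leading_byte(byte), (0b01, 5));
}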


This change breaks backwards compatibility because any transaction that is neither signed nor unsigned, but a new transaction type, would be interpreted as having a future extrinsic format version.

        Prior Art and References

The design was originally proposed in the TransactionExtension PR, which is also the motivation behind this effort.

Unresolved Questions

        None.

        Following this change, the "general" transaction type will be introduced as part of the Extrinsic Horizon effort, which will shape future work.

Authors: Alex Gheorghe (alexggh)

Summary

        Extend the DHT authority discovery records with a signed creation time, so that nodes can determine which record is newer and always decide to prefer the newer records to the old ones.

Motivation

Currently, we use the Kademlia DHT for storing records with the p2p addresses of authority discovery keys. The problem is that if a node decides to change its PeerId/network key, it will publish a new record; however, because of the distributed and replicated nature of the DHT, there is no way to tell which record is newer, so both the old PeerId and the new PeerId will live in the network until the old one expires (36h). That creates all sorts of problems and leads to the node changing its address not being properly connected for up to 36h.

With this RFC, nodes are extended to keep the newer record and propagate it to nodes that still store the old record, so in the end all nodes converge to the new record much faster (on the order of minutes, not 36h).

Implementation of the RFC: https://github.com/paritytech/polkadot-sdk/pull/3786.

Current issue without this enhancement: https://github.com/paritytech/polkadot-sdk/issues/3673

Stakeholders

        Polkadot node developers.

Explanation

        This RFC heavily relies on the functionalities of the Kademlia DHT already in use by Polkadot. You can find a link to the specification here.

In a nutshell, on a specific node the current authority-discovery protocol publishes Kademlia DHT records at startup and periodically. The records contain the full address of the node for each authority key it owns. The node also tries to find the full addresses of all authorities in the network by querying the DHT and picking the first record it finds for each of the authority ids it found on chain.
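A minimal sketch of the record-selection rule this RFC adds, using illustrative Rust types rather than the actual protobuf schema: when two records for the same authority key are seen, the one with the newer signed creation time wins, and records lacking the field (published by old-protocol nodes) are treated as older.

// Illustrative record shape; field names are placeholders.
struct AuthorityRecord {
    addresses: Vec<String>,
    // None for records published by nodes running the old protocol.
    creation_time_ns: Option<u128>,
}

// Prefer the record with the newer creation time. Option ordering treats
// None as smaller than any Some, i.e. old-protocol records lose ties.
fn pick_newer(a: AuthorityRecord, b: AuthorityRecord) -> AuthorityRecord {
    if b.creation_time_ns > a.creation_time_ns { b } else { a }
}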


The changes are backwards compatible with the existing protocol, so nodes running the old protocol and nodes running the newer protocol can coexist in the network. This is achieved by the fact that we use protobuf for serializing and deserializing the records: new fields will be ignored when deserializing with the older protocol and, vice-versa, when deserializing an old record with the new protocol the new field will be None and the new code accepts such a record as valid.

        Prior Art and References

The enhancements have been inspired by the algorithm specified here.

Unresolved Questions

        N/A

        N/A

Authors: Jonas Gehrlein & Alistair Stewart

Summary

        This RFC proposes a flexible unbonding mechanism for tokens that are locked from staking on the Relay Chain (DOT/KSM), aiming to enhance user convenience without compromising system security.

        Locking tokens for staking ensures that Polkadot is able to slash tokens backing misbehaving validators. With changing the locking period, we still need to make sure that Polkadot can slash enough tokens to deter misbehaviour. This means that not all tokens can be unbonded immediately, however we can still allow some tokens to be unbonded quickly.

The new mechanism leads to a significantly reduced unbonding time on average, by queuing up new unbonding requests and scaling their unbonding duration relative to the size of the queue. New requests are executed after a minimum of 2 days, when the queue is comparatively empty, up to the conventional 28 days, if the sum of requests (in terms of stake) exceeds some threshold. In scenarios between these two bounds, the unbonding duration scales proportionately. The new mechanism will never be worse than the current fixed 28 days.
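Purely as an illustration of the scaling idea (the real mechanism defines the exact threshold, units, and queue accounting), the unbonding duration can be pictured as a linear interpolation between the two bounds:

// Illustrative only: duration as a function of stake already queued for unbonding,
// clamped between the 2-day minimum and the conventional 28-day maximum.
fn unbonding_duration_days(queued_stake: f64, queue_full_threshold: f64) -> f64 {
    const MIN_DAYS: f64 = 2.0;
    const MAX_DAYS: f64 = 28.0;
    let fraction = (queued_stake / queue_full_threshold).clamp(0.0, 1.0);
    MIN_DAYS + fraction * (MAX_DAYS - MIN_DAYS)
}

With an almost empty queue a request unbonds in roughly 2 days; once the queued stake reaches the threshold, it takes the full 28 days.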

        In this document we also present an empirical analysis by retrospectively fitting the proposed mechanism to the historic unbonding timeline and show that the average unbonding duration would drastically reduce, while still being sensitive to large unbonding events. Additionally, we discuss implications for UI, UX, and conviction voting.

Note: Our proposition solely focuses on the locks imposed from staking. Other locks, such as governance, remain unchanged. Also, this mechanism should not be confused with the already existing FastUnstake feature, which lets users immediately unstake tokens that have not received rewards for 28 days or longer.

        As an initial step to gauge its effectiveness and stability, it is recommended to implement and test this model on Kusama before considering its integration into Polkadot, with appropriate adjustments to the parameters. In the following, however, we limit our discussion to Polkadot.

Motivation

Polkadot has one of the longest unbonding periods among all Proof-of-Stake protocols, because security is the most important goal. Staking on Polkadot is still attractive compared to other protocols because of its above-average staking APY. However, the long unbonding period harms usability and deters potential participants that want to contribute to the security of the network.

        The current length of the unbonding period imposes significant costs for any entity that even wants to perform basic tasks such as a reorganization / consolidation of their stashes, or updating their private key infrastructure. It also limits participation of users that have a large preference for liquidity.

The combination of long unbonding periods and high returns has led to the proliferation of liquid staking, where parachains or centralised exchanges offer users their staked tokens before the 28-day unbonding period is over, either in original DOT/KSM form or as derivative tokens. Liquid staking is harmless if few tokens are involved, but it could result in many validators being selected by a few entities if a large fraction of DOTs were involved. This may lead to centralization (see here for more discussion on threats of liquid staking) and an opportunity for attacks.

        The new mechanism greatly increases the competitiveness of Polkadot, while maintaining sufficient security.

Stakeholders

        • Every DOT/KSM token holder

Explanation

Before diving into the details of how to implement the unbonding queue, we give readers context about why Polkadot has a 28-day unbonding period in the first place. The reason for it is to prevent long-range attacks (LRAs), which become theoretically possible if more than 1/3 of validators collude. In essence, an LRA describes the inability of a user, who disconnects from consensus at time t0 and reconnects later, to realize that validators which were legitimate at t0 but dropped out in the meantime are not to be trusted anymore. That means, for example, a user syncing the state could be fooled into trusting validators that fell outside the active set after t0 and are building a competing, malicious chain (fork).

        LRAs of longer than 28 days are mitigated by the use of trusted checkpoints, which are assumed to be no more than 28 days old. A new node that syncs Polkadot will start at the checkpoint and look for proofs of finality of later blocks, signed by 2/3 of the validators. In an LRA fork, some of the validator sets may be different but only if 2/3 of some validator set in the last 28 days signed something incorrect.

If we detect an LRA of no more than 28 days with the current unbonding period, then we should be able to detect misbehaviour from over 1/3 of validators whose nominators are still bonded. The stake backing these validators is a considerable fraction of the total stake (empirically around 0.287). If we allowed more than this stake to unbond, without checking who it was backing, then the LRA attack might be free of cost for an attacker. The proposed mechanism allows up to half this stake to unbond within 28 days. This halves the amount of tokens that can be slashed, but it is still very high in absolute terms. For example, at the time of writing (19.06.2024) this would translate to around 120 million DOT.

Summary

        This RFC proposes a change to the extrinsic format to include a transaction extension version.

Motivation

The extrinsic format supports being extended with transaction extensions. These transaction extensions are runtime specific and can be different per chain. Each transaction extension can add data to the extrinsic itself or extend the signed payload. This means that adding a transaction extension breaks the chain-specific extrinsic format. A recent example was the introduction of CheckMetadataHash to Polkadot and all its system chains: as the extension added one byte to the extrinsic, it broke a lot of tooling. By introducing an extra version for the transaction extensions it will be possible to introduce changes to these transaction extensions while still being backwards compatible. Based on the version of the transaction extensions, each chain runtime could decode the extrinsic correctly and also create the correct signed payload.

Stakeholders

        • Runtime users
        • Runtime devs
        • Wallet devs

Explanation

        RFC84 introduced the extrinsic format 5. The idea is to piggyback onto this change of the extrinsic format to add the extra version for the transaction extensions. If required, this could also come as extrinsic format 6, but 5 is not yet deployed anywhere.

        The extrinsic format supports the following types of transactions:


        Prior Art and References

        None.

Unresolved Questions

        None.

        None.

Authors: Adrian Catangiu

Summary

This RFC proposes a new instruction that provides a way to initiate, on remote chains, asset transfers which combine multiple transfer types (teleport, local-reserve, destination-reserve), using XCM alone.

The currently existing instructions are too opinionated and force each XCM asset transfer to a single transfer type (teleport, local-reserve, destination-reserve). This results in the inability to combine different types of transfers in a single transfer, which leads to overall poor UX when trying to move assets across chains.

Motivation

XCM is the de-facto cross-chain messaging protocol within the Polkadot ecosystem, and cross-chain asset transfers are one of its main use cases. Unfortunately, in its current spec, it does not support initiating, on a remote chain, one or more transfers that combine assets with different transfer types.

The proposal allows, for example, a single XCM program execution to transfer multiple assets from Kusama Asset Hub, over the bridge, through Polkadot Asset Hub, with final destination ParaP on Polkadot.

        With current XCM, we are limited to doing multiple independent transfers for each individual hop in order to move both "interesting" assets, but also "supporting" assets (used to pay fees).

Stakeholders

        • Runtime users
        • Runtime devs
        • Wallet devs
        • dApps devs

Explanation

A new instruction InitiateAssetsTransfer is introduced that initiates an assets transfer from the chain it is executed on to another chain. The executed transfer is point-to-point (chain-to-chain) with all of the transfer properties specified in the instruction parameters. The new instruction cannot be converted to older XCM versions, because there is no equivalent capability there; such conversion attempts will explicitly fail.

        Prior Art and References

        None.

Unresolved Questions

        None.

        None.

Authors: Adrian Catangiu

Summary

        The Transact XCM instruction currently forces the user to set a specific maximum weight allowed to the inner call and then also pay for that much weight regardless of how much the call actually needs in practice.

This RFC proposes improving the usability of Transact by removing that parameter and instead getting and charging the actual weight of the inner call from its dispatch info on the remote chain.

Motivation

        The UX of using Transact is poor because of having to guess/estimate the require_weight_at_most weight used by the inner call on the target.

        We've seen multiple Transact on-chain failures caused by guessing wrong values for this require_weight_at_most even though the rest of the XCM program would have worked.

        In practice, this parameter only adds UX overhead with no real practical value. Use cases fall in one of two categories:


        We've had multiple OpenGov root/whitelisted_caller proposals initiated by core-devs completely or partially fail because of incorrect configuration of require_weight_at_most parameter. This is a strong indication that the instruction is hard to use.

Stakeholders

        • Runtime Users,
        • Runtime Devs,
        • Wallets,
        • dApps,

Explanation

        The proposed enhancement is simple: remove require_weight_at_most parameter from the instruction:

        - Transact { origin_kind: OriginKind, require_weight_at_most: Weight, call: DoubleEncoded<Call> },
         + Transact { origin_kind: OriginKind, call: DoubleEncoded<Call> },
         

        Compatible with previous XCM programs.

        Prior Art and References

        None.

Unresolved Questions

        None.

        None.

Authors: Andrei Sandu

Summary

        Elastic scaling is not resilient against griefing attacks without a way for a PoV (Proof of Validity) to commit to the particular core index it was intended for. This RFC proposes a way to include core index information in the candidate commitments and the CandidateDescriptor data structure in a backward compatible way. Additionally, it proposes the addition of a SessionIndex field in the CandidateDescriptor to make dispute resolution more secure and robust.

Motivation

        This RFC proposes a way to solve two different problems:

1. For Elastic Scaling, it prevents anyone who has acquired a valid collation from DoSing the parachain.
2. It makes dispute resolution more robust: a dispute may concern a relay chain block not yet imported by a validator. In this case, validators can safely assume the session index refers to the session the candidate appeared in; otherwise, the chain would have rejected the candidate.

Stakeholders

        • Polkadot core developers.
        • Cumulus node developers.
        • Tooling, block explorer developers.

        This approach and alternatives have been considered and discussed in this issue.

Explanation

The approach proposed below was chosen primarily because it minimizes the number of breaking changes and the complexity, and takes less implementation and testing time. The proposal is to change the existing primitives while keeping binary compatibility with the older versions.

        Prior Art and References

        Forum discussion about a new CandidateReceipt format: https://forum.polkadot.network/t/pre-rfc-discussion-candidate-receipt-format-v2/3738

Unresolved Questions

        N/A

The implementation is extensible and future-proof to some extent: with minimal or no breaking changes, further extensions are possible by using the version field of the descriptor introduced in this RFC.

Authors: Francisco Aguirre

Summary

XCM already handles execution fees in an effective and efficient manner using the BuyExecution instruction. However, other types of fees are not handled as effectively -- for example, delivery fees. Fees exist that can't be measured using Weight -- as execution fees can -- so a new method should be thought up for those cases. This RFC proposes making the fee handling system simpler and more general by doing the following:

      • Adding a fees register
      • Deprecating BuyExecution and adding a new instruction PayFees with new semantics to ultimately replace it.

Motivation

Execution fees are handled correctly by XCM right now. However, the addition of extra fees, like those for message delivery, results in awkward ways of integrating them into the XCVM implementation. This is because these types of fees are not included in the language. The standard should have a way to correctly deal with these implementation-specific fees. The new instruction moves the specified amount of fees from the holding register to a dedicated fees register that the XCVM can use in flexible ways depending on its implementation. The XCVM implementation is free to use these fees to pay for execution fees, transport fees, or any other type of fee that might be necessary. This moves the specifics of fees further away from the XCM standard, and more into the actual underlying XCVM implementation, which is a good thing.

Stakeholders

      • Runtime Users
      • Runtime Devs
      • Wallets
      • dApps

Explanation

The new instruction that will replace BuyExecution is a much simpler and more general version: PayFees. This instruction takes one Asset, takes it from the holding register, and puts it into a new fees register. The XCVM implementation can now use this Asset to make sure every necessary fee is paid for; this includes execution fees, delivery fees, and any other type of fee. The newly proposed instruction, PayFees, doesn't return the leftover fees to the holding register. In practice, the deprecated BuyExecution needs to be slowly phased out in favour of PayFees.
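A sketch of how a program could use the new instruction, with a simplified stand-in for the real XCM types; amounts, field names, and the beneficiary are placeholders rather than the normative encoding:

// Illustrative model of the relevant instructions only; not the real XCM types.
enum Instruction {
    WithdrawAsset { amount: u128 },
    // Moves `amount` from the holding register into the fees register.
    PayFees { amount: u128 },
    DepositAsset { beneficiary: String },
}

fn example_program() -> Vec<Instruction> {
    vec![
        // Put 10 units of some asset into the holding register.
        Instruction::WithdrawAsset { amount: 10 },
        // Set 1 unit aside to cover all fees (execution, delivery, ...).
        Instruction::PayFees { amount: 1 },
        // Whatever remains in holding goes to the beneficiary.
        Instruction::DepositAsset { beneficiary: "recipient-account".into() },
    ]
}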

      Prior Art and References

      The closed RFC PR on the xcm-format repository, before XCM RFCs got moved to fellowship RFCs: https://github.com/polkadot-fellows/xcm-format/pull/53.

Unresolved Questions

      None

      This proposal would greatly benefit from an improved asset trapping system.

Authors: Francisco Aguirre

Summary

A previous XCM RFC (https://github.com/polkadot-fellows/xcm-format/pull/37) introduced a SetAssetClaimer instruction. This idea of instructing the XCVM to change some implementation-specific behavior is useful. In order to generalize this mechanism, this RFC introduces a new instruction SetHints and makes SetAssetClaimer just one of many possible execution hints.

Motivation

There is a need to specify how certain implementation-specific things should behave, for example who can claim the assets or what should be done instead of trapping assets. Other ideas for hints:


    • AssetForFees: to signify to the executor what asset the user prefers to use for fees.
    • LeftoverAssetsDestination: for depositing leftover assets to a destination instead of trapping them

Stakeholders

    • Runtime devs
    • Wallets
    • dApps

Explanation

A new instruction, SetHints, will be added. This instruction will take a single parameter of type Hint, an enumeration. The first variant for this enum is AssetClaimer, which allows specifying a location that should be able to claim trapped assets. You only need to specify the hints you want in one single instruction at the top of the program.
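A rough Rust model of the shape described above; the Location stand-in, the field names, and the use of a plain Vec for the hints are illustrative assumptions, not the normative definition:

// Illustrative model only; the real types live in the XCM spec/implementation.
type Location = String;

enum Hint {
    // Who may claim assets that would otherwise be trapped.
    AssetClaimer { location: Location },
    // Further hints, such as those listed in the Motivation, would be added here.
}

enum Instruction {
    SetHints { hints: Vec<Hint> },
    // ... other instructions
}

fn example() -> Instruction {
    Instruction::SetHints {
        hints: vec![Hint::AssetClaimer { location: "Parachain(1000)/Alice".into() }],
    }
}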

    None.

    Prior Art and References

    The previous RFC PR in the xcm-format repository before XCM RFCs moved to fellowship RFCs: https://github.com/polkadot-fellows/xcm-format/pull/59.

Unresolved Questions

    None.

    None.

Summary

    This RFC aims to remove the NetworkIds of Westend and Rococo, arguing that testnets shouldn't go in the language.

Motivation

We've already seen the plans to phase out Rococo, and Paseo has appeared. Instead of constantly changing the testnets included in the language, we should favor specifying them via their genesis hash, using NetworkId::ByGenesis.

Stakeholders

    • Runtime devs
    • Wallets
    • dApps

Explanation

    Remove Westend and Rococo from the included NetworkIds in the language.

    Drawbacks

    This RFC will make it less convenient to specify a testnet, but not by a large amount.


    NetworkId::Rococo and NetworkId::Westend can just use NetworkId::ByGenesis, as can other testnets.
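For illustration, referring to a testnet by genesis hash instead of a named variant could look like the sketch below; the hash bytes are placeholders, not a real genesis hash:

// Illustrative model: a network referenced by its genesis hash.
enum NetworkId {
    ByGenesis([u8; 32]),
    Polkadot,
    Kusama,
    // Westend and Rococo variants removed.
}

fn testnet_by_genesis() -> NetworkId {
    // Placeholder bytes; a real caller would use the chain's actual genesis hash.
    NetworkId::ByGenesis([0u8; 32])
}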

    Prior Art and References

    A previous attempt to add NetworkId::Paseo: https://github.com/polkadot-fellows/xcm-format/pull/58.

Unresolved Questions

    None.

    None.

Authors: Adrian Catangiu

Summary

    XCM programs generated by the InitiateAssetTransfer instruction shall have the option to carry over the original origin all the way to the final destination. They shall do so by internally making use of AliasOrigin or ClearOrigin depending on given parameters.

    This allows asset transfers to retain their original origin even across multiple hops.

    Ecosystem chains would have to change their trusted aliasing rules to effectively make use of this feature.

Motivation

    Currently, all XCM asset transfer instructions ultimately clear the origin in the remote XCM message by use of the ClearOrigin instruction. This is done for security considerations to ensure that subsequent (user-controlled) instructions cannot command the authority of the sending chain.

    The problem with this approach is that it limits what can be achieved on remote chains through XCM. Most XCM operations require having an origin, and following any asset transfer the origin is lost, meaning not much can be done other than depositing the transferred assets to some local account or transferring them onward to another chain.

    For example, we cannot transfer some funds for buying execution, then do a Transact (all in the same XCM message).


Transact XCM programs today require a two-step process:

    Transact Today

    And we want to be able to do it using a single XCM program.

Stakeholders

    Runtime Users, Runtime Devs, wallets, cross-chain dApps.

Explanation

    In the case of XCM programs going from source-chain directly to dest-chain without an intermediary hop, we can enable scenarios such as above by using the AliasOrigin instruction instead of the ClearOrigin instruction.

Instead of clearing the source-chain origin, the destination chain shall attempt to alias source-chain to the "original origin" on the source chain. The most common such origin aliasing would be X1(Parachain(source-chain)) -> X2(Parachain(source-chain), AccountId32(origin-account)) for the case of a single-hop transfer where the initiator is a (signed/pure/proxy) account origin-account on source-chain.
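The aliasing step can be pictured as a pure function over location junctions; the types below are a simplified stand-in for the real XCM Location type, used only to mirror the example above:

// Simplified stand-in for XCM location junctions.
#[derive(Clone, Debug, PartialEq)]
enum Junction {
    Parachain(u32),
    AccountId32([u8; 32]),
}

// On the destination chain: alias X1(Parachain(source)) to
// X2(Parachain(source), AccountId32(original_origin)).
fn alias_origin(source_para: u32, original_origin: [u8; 32]) -> Vec<Junction> {
    vec![Junction::Parachain(source_para), Junction::AccountId32(original_origin)]
}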

  • RFC: InitiateAssetsTransfer for complex asset transfers
  • RFC: Descend XCM origin instead of clearing it where possible

Unresolved Questions

    None


Authors: Bastian Köcher

Summary

    The code of a runtime is stored in its own state, and when performing a runtime upgrade, this code is replaced. The new runtime can contain runtime migrations that adapt the state to the state layout as defined by the runtime code. This runtime migration is executed when building the first block with the new runtime code. Anything that interacts with the runtime state uses the state layout as defined by the runtime code. So, when trying to load something from the state in the block that applied the runtime upgrade, it will use the new state layout but will decode the data from the non-migrated state. In the worst case, the data is incorrectly decoded, which may lead to crashes or halting of the chain.

This RFC proposes to store the new runtime code under a different storage key when applying a runtime upgrade. This way, all the off-chain logic can still load the old runtime code under the default storage key and decode the state correctly. The block producer is then required to use this new runtime code to build the next block. While building the next block, the runtime executes the migrations and moves the new runtime code to the default runtime code location. So, the runtime code found under the default location is always the correct one to decode the state from which the runtime code was loaded.

Motivation

    While the issue of having undecodable state only exists for the one block in which the runtime upgrade was applied, it still impacts anything that reads state data, like block explorers, UIs, nodes, etc. For block explorers, the issue mainly results in indexing invalid data and UIs may show invalid data to the user. For nodes, reading incorrect data may lead to a performance degradation of the network. There are also ways to prevent certain decoding issues from happening, but it requires that developers are aware of this issue and also requires introducing extra code, which could introduce further bugs down the line.

    So, this RFC tries to solve these issues by fixing the underlying problem of having temporary undecodable state.

Stakeholders

    • Relay chain/Parachain node developers
    • Relay chain/Parachain node operators

Explanation

    The runtime code is stored under the special key :code in the state. Nodes and other tooling read the runtime code under this storage key when they want to interact with the runtime for e.g., building/importing blocks or getting the metadata to read the state. To update the runtime code the runtime overwrites the value at :code, and then from the next block on, the new runtime will be loaded. This RFC proposes to first store the new runtime code under :pending_code in the state for one block. When the next block is being built, the block builder first needs to check if :pending_code is set, and if so, it needs to load the runtime from this storage key. While building the block the runtime will move :pending_code to :code to have the runtime code at the default location. Nodes importing the block will also need to load :pending_code if it exists to ensure that the correct runtime code is used. By doing it this way, the runtime code found at :code in the state of a block will always be able to decode the state. Furthermore, this RFC proposes to introduce system_version: 3. The system_version was introduced in RFC42. Version 3 would then enable the usage of :pending_code when applying a runtime code upgrade. This way, the feature can be introduced first and enabled later when the majority of the nodes have upgraded.
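A sketch of the resulting rule on the node side, assuming a generic key-value state accessor; the function name and signature are made up for illustration and do not correspond to an actual client API:

// Illustrative only: pick the runtime code used to build/import the next block.
// `state_get` stands in for whatever state-access API the node provides.
fn runtime_code_for_next_block(state_get: impl Fn(&[u8]) -> Option<Vec<u8>>) -> Option<Vec<u8>> {
    // If a runtime upgrade was applied in the parent block, the new code sits
    // under :pending_code and must be used for the next block.
    if let Some(pending) = state_get(b":pending_code") {
        return Some(pending);
    }
    // Otherwise the code under the default :code key is the right one.
    state_get(b":code")
}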

For Polkadot/Kusama this means that the parachain nodes also need to be running with a relay chain node version that supports this new feature. Otherwise, the parachains will stop producing/finalizing blocks, as they cannot sync the relay chain any more.

    Prior Art and References

    The issue initially reported a bug that led to this RFC. It also discusses multiple solutions for the problem.

Unresolved Questions

    None

Authors: Daniel Shiposha

Summary

      This RFC proposes a metadata format for XCM-identifiable assets (i.e., for fungible/non-fungible collections and non-fungible tokens) and a set of instructions to communicate it across chains.

Motivation

      Currently, there is no way to communicate metadata of an asset (or an asset instance) via XCM.

      The ability to query and modify the metadata is useful for two kinds of entities:


Besides metadata modification, the ability to read it is also valuable. On-chain logic can interpret the NFT metadata, i.e., the metadata could have not only a media meaning but also a utility function within a consensus system. Currently, such a way of using NFT metadata is possible only within one consensus system. This RFC proposes making it possible between different systems via XCM, so different chains can fetch and analyze asset metadata from other chains.

Stakeholders

      Runtime users, Runtime devs, Cross-chain dApps, Wallets.

Explanation

      The Asset Metadata is information bound to an asset class (fungible or NFT collection) or an asset instance (an NFT). The Asset Metadata could be represented differently on different chains (or in other consensus entities). However, to communicate metadata between consensus entities via XCM, we need a general format so that any consensus entity can make sense of such information.

Authors: Bryan Chen, Jiyuan Zheng

Summary

      This proposal introduces PVQ (PolkaVM Query), a unified query interface that bridges different chain runtime implementations and client tools/UIs. PVQ provides an extension-based system where runtime developers can expose chain-specific functionality through standardized interfaces, while allowing client-side developers to perform custom computations on the data through PolkaVM programs. By abstracting away concrete implementations across chains and supporting both off-chain and cross-chain scenarios, PVQ aims to reduce code duplication and development complexity while maintaining flexibility for custom use cases.

Motivation

In Substrate, runtime APIs facilitate off-chain clients in reading the state of the consensus system. However, the APIs defined and implemented by individual chains often fall short of meeting the diverse requirements of client-side developers. For example, client-side developers may want aggregated data from multiple pallets, or to apply various custom transformations on the raw data. As a result, client-side developers frequently resort to directly accessing storage.

Stakeholders

    • Runtime Developers
    • Tools/UI Developers

Explanation

    The core idea of PVQ is to have a unified interface that meets the aforementioned requirements.

On the runtime side, an extension-based system is introduced to serve as a standardization layer across different chains. Each extension specification defines a set of cohesive APIs. For XCM integration, the proposal does not modify the existing XCM message format.

• View Functions aim to provide view-only functions at the pallet level. Additionally, the Facade Project aims to gather and return commonly wanted information at the runtime level. PVQ does not conflict with them, and it can take advantage of these Pallet View Functions / Runtime APIs and allow people to build arbitrary PVQ programs to obtain more custom/complex data that is not otherwise expressed by these two proposals.

Unresolved Questions

    • The specific conversion between gas and weight has not been finalized and will likely require development of a suitable benchmarking methodology.

Authors: s0me0ne-unkn0wn (13WGadgNgqSjiGQvfhimw9pX26mvGdYQ6XgrjPANSEDRoGMt)

Summary

    This RFC proposes a change that makes it possible to identify types of compressed blobs stored on-chain, as well as used off-chain, without the need for decompression.

Motivation

Currently, a compressed blob does not give any idea of what's inside because the only thing that can be inside, according to the spec, is Wasm. In reality, other blob types are already being used, and more are to come. Apart from being error-prone by itself, the current approach does not allow the blob to be properly routed through the execution paths before its decompression, which will result in suboptimal implementations when more blob types are used. Thus, it is necessary to introduce a mechanism that allows identifying the blob type without decompressing it.

    This proposal is intended to support future work enabling Polkadot to execute PolkaVM and, more generally, other-than-Wasm parachain runtimes, and allow developers to introduce arbitrary compression methods seamlessly in the future.

Stakeholders

    Node developers are the main stakeholders for this proposal. It also creates a foundation on which parachain runtime developers will build.

Explanation

    Overview

The current approach to compressing binary blobs involves using zstd compression, and the resulting compressed blob is prefixed with a unique 64-bit magic value specified in that subsection. The same procedure is used to compress both Wasm code blobs and proofs-of-validity. Currently, having solely a compressed blob, it's impossible to tell without decompression whether it contains a Wasm blob or a PoV. That doesn't cause problems in the current protocol, as Wasm blobs and PoV blobs take completely different execution paths in the code.

    The changes proposed below are intended to define the means for distinguishing compressed blob types in a backward-compatible and future-proof way.
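For illustration only, distinguishing blob types without decompression boils down to inspecting the leading magic bytes; the constant below is a placeholder, and the real 64-bit magic values are those defined by the specification:

// Placeholder constant; the actual magic values are defined in the spec.
const LEGACY_ZSTD_MAGIC: [u8; 8] = *b"PLACEHLD";

#[derive(Debug, PartialEq)]
enum BlobKind {
    LegacyZstd,
    Unknown,
}

fn classify(blob: &[u8]) -> BlobKind {
    match blob.get(..8) {
        Some(prefix) if prefix == &LEGACY_ZSTD_MAGIC[..] => BlobKind::LegacyZstd,
        _ => BlobKind::Unknown,
    }
}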


    The change is designed to be backward-compatible.

    Prior Art and References

    SDK PR#6704 (WIP) introduces a mechanism similar to that described in this proposal and proves the necessity of such a change.

Unresolved Questions

    None

    This proposal creates a foundation for two future work directions:

Authors: ordian

Summary

    This RFC proposes changes to the erasure coding algorithm and the method for computing the erasure root on Polkadot to improve performance of both processes.

Motivation

The Data Availability (DA) Layer in Polkadot provides a foundation for shared security, enabling Approval Checkers and Collators to download Proofs-of-Validity (PoV) for security and liveness purposes respectively. The proposed change is orthogonal to RFC-47 and can be used in conjunction with it. We propose bundling another performance-enhancing breaking change that addresses the CPU bottleneck in the erasure coding process, but using a separate node feature (NodeFeatures, part of HostConfiguration) for its activation.

Stakeholders

    • Infrastructure providers (operators of validator/collator nodes) will need to upgrade their client version in a timely manner

Explanation

    We propose two specific changes:


      This requires a breaking change that can be coordinated following the same approach as in RFC-47.

      Prior Art and References

      JAM already utilizes the same optimizations described in the Graypaper.

Unresolved Questions

      None.

      Future improvements could include:

Authors: Jeff Burdges, ...

Summary

      An off-chain approximation protocol should assign rewards based upon the approvals and availability work done by validators.

      All validators track which approval votes they actually use, reporting the aggregate, after which an on-chain median computation gives a good approximation under byzantine assumptions. Approval checkers report aggregate information about which availability chunks they use too, but in availability we need a tit-for-tat game to enforce honesty, because approval committees could often bias results thanks to their small size.

Motivation

We want all or most Polkadot subsystems to be profitable for validators, because otherwise operators might profit from running modified code. In particular, almost all rewards in Kusama/Polkadot should come from work done securing parachains, primarily approval checking, but also backing, availability, and support of XCMP.

Among these tasks, our highest priorities must be approval checks, which ensure soundness, and sending availability chunks to approval checkers. We prove that backers must be paid strictly less than approval checkers.

At present though, validators' rewards have relatively little relationship to validators' operating costs, in terms of bandwidth and CPU time. Worse, Polkadot's scaling makes us particularly vulnerable to "no-shows" caused by validators skipping their approval checks.

We're particularly concerned about the impact of hardware specs upon the number of parachain cores. We've requested relatively low-spec machines so far, only four physical CPU cores, although some run even lower specs, like only two physical CPU cores. Alone, rewards cannot fix our low-spec validator problem, but rewards and outreach together should have far more impact than either alone.

In future, we'll further increase validator spec requirements, which directly improves Polkadot's throughput and repeats this dynamic of purging under-spec'd nodes, except that outreach becomes more important because, de facto, too many slow validators can "out-vote" the faster ones.

Stakeholders

We alter the validators' rewards protocol, but with negligible impact upon rewards for honest validators who comply with hardware and bandwidth recommendations.

We shall of course still reward participation in relay chain consensus, which de facto means block production but not finality, but these current reward levels shall wind up greatly reduced. Any validators who manipulate block rewards now could lose rewards here, simply because rewards are being shifted from block production to availability, but this sounds desirable.

      We've discussed roughly this rewards protocol in https://hackmd.io/@rgbPIkIdTwSICPuAq67Jbw/S1fHcvXSF and https://github.com/paritytech/polkadot-sdk/issues/1811 as well as related topics like https://github.com/paritytech/polkadot-sdk/issues/5122


All validators could collect ApprovalsTallyMessages and independently compute rewards off-core. At that point, all validators have opinions about all other validators' rewards, but even among honest validators these opinions could differ if some lack some ApprovalsTallyMessages.

      We'd have the same in-core computation problem if we perform statistics like medians upon these opinions. We could however take an optimistic approach where each validator computes medians like above, but then shares their hash of the final rewards list. If 2/3rds voted for the same hash, then we distribute rewards as above. If not, then we distribute no rewards until governance selects the correct hash.
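As a minimal sketch of the median step referred to above, assuming the per-validator reports have already been collected and that more than 2/3 of reporters are honest:

// Byzantine-robust aggregate of the counts reported for one validator:
// with more than 2/3 honest reporters, the median cannot be moved far
// by the dishonest minority.
fn median_usage(mut reports: Vec<u32>) -> Option<u32> {
    if reports.is_empty() {
        return None;
    }
    reports.sort_unstable();
    Some(reports[reports.len() / 2])
}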

We never validate in-core the signatures on ApprovalsTallyMessages or the computation, so this approach permits more direct cheating by a malicious 2/3rds majority, but if that occurs then we've broken our security assumptions anyway. It's likely these hashes do diverge during some network disruptions though, which increases our "drama" factor considerably, which may be unacceptable.

Explanation

      Backing

Polkadot's efficiency creates subtle liveness concerns: anytime one node cannot perform one of its approval checks, Polkadot loses in expectation 3.25 approval checks, or 0.10833 parablocks. This makes back pressure essential.

We cannot throttle approval checks securely either, so reactive off-chain back pressure only makes sense during or before the backing phase. In other words, if nodes feel overworked themselves, or perhaps believe others to be, then they should drop backing checks, never approval checks. It follows that backing work must be rewarded less well and less reliably than approvals, as otherwise validators could benefit from behavior that harms the network.


      JAM's block exports should not complicate availability rewards, but could impact some alternative schemes.

      Prior Art and References

      None

Unresolved Questions

      Provide specific questions to discuss and address before the RFC is voted on by the Fellowship. This should include, for example, alternatives to aspects of the proposed design where the appropriate trade-off to make is unclear.

      Synthetic parachain flag

Authors: Pierre Krieger

Summary

      Update the runtime-host interface to no longer make use of a host-side allocator.

Motivation

      The heap allocation of the runtime is currently controlled by the host using a memory allocator on the host side.

The API of many host functions consists of the host allocating a buffer. For example, when calling ext_hashing_twox_256_version_1, the host allocates a 32-byte buffer using the host allocator and returns a pointer to this buffer to the runtime. The runtime later has to call ext_allocator_free_version_1 on this pointer in order to free the buffer.

Even though no benchmark has been done, it is pretty obvious that this design is very inefficient. To continue with the example of ext_hashing_twox_256_version_1, it would be more efficient to instead write the output hash to a buffer that was allocated by the runtime on its stack and passed by pointer to the function. Allocating a buffer on the stack in the worst case scenario simply consists of decreasing a number, and in the best case scenario is free. Doing so would save many Wasm memory reads and writes by the allocator, and would save a function call to ext_allocator_free_version_1.
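As a purely hypothetical illustration of the calling convention this RFC moves towards (the actual new host functions and their exact signatures are the ones listed under New host functions below), a hashing host call could take an output pointer owned by the runtime instead of returning a host-allocated buffer:

// Hypothetical illustration only; see the Explanation for the real signatures.
#[cfg(target_arch = "wasm32")]
extern "C" {
    // Old style: the host allocates a 32-byte buffer and returns a pointer,
    // which the runtime must later free via ext_allocator_free_version_1.
    fn ext_hashing_twox_256_version_1(data: i64) -> i32;
    // New style (hypothetical): the runtime passes a pointer to a 32-byte
    // buffer it owns (e.g. on its stack) and the host writes the hash into it.
    fn ext_hashing_twox_256_version_2(data: i64, out: i32);
}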

      Furthermore, the existence of the host-side allocator has become questionable over time. It is implemented in a very naive way, and for determinism and backwards compatibility reasons it needs to be implemented exactly identically in every client implementation. Runtimes make substantial use of heap memory allocations, and each allocation needs to go twice through the runtime <-> host boundary (once for allocating and once for freeing). Moving the allocator to the runtime side, while it would increase the size of the runtime, would be a good idea. But before the host-side allocator can be deprecated, all the host functions that make use of it need to be updated to not use it.

Stakeholders

      No attempt was made at convincing stakeholders.

Explanation

New host functions

      This section contains a list of new host functions to introduce.

      (func $ext_storage_read_version_2
           (param $key i64) (param $value_out i64) (param $offset i32) (result i64))
       

      The ext_input_size_version_1 host function returns the size in bytes of the input data.

The ext_input_read_version_1 host function copies some data from the input data to the memory of the runtime. The offset parameter indicates the offset within the input data where to start copying, and must be less than or equal to the value returned by ext_input_size_version_1. The out parameter is a pointer-size containing the buffer where to write to. The runtime execution stops with an error if offset is strictly greater than the size of the input data, or if out is outside of the range of the memory of the virtual machine, even if the amount of data to copy would be 0 bytes.

Other changes

      In addition to the new host functions, this RFC proposes two changes to the runtime-host interface:

      • The following function signature is now also accepted for runtime entry points: (func (result i64)).

The following other host functions are similarly considered deprecated:

      Drawbacks

      This RFC might be difficult to implement in Substrate due to the internal code design. It is not clear to the author of this RFC how difficult it would be.

Prior Art

The API of these new functions was heavily inspired by the APIs used by the C programming language.

Unresolved Questions

      The changes in this RFC would need to be benchmarked. This involves implementing the RFC and measuring the speed difference.

It is expected that most host functions are faster than, or equal in speed to, their deprecated counterparts, with the following exceptions:

License: MIT

Summary

This RFC proposes a dynamic pricing model for the sale of Bulk Coretime on the Polkadot UC. The proposed model updates the regular price of cores for each sale period, by taking into account the number of cores sold in the previous sale, as well as a limit of cores and a target number of cores sold. It ensures a minimum price and limits price growth to a maximum price increase factor, while also giving governance control over the steepness of the price change curve. It allows governance to address challenges arising from changing market conditions and should offer predictable and controlled price adjustments.

        Accompanying visualizations are provided at [1].

Motivation

RFC-1 proposes periodic Bulk Coretime Sales as a mechanism to sell continuous regions of blockspace (suggested to be 4 weeks in length). A number of Blockspace Regions (compare RFC-1 & RFC-3) are provided for sale to the Broker-Chain each period and shall be sold in a way that provides value-capture for the Polkadot network. The exact pricing mechanism is out of scope for RFC-1 and shall be provided by this RFC.

        A dynamic pricing model is needed. A limited number of Regions are offered for sale each period. The model needs to find the price for a period based on supply and demand of the previous period.

        The model shall give Coretime consumers predictability about upcoming price developments and confidence that Polkadot governance can adapt the pricing model to changing market conditions.

      • The solution SHOULD provide a maximum factor of price increase should the limit of Regions sold per period be reached.
      • The solution should allow governance to control the steepness of the price function

Stakeholders

    The primary stakeholders of this RFC are:

• Protocol researchers and developers
    • Polkadot parachains teams
    • Brokers involved in the trade of Bulk Coretime

Explanation

    Overview

    The dynamic pricing model sets the new price based on supply and demand in the previous period. The model is a function of the number of Regions sold, piecewise-defined by two power functions.
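A toy version of such a piecewise update rule is sketched below; the exponents, target, limit, and bounds are governance parameters in the actual model and the precise function shape is defined in the remainder of this section, so this is purely illustrative:

// Illustrative only: new price as a piecewise function of regions sold last period.
fn new_price(
    old_price: f64,
    sold: f64,
    target: f64,
    limit: f64,
    min_price: f64,
    max_increase_factor: f64,
    steepness: f64,
) -> f64 {
    let factor = if sold <= target {
        // Below target: scale the price down towards the minimum.
        (sold / target).powf(steepness)
    } else {
        // Above target: scale up, capped at the maximum increase factor.
        1.0 + (max_increase_factor - 1.0) * ((sold - target) / (limit - target)).powf(steepness)
    };
    (old_price * factor).max(min_price)
}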

Authors: Gabriel Facco de Arruda

Summary

      This RFC proposes changes that enable the use of absolute locations in AccountId derivations, which allows protocols built using XCM to have static account derivations in any runtime, regardless of its position in the family hierarchy.

      -

      Motivation

      +

      Motivation

      These changes would allow protocol builders to leverage absolute locations to maintain the exact same derived account address across all networks in the ecosystem, thus enhancing user experience.

      One such protocol, which is the original motivation for this proposal, is InvArch's Saturn Multisig, which gives users a unifying multisig and DAO experience across all XCM-connected chains.

      -

      Stakeholders

      +

      Stakeholders

      • Ecosystem developers
      -

      Explanation

      +

      Explanation

      This proposal aims to make it possible to derive accounts for absolute locations, enabling protocols that require the ability to maintain the same derived account in any runtime. This is done by deriving accounts from the hash of described absolute locations, which are static across different destinations.

      The same location can be represented in relative form and absolute form like so:

      #![allow(unused)]
      @@ -7695,7 +7980,7 @@ OLD_PRICE = 1000
       
    • DescribeFamily type: https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/xcm-builder/src/location_conversion.rs#L122
    • WithComputedOrigin type: https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/xcm-builder/src/barriers.rs#L153
    -

    Unresolved Questions

    +

    Unresolved Questions

    Implementation details and overall code are still up for discussion.

    (source)

    Table of Contents

    @@ -7727,7 +8012,7 @@ OLD_PRICE = 1000 ChaosDAO -

    Summary

    +

    Summary

    This RFC proposes to make modifications to voting power delegations as part of the Conviction Voting pallet. The changes being proposed include:

    1. Allow a Delegator to vote independently of their Delegate if they so desire.
    2. @@ -7735,7 +8020,7 @@ OLD_PRICE = 1000
    3. Make a change so that when a delegate votes abstain, their delegated votes also vote abstain.
    4. Allow a Delegator to delegate/undelegate their votes for all tracks with a single call.
    -

    Motivation

    +

    Motivation

    It has become clear since the launch of OpenGov that there are a few common tropes which pop up time and time again:

    1. The frequency of referenda is often too high for network participants to have sufficient time to review, comprehend, and ultimately vote on each individual referendum. This means that these network participants end up being inactive in on-chain governance.
    2. @@ -7743,13 +8028,13 @@ OLD_PRICE = 1000
    3. Delegating votes for all tracks currently requires long batched calls which result in high fees for the Delegator - resulting in a reluctance from many to delegate their votes.

    We believe (based on feedback from token holders with a larger stake in the network) that if there were some changes made to delegation mechanics, these larger stake holders would be more likely to delegate their voting power to active network participants – thus greatly increasing the support turnout.

    -

    Stakeholders

    +

    Stakeholders

    The primary stakeholders of this RFC are:

    • The Polkadot Technical Fellowship who will have to research and implement the technical aspects of this RFC
    • DOT token holders in general
    -

    Explanation

    +

    Explanation

    This RFC proposes to make 4 changes to the convictionVoting pallet logic in order to improve the user experience of those delegating their voting power to another account.

    1. @@ -7777,7 +8062,7 @@ OLD_PRICE = 1000

      We want to highlight the importance of ecosystem builders creating a mechanism for indexers and wallets to understand that changes have occurred, such as increasing the pallet version.

      Prior Art and References

      N/A

      -

      Unresolved Questions

      +

      Unresolved Questions

      N/A

      Additionally, we would like to re-open the conversation about the potential for free delegations. This was discussed by Dr Gavin Wood at Sub0 2022, and we feel this would go a long way towards increasing the number of network participants that are delegating: https://youtu.be/hSoSA6laK3Q?t=526

      @@ -7821,9 +8106,9 @@ OLD_PRICE = 1000 AuthorsSergej Sakac -

      Summary

      +

      Summary

      This RFC proposes a new model for a sustainable on-demand parachain registration, involving a smaller initial deposit and periodic rent payments. The new model considers that on-demand chains may be unregistered and later re-registered. The proposed solution also ensures a quick startup for on-demand chains on Polkadot in such cases.

      -

      Motivation

      +

      Motivation

      With the support of on-demand parachains on Polkadot, there is a need to explore a new, more cost-effective model for registering validation code. In the current model, the parachain manager is responsible for reserving a unique ParaId and covering the cost of storing the validation code of the parachain. These costs can escalate, particularly if the validation code is large. We need a better, sustainable model for registering on-demand parachains on Polkadot to help smaller teams deploy more easily.

      This RFC suggests a new payment model to create a more financially viable approach to on-demand parachain registration. In this model, a lower initial deposit is required, followed by recurring payments upon parachain registration.

      This new model will coexist with the existing one-time deposit payment model, offering teams seeking to deploy on-demand parachains on Polkadot a more cost-effective alternative.

      @@ -7837,11 +8122,11 @@ OLD_PRICE = 1000
    2. The solution MUST allow anyone to pay the rent.
    3. The solution MUST prevent the removal of validation code if it could still be required for disputes or approval checking.
    -

    Stakeholders

    +

    Stakeholders

    • Future Polkadot on-demand Parachains
    -

    Explanation

    +

    Explanation

    This RFC proposes a set of changes that will enable the new rent based approach to registering and storing validation code on-chain. The new model, compared to the current one, will require periodic rent payments. The parachain won't be pruned automatically if the rent is not paid, but by permitting anyone to prune the parachain and rewarding the caller, there will be an incentive for the removal of the validation code.

    On-demand parachains should still be able to utilize the current one-time payment model. However, given the size of the deposit required, it's highly likely that most on-demand parachains will opt for the new rent-based model.
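
    As a rough illustration of the incentive described above (all names, the storage shape, and the reward split here are assumptions for this sketch, not the actual design proposed in this RFC):

    struct RentState {
        paid_until: u32, // block number up to which rent has been paid
        deposit: u128,   // remaining (smaller) registration deposit
    }

    const CALLER_REWARD_PER_MILLE: u128 = 100; // e.g. the caller keeps 10% of the freed deposit

    // Anyone may prune a chain whose rent has lapsed; the caller's cut of the freed
    // deposit is what creates the incentive to remove unpaid validation code.
    fn try_prune(now: u32, rent: &RentState) -> Option<(u128, u128)> {
        if now <= rent.paid_until {
            return None; // rent is paid up: the validation code must remain available
        }
        let to_caller = rent.deposit * CALLER_REWARD_PER_MILLE / 1_000;
        Some((to_caller, rent.deposit - to_caller)) // (caller reward, remainder to refund/burn)
    }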

    @@ -7966,7 +8251,7 @@ pub(super) type CheckedCodeHash<T: Config> =

    This RFC does not break compatibility.

    Prior Art and References

    Prior discussion on this topic: https://github.com/paritytech/polkadot-sdk/issues/1796

    -

    Unresolved Questions

    +

    Unresolved Questions

    None at this time.

    As noted in this GitHub issue, we want to raise the per-byte cost of on-chain data storage. However, a substantial increase in this cost would make it highly impractical for on-demand parachains to register on Polkadot. @@ -8002,16 +8287,16 @@ This RFC offers an alternative solution for on-demand parachains, ensuring that AuthorsPierre Krieger -

    Summary

    +

    Summary

    Rather than enforce a limit to the total memory consumption on the client side by loading the value at :heappages, enforce that limit on the runtime side.

    -

    Motivation

    +

    Motivation

    From the early days of Substrate up until recently, the runtime was present in two forms: the wasm runtime (wasm bytecode passed through an interpreter) and the native runtime (native code directly run by the client).

    Since the wasm runtime has a lower amount of available memory (4 GiB maximum) compared to the native runtime, and in order to ensure that the wasm and native runtimes always produce the same outcome, it was necessary to clamp the amount of memory available to both runtimes to the same value.

    In order to achieve this, a special storage key (a "well-known" key) :heappages was introduced and represents the number of "wasm pages" (one page equals 64kiB) of memory that are available to the memory allocator of the runtimes. If this storage key is absent, it defaults to 2048, which is 128 MiB.
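
    As a quick sanity check of those numbers, expressed as constants (nothing here beyond the figures already stated above):

    const WASM_PAGE_BYTES: u64 = 64 * 1024;  // one wasm page = 64 KiB
    const DEFAULT_HEAP_PAGES: u64 = 2048;    // default when :heappages is absent
    const DEFAULT_HEAP_BYTES: u64 = DEFAULT_HEAP_PAGES * WASM_PAGE_BYTES; // 134_217_728 bytes = 128 MiB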

    The native runtime has since been removed, but the concept of "heap pages" still exists. This RFC proposes a simplification to the design of Polkadot by removing the concept of "heap pages" as it is currently known, and proposes alternative ways to achieve the goal of limiting the amount of memory available.

    -

    Stakeholders

    +

    Stakeholders

    Client implementers and low-level runtime developers.

    -

    Explanation

    +

    Explanation

    This RFC proposes the following changes to the client:

    • The client no longer considers :heappages as special.
    • @@ -8055,7 +8340,7 @@ The :heappages are a rather obscure feature, and it is not clear wh

      Not a breaking change. The runtime-side changes can be applied immediately (without even having to wait for changes in the client), then as soon as the runtime is updated, the client can be updated without any transition period. One can even consider updating the client before the runtime, as it corresponds to path C.

      Prior Art and References

      None.

      -

      Unresolved Questions

      +

      Unresolved Questions

      None.

      This RFC follows the same path as https://github.com/polkadot-fellows/RFCs/pull/4 by scoping everything related to memory allocations to the runtime.

      @@ -8095,7 +8380,7 @@ The :heappages are a rather obscure feature, and it is not clear wh AuthorAdam Clay Steeber -

      Summary

      +

      Summary

      This RFC proposes adding a trivial governance track on Kusama to facilitate X (formerly known as Twitter) posts on the @kusamanetwork account. The technical aspect of implementing this in the runtime is minimal and straightforward, though it might get more technical if the Fellowship wants to regulate this track with a permission set that does not yet exist. If this is implemented, it would need to be followed up with:

      @@ -8103,7 +8388,7 @@ with a non-existent permission set. If this is implemented it would need to be f
    • the establishment of specifications for proposing X posts via this track, and
    • the development of tools/processes to ensure that the content contained in referenda enacted in this track would be automatically posted on X.
    • -

      Motivation

      +

      Motivation

      The overall motivation for this RFC is to decentralize the management of the Kusama brand/communication channel to KSM holders. This is necessary in my opinion primarily because of the inactivity of the account in recent history, with posts spanning weeks or months apart. I am currently unaware of who/what entity manages the Kusama X account, but if they are affiliated with Parity or W3F this proposed solution could also offload some of the legal ramifications of making (or not making) @@ -8113,11 +8398,11 @@ and the community becomes totally autonomous in the management of Kusama's X pos that could be offloaded to openGov, provided this proof-of-concept is successful.

      Finally, this RFC is the epitome of experimentation that Kusama is ideal for. This proposal may spark newfound excitement for Kusama and help us realize Kusama's potential for pushing boundaries and trying new unconventional ideas.

      -

      Stakeholders

      +

      Stakeholders

      This idea has not been formalized by any individual (or group of) KSM holder(s). To my knowledge the socialization of this idea is contained entirely in my recent X post here, but it is possible that an idea like this one has been discussed in other places. It appears to me that the ecosystem would welcome a change like this which is why I am taking action to formalize the discussion.

      -

      Explanation

      +

      Explanation

      The implementation of this idea can be broken down into 3 primary phases:

      Phase 1 - Track configurations

      First, we begin with this RFC to ensure all feedback can be discussed and implemented in the proposal. After the Fellowship and the community come to a reasonable @@ -8203,7 +8488,7 @@ much of a burden or overhead since they've already built the infrastructure for

      One reference to a similar feature requiring on-chain/off-chain coordination would be the Kappa-Sigma-Mu Society. Nothing on-chain necessarily enforces the rules or facilitates bids, challenges, defenses, etc. However, the Society has managed to maintain itself with integrity to its rules. So I don't think this is totally out of Kusama's scope. But it will require some off-chain effort to maintain.

      -

      Unresolved Questions

      +

      Unresolved Questions

      • Who will develop the tools necessary to implement this feature? How do we select them?
      • How can this idea be better implemented with on-chain/substrate features?
      • @@ -8242,11 +8527,11 @@ out of Kusama's scope. But it will require some off-chain effort to maintain.

        AuthorsJelliedOwl -

        Summary

        +

        Summary

        The current size of the decision deposit on some tracks is too high for many proposers. As a result, those needing to use it have to find someone else willing to put up the deposit for them - and a number of legitimate attempts to use the root track have timed out. This track would provide a more affordable (though slower) route for these holders to use the root track.

        -

        Motivation

        +

        Motivation

        There have been recent attempts to use the Kusama root track which have timed out with no decision deposit placed. Usually, these referenda have been related to parachain registration issues.

        -

        Explanation

        +

        Explanation

        I propose to address this by adding a new referendum track, [22] Referendum Deposit, which can place the decision deposit on another referendum. This would require the following changes:

        • [Referenda Pallet] Modify the placeDecisionDeposit function to additionally allow it to be called by root, with a root call bypassing the requirement for a deposit payment.
        • @@ -8286,7 +8571,7 @@ out of Kusama's scope. But it will require some off-chain effort to maintain.

        • Recent discussion / referendum for an alternative way to address this issue: Kusama Referendum 340 - Funding a Decision Deposit Sponsor
        -

        Unresolved Questions

        +

        Unresolved Questions

        Feedback on whether my proposed implementation of this is the best way to address the issue - including which calls the track should be allowed to make. Are the track parameters correct, or should we use something different? Alternatives would be welcome.

        (source)

        Table of Contents

        @@ -8331,7 +8616,7 @@ out of Kusama's scope. But it will require some off-chain effort to maintain.

        AuthorsAbdelrahman Soliman (Boda) -

        Summary

        +

        Summary

        A pallet to facilitate enhanced multisig accounts. The main enhancement is that we store a multisig account in state with related info (signers, threshold, etc.). The module affords enhanced control over administrative operations such as adding/removing signers, changing the threshold, deleting the account, and canceling an existing proposal. Each signer can approve/reject a proposal while it still exists. The proposal is not intended for migrating away from or removing the existing multisig pallet; it is to allow both options to coexist.
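
        To give a rough idea of what "stored in state" means here, a minimal sketch of the kind of records involved (field and type names are illustrative, not the exact storage items of the draft PR):

        struct MultisigAccount<AccountId> {
            signers: Vec<AccountId>, // current signer set; changeable by the account itself
            threshold: u32,          // approvals required for a proposal to execute
        }

        struct PendingProposal<AccountId, Call> {
            call: Call,                 // the dispatchable awaiting execution
            approvals: Vec<AccountId>,  // signers who approved so far
            rejections: Vec<AccountId>, // signers who rejected; votes may change while the proposal exists
        }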

        For the rest of the RFC, we use the following terms:

          @@ -8339,7 +8624,7 @@ out of Kusama's scope. But it will require some off-chain effort to maintain.

          Stateful Multisig to refer to the proposed pallet.
        • Stateless Multisig to refer to the current multisig pallet in polkadot-sdk.
        -

        Motivation

        +

        Motivation

        Problem

        Entities in the Polkadot ecosystem need to have a way to manage their funds and other operations in a secure and efficient way. Multisig accounts are a common way to achieve this. Entities by definition change over time, members of the entity may change, threshold requirements may change, and the multisig account may need to be deleted. For even more enhanced hierarchical control, the multisig account may need to be controlled by other multisig accounts.

        Current native solutions for multisig operations are suboptimal performance-wise (as we'll explain later in the RFC) and lack fine-grained control over the multisig account.

        @@ -8381,12 +8666,12 @@ DAOs can utilize multisig accounts to ensure that decisions are made collectivel

      and much more...

      -

      Stakeholders

      +

      Stakeholders

      • Polkadot holders
      • Polkadot developers
      -

      Explanation

      +

      Explanation

      I created the stateful multisig pallet during my studies at the Polkadot Blockchain Academy under supervision from @shawntabrizi and @ank4n. Since then, I have enhanced it to be fully functional; this is a draft PR#3300 in polkadot-sdk. I'll list all the details and design decisions in the following sections. Note that the PR does not map 1-to-1 to the current RFC, as the RFC is a more polished version of the PR, updated based on feedback and discussions.

      Let's start with a sequence diagram to illustrate the main operations of the Stateful Multisig.

      multisig operations

      @@ -8876,7 +9161,7 @@ We have the following extrinsics:

      This RFC is compatible with the existing implementation and can be handled via upgrades and migration. It's not intended to replace the existing multisig pallet.

      Prior Art and References

      multisig pallet in polkadot-sdk

      -

      Unresolved Questions

      +

      Unresolved Questions

      • On account deletion, should we transfer remaining deposits to treasury or remove signers' addition deposits completely and consider it as fees to start with?
      @@ -8926,9 +9211,9 @@ Implement call filters. This will allow multisig accounts to only accept certain AuthorsLuke Schoen -

      Summary

      +

      Summary

      This proposes to increase the maximum length of PGP Fingerprint values from a 20 bytes/chars limit to a 40 bytes/chars limit.

      -

      Motivation

      +

      Motivation

      Background

      Pretty Good Privacy (PGP) Fingerprints are shorter versions of their corresponding Public Key that may be printed on a business card.

      They may be used by someone to validate the correct corresponding Public Key.

      @@ -8946,7 +9231,7 @@ Implement call filters. This will allow multisig accounts to only accept certain

    Solution Requirements

    The maximum length of identity PGP Fingerprint values should be increased from the current 20 bytes/chars limit to at least a 40 bytes/chars limit to support PGP Fingerprints and GPG Fingerprints.
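
    For reference, the value in question is stored today as a fixed-size 20-byte array in pallet-identity's IdentityInfo. The sketch below shows the kind of widening this RFC asks for, using simplified stand-in structs rather than the real one:

    // Simplified stand-ins; the real field is `pgp_fingerprint` in pallet-identity's IdentityInfo.
    struct IdentityInfoCurrent {
        pgp_fingerprint: Option<[u8; 20]>, // rejects a 40-character input, as in the error shown later
    }

    struct IdentityInfoProposed {
        pgp_fingerprint: Option<[u8; 40]>, // wide enough for the 40-char fingerprint users paste in
    }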

    -

    Stakeholders

    +

    Stakeholders

    • Any Polkadot account holder wishing to use a Polkadot on-chain identity for their:
        @@ -8955,7 +9240,7 @@ Implement call filters. This will allow multisig accounts to only accept certain
    -

    Explanation

    +

    Explanation

    If a user tries to set an on-chain identity by creating an extrinsic using Polkadot.js with identity > setIdentity(info), and they provide their 40-character-long PGP Fingerprint or GPG Fingerprint, which is longer than the maximum length of 20 bytes/chars [u8;20], then they will encounter this error:

    createType(Call):: Call: failed decoding identity.setIdentity:: Struct: failed on args: {...}:: Struct: failed on pgpFingerprint: Option<[u8;20]>:: Expected input with 20 bytes (160 bits), found 40 bytes
     
    @@ -8976,7 +9261,7 @@ Implement call filters. This will allow multisig accounts to only accept certain

    Updates to Polkadot.js Apps, API and its documentation and those referring to it may be required.

    Prior Art and References

    No prior articles or references.

    -

    Unresolved Questions

    +

    Unresolved Questions

    No further questions at this stage.

    Relates to RFC entitled "Increase maximum length of identity raw data values from 32 bytes".

    @@ -9024,10 +9309,10 @@ Implement call filters. This will allow multisig accounts to only accept certain AuthorsLuke Schoen -

    Summary

    +

    Summary

    This proposes to require a slashable deposit in the broker pallet when initially purchasing or renewing Bulk Coretime or Instantaneous Coretime cores.

    Additionally, it proposes to record a reputational status based on the behavior of the purchaser, as it relates to their use of Kusama Coretime cores that they purchase, and to possibly reserve a proportion of the cores for prospective purchasers that have an on-chain identity.

    -

    Motivation

    +

    Motivation

    Background

    There are sales of Kusama Coretime cores scheduled to occur later this month by the Coretime Marketplace Lastic.xyz, initially in limited quantities, and potentially also by RegionX in the future, subject to their Polkadot referendum #582. This poses a risk in that some Kusama Coretime core purchasers may buy cores with no intention of actually placing a workload on them or leasing them out, which would prevent those that wish to purchase and actually use Kusama Coretime cores from being able to use any cores at all.

    Problem

    @@ -9059,7 +9344,7 @@ Implement call filters. This will allow multisig accounts to only accept certain

    Reputation. To disincentivise certain behaviours, a reputational status indicator could be used to record the historic behavior of the purchaser and whether on-chain judgement has determined they have adequately rectified that behaviour, as it relates to their usage of Kusama Coretime cores that they purchase.

    -

    Stakeholders

    +

    Stakeholders

    • Any Kusama account holder wishing to use the Broker pallet in any upcoming Kusama Coretime sales.
    • Any prospective Kusama Coretime purchaser, developer, and user.
    • @@ -9082,9 +9367,9 @@ Implement call filters. This will allow multisig accounts to only accept certain

      Compatibility

      Updates to Polkadot.js Apps, API and its documentation and those referring to it may be required.

      Prior Art and References

      -

      Prior Art

      +

      Prior Art

      No prior articles.

      -

      Unresolved Questions

      +

      Unresolved Questions

      None

      None

      @@ -9125,13 +9410,13 @@ Implement call filters. This will allow multisig accounts to only accept certain AuthorsAurora Poppyseed, Philip Lucsok -

      Summary

      +

      Summary

      This RFC proposes the addition of a secondary market feature, either in the broker pallet or as a separate pallet maintained by Lastic, enabling users to list and purchase regions. This includes creating, purchasing, and removing listings, as well as emitting relevant events and handling associated errors.
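
      Purely as an illustration of the data involved (not the pallet's final types or extrinsic names), a listing could be as small as:

      struct Listing<AccountId, Balance, RegionId> {
          region: RegionId,  // the coretime region being offered for sale
          seller: AccountId, // current owner, who receives the payment
          price: Balance,    // asking price a buyer must pay to take over the region
      }

      // The three operations described above would then map to three calls, e.g.
      // create_listing(region, price), purchase_region(region), remove_listing(region).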

      -

      Motivation

      +

      Motivation

      Currently, the broker pallet lacks functionality for a secondary market, which limits users' ability to freely trade regions. This RFC aims to introduce a secure and straightforward mechanism for users to list regions they own for sale and allow other users to purchase these regions.

      While integrating this functionality directly into the broker pallet is one option, another viable approach is to implement it as a separate pallet maintained by Lastic. This separate pallet would have access to the broker pallet and add minimal functionality necessary to support the secondary market.

      Adding smart contracts to the Coretime chain could also address this need; however, this process is expected to be lengthy and complex. We cannot afford to wait for this extended timeline to enable basic secondary market functionality. By proposing either integration into the broker pallet or the creation of a dedicated pallet, we can quickly enhance the flexibility and utility of the broker pallet, making it more user-friendly and valuable.

      -

      Stakeholders

      +

      Stakeholders

      Primary stakeholders include:

      • Developers working on the broker pallet.
      • @@ -9139,7 +9424,7 @@ Implement call filters. This will allow multisig accounts to only accept certain
      • Users who own regions and wish to trade them.
      • Community members interested in enhancing the broker pallet’s capabilities.
      -

      Explanation

      +

      Explanation

      This RFC introduces the following key features:

      1. @@ -9219,7 +9504,7 @@ Implement call filters. This will allow multisig accounts to only accept certain
        • All related discussions are going to be under this PR.
        -

        Unresolved Questions

        +

        Unresolved Questions

        • Are there additional security measures needed to prevent potential abuses of the new functionalities?
        @@ -9266,12 +9551,12 @@ Implement call filters. This will allow multisig accounts to only accept certain AuthorsAurora Poppyseed, Phil Lucksok -

        Summary

        +

        Summary

        This RFC proposes the integration of smart contracts on the Coretime chain to enhance flexibility and enable complex decentralized applications, including secondary market functionalities.

        -

        Motivation

        +

        Motivation

        Currently, the Coretime chain lacks the capability to support smart contracts, which limits the range of decentralized applications that can be developed and deployed. By enabling smart contracts, the Coretime chain can facilitate more sophisticated functionalities such as automated region trading, dynamic pricing mechanisms, and other decentralized applications that require programmable logic. This will enhance the utility of the Coretime chain, attract more developers, and create more opportunities for innovation.

        Additionally, while there is a proposal (#885) to allow EVM-compatible contracts on Polkadot’s Asset Hub, the implementation of smart contracts directly on the Coretime chain will provide synchronous interactions and avoid the complexities of asynchronous operations via XCM.

        -

        Stakeholders

        +

        Stakeholders

        Primary stakeholders include:

        • Developers working on the Coretime chain.
        • @@ -9279,7 +9564,7 @@ Implement call filters. This will allow multisig accounts to only accept certain
        • Community members interested in expanding the capabilities of the Coretime chain.
        • Secondary Coretime marketplaces.
        -

        Explanation

        +

        Explanation

        This RFC introduces the following key components:

        1. @@ -9357,7 +9642,7 @@ Implement call filters. This will allow multisig accounts to only accept certain
        2. Existing decentralized applications and use cases on other blockchain platforms.
        3. Proposal #885: EVM-compatible contracts on Asset Hub, which highlights the community's interest in integrating smart contracts within the Polkadot ecosystem.
    -

    Unresolved Questions

    +

    Unresolved Questions

    • What specific security measures should be implemented to prevent smart contract vulnerabilities?
    • How can we ensure optimal performance while supporting complex smart contracts?
    • @@ -9417,12 +9702,12 @@ Implement call filters. This will allow multisig accounts to only accept certain Authorseskimor -

      Summary

      +

      Summary

      Change the upgrade process of a parachain runtime upgrade to become an off-chain process with regards to the relay chain. Upgrades are still contained in parachain blocks, but will no longer need to end up in relay chain blocks nor in relay chain state.

      -

      Motivation

      +

      Motivation

      Having parachain runtime upgrades go through the relay chain has always been seen as a scalability concern. Due to optimizations in statement distribution and asynchronous backing it became less crucial and got @@ -9437,13 +9722,13 @@ this we would hope for far more parachains to get registered, thousands potentially even ten thousands. With so many PVFs registered, updates are expected to become more frequent and even attacks on service quality for other parachains would become a higher risk.

      -

      Stakeholders

      +

      Stakeholders

      • Parachain Teams
      • Relay Chain Node implementation teams
      • Relay Chain runtime developers
      -

      Explanation

      +

      Explanation

      The issues with on-chain runtime upgrades are:

      1. Needlessly costly.
      2. @@ -9648,7 +9933,7 @@ namely:

      3. Existing pre-checking.

      https://github.com/paritytech/polkadot-sdk/issues/971

      -

      Unresolved Questions

      +

      Unresolved Questions

      1. What about the initial runtime, shall we make that off-chain as well?
      2. Good news, at least after the first upgrade, no code will be stored on chain @@ -9741,24 +10026,24 @@ sharing if multiple parachains use the same data (e.g. same smart contracts).

        AuthorsFrancisco Aguirre -

        Summary

        +

        Summary

        The SetFeesMode instruction and the fees_mode register allow for the existence of JIT withdrawal. JIT withdrawal complicates the fee mechanism and leads to bugs and unexpected behaviour. The proposal is to remove said functionality. Another effort to simplify fee handling in XCM.

        -

        Motivation

        +

        Motivation

        The JIT withdrawal mechanism creates bugs such as not being able to get fees when all assets are put into holding and none left in the origin location. This is a confusing behavior, since there are funds for fees, just not where the XCVM wants them. The XCVM should have only one entrypoint to fee payment, the holding register. That way there is also less surface for bugs.

        -

        Stakeholders

        +

        Stakeholders

        • Runtime Users
        • Runtime Devs
        • Wallets
        • dApps
        -

        Explanation

        +

        Explanation

        The SetFeesMode instruction will be removed. The Fees Mode register will be removed.

        Drawbacks

        @@ -9784,7 +10069,7 @@ The instruction should be deprecated as soon as this RFC is approved (probably deprecate in v5, remove in v6).

        Prior Art and References

        The previous RFC PR on the xcm-format repo, before XCM RFCs were moved to fellowship RFCs: https://github.com/polkadot-fellows/xcm-format/pull/57.

        -

        Unresolved Questions

        +

        Unresolved Questions

        None.

        The new generic fees mechanism is related to this proposal and further motivates it, as the JIT fee withdrawal mechanism will become useless anyway.

        @@ -9819,9 +10104,9 @@ The instruction should be deprecated as soon as this RFC is approved Authors@muharem @xlc -

        Summary

        +

        Summary

        This RFC proposes a solution to replicate an existing pure proxy from one chain to others. The aim is to address the current limitations where pure proxy accounts, which are keyless, cannot have their proxy relationships recreated on different chains. This leads to issues where funds or permissions transferred to the same keyless account address on chains other than its origin chain become inaccessible.

        -

        Motivation

        +

        Motivation

        A pure proxy is a new account created by a primary account. The primary account is set as a proxy for the pure proxy account, managing it. Pure proxies are keyless and non-reproducible, meaning they lack a private key and have an address derived from a preimage determined by on-chain logic. More on pure proxies can be found here.

        For the purpose of this document, we define a keyless account as a "pure account", the controlling account as a "proxy account", and the entire relationship as a "pure proxy".

        The relationship between a pure account (e.g., account ID: pure1) and its proxy (e.g., account ID: alice) is stored on-chain (e.g., parachain A) and currently cannot be replicated to another chain (e.g., parachain B). Because the account pure1 is keyless and its proxy relationship with alice is not replicable from the parachain A to the parachain B, alice does not control the pure1 account on the parachain B.

        @@ -9833,9 +10118,9 @@ The instruction should be deprecated as soon as this RFC is approved
      3. users may prefer an account with a registered identity (e.g. for cross-chain treasury spend proposal), even if the account is keyless;

    Given that these mistakes are likely, it is necessary to provide a solution to either prevent them or enable access to a pure account on a target chain.

    -

    Stakeholders

    +

    Stakeholders

    Runtime Users, Runtime Devs, wallets, cross-chain dApps.

    -

    Explanation

    +

    Explanation

    One possible solution is to allow a proxy to create or replicate a pure proxy relationship for the same pure account on a target chain. For example, Alice, as the proxy of the pure1 pure account on parachain A, should be able to set a proxy for the same pure1 account on parachain B.

    To minimise security risks, parachain B should grant parachain A the least amount of permission necessary for the replication. First, parachain A claims to parachain B that the operation is commanded by the pure account, and thus by its proxy, and second, it provides proof that the account is keyless.

    The replication process will be facilitated by XCM, with the first claim made using the DescendOrigin instruction. The replication call on parachain A would require a signed origin by the pure account and construct an XCM program for parachain B, where it first descends the origin, resulting in the ParachainA/AccountId32(pure1) origin location on the receiving side.

    @@ -9909,7 +10194,7 @@ mod pallet_proxy_replica {

    None.

    Prior Art and References

    None.

    -

    Unresolved Questions

    +

    Unresolved Questions

    None.

      @@ -9946,20 +10231,20 @@ mod pallet_proxy_replica { AuthorsLiu-Cheng Xu -

      Summary

      +

      Summary

      This RFC proposes compressing the state response message during the state syncing process to reduce the amount of data transferred.

      -

      Motivation

      +

      Motivation

      State syncing can require downloading several gigabytes of data, particularly for blockchains with large state sizes, such as Astar, which has a state size exceeding 5 GiB (https://github.com/AstarNetwork/Astar/issues/1110). This presents a significant challenge for nodes with slower network connections. Additionally, the current state sync implementation lacks a persistence feature (https://github.com/paritytech/polkadot-sdk/issues/4), meaning any network disruption forces the node to re-download the entire state, making the process even more difficult.

      -

      Stakeholders

      +

      Stakeholders

      This RFC benefits all projects utilizing the Substrate framework, specifically in improving the efficiency of state syncing.

      • Node Operators.
      • Substrate Users.
      -

      Explanation

      +

      Explanation

      The largest portion of the state response message consists of either CompactProof or Vec<KeyValueStateEntry>, depending on whether a proof is requested (source):

      • CompactProof: When proof is requested, compression yields a lower ratio but remains beneficial, as shown in warp sync tests in the Performance section below.
      • @@ -9981,7 +10266,7 @@ for compression.
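
      Whichever part dominates, the response is an opaque byte blob from the transport's point of view, so a general-purpose compressor can wrap it without touching the SCALE encoding. A minimal sketch, assuming zstd as the codec (an assumption for illustration, not necessarily the codec chosen in the final implementation):

      use std::io;

      fn compress_state_response(encoded: &[u8]) -> io::Result<Vec<u8>> {
          zstd::encode_all(encoded, 0) // level 0 = zstd's default compression level
      }

      fn decompress_state_response(compressed: &[u8]) -> io::Result<Vec<u8>> {
          zstd::decode_all(compressed)
      }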

        No compatibility issues identified.

        Prior Art and References

        None.

        -

        Unresolved Questions

        +

        Unresolved Questions

        None.

        None.

        @@ -10018,9 +10303,9 @@ for compression. AuthorsRodrigo Quelhas -

        Summary

        +

        Summary

        This RFC proposes a new host function, secp256r1_ecdsa_verify_prehashed, for verifying NIST-P256 signatures. The function takes as input the message hash, r and s components of the signature, and the x and y coordinates of the public key. By providing this function, runtime authors can leverage a more efficient verification mechanism for "secp256r1" elliptic curve signatures, reducing computational costs and improving overall performance.

        -

        Motivation

        +

        Motivation

        The “secp256r1” elliptic curve is a NIST-standardized curve that uses the same underlying calculations as the “secp256k1” curve, just with different input parameters. The cost of combined attacks and the security conditions are almost the same for both curves. Adding a host function that verifies “secp256r1” signatures in the runtime brings multi-faceted benefits. One important factor is that this curve is widely used and supported in many modern devices such as Apple’s Secure Enclave, WebAuthn, and Android Keychain, which demonstrates user adoption. Additionally, the introduction of this host function could enable valuable account-abstraction features, allowing more efficient and flexible management of accounts through transaction signing on mobile devices. Most modern devices and applications rely on the “secp256r1” elliptic curve, so this host function enables more efficient verification of device-native transaction signing mechanisms. For example:

          @@ -10029,11 +10314,11 @@ Most of the modern devices and applications rely on the “secp256r1” elliptic
        1. Android Keystore: an API that manages private keys and signing. Private keys are not exposed to the application when Keystore is used for signing, and operations can be performed inside the device's Trusted Execution Environment.
        2. Passkeys: built on FIDO Alliance and W3C standards, passkeys replace passwords with cryptographic key pairs, typically based on elliptic curve cryptography.
        -

        Stakeholders

        +

        Stakeholders

        • Runtime Authors
        -

        Explanation

        +

        Explanation

        This RFC proposes a new host function for runtime authors to leverage a more efficient verification mechanism for "secp256r1" elliptic curve signatures.

        Proposed host function signature:

        #![allow(unused)]
        @@ -10098,12 +10383,12 @@ Most of the modern devices and applications rely on the “secp256r1” elliptic
         AuthorsBryan Chen, Pablo Dorado
         
         
        -

        Summary

        +

        Summary

        A follow-up to RFC-0014. This RFC proposes adding a new collective to the Polkadot Collectives Chain, the Unbrick Collective, as well as improvements to the mechanisms that allow teams operating paras that have stopped producing blocks to be assisted in restoring block production for these paras.

        -

        Motivation

        +

        Motivation

        Since the initial launch of Polkadot parachains, there have been many incidents causing parachains to stop producing new blocks (therefore, being bricked) and many occurrences that required Polkadot governance to update the parachain head state/wasm. This can be due to many reasons range @@ -10117,14 +10402,14 @@ damage to the parachain and users.

        Polkadot Fellowship), due to the nature of their mission, are not fit to carry out these kinds of tasks.

        In consequence, the idea of an Unbrick Collective that can provide assistance to para teams when they brick, and further protection against future halts, is reasonable enough.

        -

        Stakeholders

        +

        Stakeholders

        • Parachain teams
        • Parachain users
        • OpenGov users
        • Polkadot Fellowship
        -

        Explanation

        +

        Explanation

        The Collective

        The Unbrick Collective is defined as an unranked collective of members, not paid by the Polkadot Treasury. Its main goal is to serve as a point of contact and assistance for enacting the actions @@ -10216,7 +10501,7 @@ to unbrick a stalled para.

      • How to Recover a Parachain, Polkadot Forum
      • Unbrick Collective, Polkadot Forum
      -

      Unresolved Questions

      +

      Unresolved Questions

      • What are the parameters for the WhitelistedUnbrickCaller track?
      • Any other methods that shall be updated to accept Unbrick origin?
      • @@ -10269,11 +10554,11 @@ of the paras, the locked/unlocked state, and the manager info.

        AuthorsPablo Dorado, Daniel Olano -

        Summary

        +

        Summary

        In an attempt to mitigate risks derived from unwanted behaviours around long decision periods on referenda, this proposal describes how to finalize and decide a result of a poll via a mechanism similar to candle auctions.

        -

        Motivation

        +

        Motivation

        The referenda protocol provides permissionless and efficient mechanisms that enable governance actors to decide the future of the blockchains around the Polkadot network. However, these mechanisms pose a series of risks from a game-theory perspective. One of them being where an actor @@ -10289,7 +10574,7 @@ on a poll as early as possible. This proposal's approach suggests using a Candle be determined right after the confirm period finishes, thus decreasing the chances that actors alter the results of a poll in the confirming state, and instead incentivizing them to cast their votes earlier, in the deciding state.
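
        A minimal sketch of the core idea, with names and the reduction method chosen purely for illustration: once the confirm period has ended, randomness revealed afterwards (e.g. from a VRF) retroactively picks the "candle" block inside the confirm window, and the poll's outcome is whatever it was at that block.

        fn candle_block(confirm_start: u32, confirm_end: u32, randomness: [u8; 32]) -> u32 {
            // Reduce the revealed randomness into the confirm window (inclusive bounds).
            let window = u64::from(confirm_end - confirm_start) + 1;
            let r = u64::from_le_bytes(randomness[..8].try_into().expect("slice is 8 bytes"));
            confirm_start + (r % window) as u32
        }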

        -

        Stakeholders

        +

        Stakeholders

        • Governance actors: Tokenholders and Collectives that vote on polls that have this mechanism enabled should be aware this change affects the outcome of failing a poll on its confirm period.
        • @@ -10298,7 +10583,7 @@ parameters for the Referenda Pallet.
        • Tooling and UI developers: Applications that interact with referenda must update to reflect the new Finalizing state.
        -

        Explanation

        +

        Explanation

        Currently, the process of a referendum/poll is defined as a sequence within an ongoing state (where accounts can vote), comprising a preparation period, a decision period, and a confirm period. If the poll is passing before the decision period ends, it's possible to push @@ -10411,7 +10696,7 @@ from which to start applying the new mechanism, thus, not affecting the already

      • Auctions pallet in polkadot-runtime-common: Defines the mechanism of candle auctions.
      • PBA Book: A good place to read about VRFs.
      -

      Unresolved Questions

      +

      Unresolved Questions

      • How to determine in a statistically meaningful way that a change in the poll status corresponds to an organic behaviour, and not an unwanted, malicious behaviour?
      • @@ -10456,10 +10741,10 @@ organic behaviour, and not an unwanted, malicious behaviour? AuthorsGeorge Pisaltu -

        Summary

        +

        Summary

        This RFC proposes the definition of version 5 extrinsics along with changes to the specification and encoding from version 4.

        -

        Motivation

        +

        Motivation

        RFC84 introduced the specification of General transactions, a new type of extrinsic besides the Signed and Unsigned variants available previously in version 4. Additionally, @@ -10468,13 +10753,13 @@ introduced versioning of transaction extensions through an extra byte in the ext Both of these changes require an extrinsic format version bump as both the semantics around extensions as well as the actual encoding of extrinsics need to change to accommodate these new features.

        -

        Stakeholders

        +

        Stakeholders

        • Runtime users
        • Runtime devs
        • Wallet devs
        -

        Explanation

        +

        Explanation

        Changes to extrinsic authorization

        The introduction of General transactions allows the authorization of any and all origins through extensions. This means that, with the appropriate extension, General transactions can replicate @@ -10576,7 +10861,7 @@ support version 5 and to remove version 4 in the future.

        This is a result of the work in Extrinsic Horizon and RFC99.

        -

        Unresolved Questions

        +

        Unresolved Questions

        None.

        Following this change, extrinsic version 5 will be introduced as part of the Extrinsic @@ -10620,12 +10905,12 @@ work.

        AuthorsGeorge Pisaltu -

        Summary

        +

        Summary

        The current election mechanism for permissionless collators on system chains was introduced in RFC-7. This RFC proposes a mechanism to facilitate replacements in the invulnerable sets of system chains by breaking down barriers that exist today.

        -

        Motivation

        +

        Motivation

        Following RFC-7 and the introduction of the collator election mechanism, anyone can now collate on a system chain on the permissionless slots, but the invulnerable set has been a contentious issue among @@ -10660,12 +10945,12 @@ invulnerable set de facto immutable.

        circle. The aim of this RFC is to provide a clear, reasonable, fair, and socially acceptable path for a permissionless collator with a proven track record to become an invulnerable while preserving the stability of the invulnerable set of a system parachain.

        -

        Stakeholders

        +

        Stakeholders

        • Infrastructure providers (people who run validator/collator nodes)
        • Polkadot Treasury
        -

        Explanation

        +

        Explanation

        Proposal

        This RFC proposes a periodic, mandatory, round-robin, two-round election mechanism for invulnerables.

        @@ -10758,7 +11043,7 @@ invulnerable set for each chain can be grandfathered in when upgrading the

        Prior Art and References

        This RFC builds on RFC-7, which introduced the election mechanism for system chain collators.

        -

        Unresolved Questions

        +

        Unresolved Questions

        • How long should the period between individual elections be? How long should the full election cycle be? @@ -10811,7 +11096,7 @@ of this RFC.

          AuthorsLeemo / ChaosDAO -

          Summary

          +

          Summary

          This RFC proposes to change the duration of the Confirmation Period for the Big Tipper and Small Tipper tracks in Polkadot OpenGov:

          • @@ -10821,7 +11106,7 @@ of this RFC.

            Big Tipper: 1 Hour -> 1 Day

          -

          Motivation

          +

          Motivation

          Currently, these are the durations of treasury tracks in Polkadot OpenGov. Confirmation periods for the Spender tracks were adjusted based on RFC20 and its related conversation.

          @@ -10835,12 +11120,12 @@ of this RFC.

          You can see that there is a general trend on the Spender tracks: as the privilege level (the amount the track can spend) increases, the confirmation period approximately doubles.

          I believe that the Big Tipper and Small Tipper tracks' confirmation periods should be adjusted to match this trend.

          In the current state it is possible to somewhat positively snipe these tracks, and whilst the power/privilege level of these tracks is very low (they cannot spend a large amount of funds), I believe we should increase the confirmation periods to something higher. This is backed up by the recent sentiment in the greater community regarding referendums submitted on these tracks. The parameters of Polkadot OpenGov can be adjusted based on the general sentiment of token holders when necessary.

          -

          Stakeholders

          +

          Stakeholders

          The primary stakeholders of this RFC are:

          • DOT token holders – as this affects the protocol's treasury
          • Entities wishing to submit a referendum on these tracks – as this affects the referendum's timeline
          • Projects with governance app integrations – see the Performance, Ergonomics and Compatibility section below

          -

          Explanation

          +

          Explanation

          This RFC proposes to change the duration of the confirmation period for both the Big Tipper and Small Tipper tracks. To achieve this the confirm_period parameter for those tracks should be changed.

          You can see the lines of code that need to be adjusted here:

            @@ -10868,7 +11153,7 @@ of this RFC.

          Prior Art and References

          N/A

          -

          Unresolved Questions

          +

          Unresolved Questions

          Some token holders may want these confirmation periods to remain as they are currently and for them not to increase. If this is something that the Polkadot Technical Fellowship considers to be an issue to implement into a runtime upgrade then I can create a Wish For Change to obtain token holder approval.

          The parameters of Polkadot OpenGov will likely continue to change over time, there are additional discussions in the community regarding adjusting the min_support for some tracks so that it does not trend towards 0%, similar to the current state of the Whitelisted Caller track. This is outside of the scope of this RFC and requires a lot more discussion.

          @@ -10903,9 +11188,9 @@ of this RFC.

          Track Description | Confirmation Period Duration
          Treasurer | 7 Days
          AuthorsShawn Tabrizi
          -

          Summary

          +

          Summary

          This is a proposal to reduce the impact of stale nominations in the Polkadot staking system. With this proposal, nominators are incentivized to update or renew their selected validators once per time period. Nominators that do not update or renew their selected validators would be considered stale, and a decaying multiplier would be applied to their nominations, reducing the weight of their nomination and rewards.
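
          One possible shape for such a decaying multiplier, sketched with made-up names and a linear decay purely for illustration (the RFC leaves the exact curve open):

          fn effective_vote_weight(
              stake: u128,             // the nominator's bonded stake
              eras_since_renewal: u32, // eras since the nomination was last updated or renewed
              grace_eras: u32,         // eras a nomination stays "fresh" before decay starts
              decay_ppm_per_era: u64,  // decay per stale era, in parts per million
          ) -> u128 {
              if eras_since_renewal <= grace_eras {
                  return stake; // recently renewed nominations keep their full weight
              }
              let stale_eras = u64::from(eras_since_renewal - grace_eras);
              // Accumulated decay, capped at 100%.
              let decay_ppm = decay_ppm_per_era.saturating_mul(stale_eras).min(1_000_000);
              stake.saturating_mul(u128::from(1_000_000 - decay_ppm)) / 1_000_000
          }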

          -

          Motivation

          +

          Motivation

          Longer motivation behind the content of the RFC, presented as a combination of both problems and requirements for the solution.

          One of Polkadot's primary utilities is providing a high quality security layer for applications built on top of it. To achieve this, Polkadot runs a Nominated Proof-of-Stake system, allowing nominators to vote on who they think are the best validators for Polkadot.

          This system functions best when nominators and validators are active participants in the network. Nominators should consistently evaluate the quality and preferences of validators, and adjust their nominations accordingly.

          @@ -10918,13 +11203,13 @@ of this RFC.

        • New validators have a harder time entering the active set.
        • Validators are able to "sneakily" increase their commission.
        -

        Stakeholders

        +

        Stakeholders

        Primary stakeholders are:

        • Nominators
        • Validators
        -

        Explanation

        +

        Explanation

        Detail-heavy explanation of the RFC, suitable for explanation to an implementer of the changeset. This should address corner cases in detail and provide justification behind decisions, and provide rationale for how the design meets the solution requirements.

        Drawbacks

        Description of recognized drawbacks to the approach given in the RFC. Non-exhaustively, drawbacks relating to performance, ergonomics, user experience, security, or privacy.

        @@ -10940,7 +11225,7 @@ of this RFC.

        Does this proposal break compatibility with existing interfaces, older versions of implementations? Summarize necessary migrations or upgrade strategies, if any.

        Prior Art and References

        Provide references to either prior art or other relevant research for the submitted design.

        -

        Unresolved Questions

        +

        Unresolved Questions

        Provide specific questions to discuss and address before the RFC is voted on by the Fellowship. This should include, for example, alternatives to aspects of the proposed design where the appropriate trade-off to make is unclear.

        Describe future work which could be enabled by this RFC, if it were accepted, as well as related RFCs. This is a place to brain-dump and explore possibilities, which themselves may become their own RFCs.

        diff --git a/proposed/0017-coretime-market-redesign.html b/proposed/0017-coretime-market-redesign.html index 5224b62..b7a1358 100644 --- a/proposed/0017-coretime-market-redesign.html +++ b/proposed/0017-coretime-market-redesign.html @@ -90,7 +90,7 @@ @@ -312,7 +312,7 @@