From 00b3ec9d1ad4f2edc489151b372b4e3ed9b8de61 Mon Sep 17 00:00:00 2001 From: bkchr Date: Thu, 18 Jan 2024 00:53:07 +0000 Subject: [PATCH] deploy: ade390b5ef06f3d897a56acea653504e706fe30e --- 404.html | 2 +- approved/0001-agile-coretime.html | 2 +- approved/0005-coretime-interface.html | 2 +- approved/0007-system-collator-selection.html | 2 +- approved/0008-parachain-bootnodes-dht.html | 2 +- ...12-process-for-adding-new-collectives.html | 2 +- ...rove-locking-mechanism-for-parachains.html | 2 +- approved/0022-adopt-encointer-runtime.html | 2 +- approved/0032-minimal-relay.html | 2 +- approved/0050-fellowship-salaries.html | 2 +- ...0056-one-transaction-per-notification.html | 2 +- index.html | 2 +- introduction.html | 2 +- new/0066-add-smartcontracts-to-assethub.html | 2 +- print.html | 630 +++++++----------- proposed/000x-lowering-deposits-assethub.html | 2 +- proposed/0026-sassafras-consensus.html | 2 +- ...-absolute-location-account-derivation.html | 2 +- proposed/0042-extrinsics-state-version.html | 2 +- proposed/0044-rent-based-registration.html | 2 +- .../0046-metadata-for-offline-signers.html | 2 +- ...047-assignment-of-availability-chunks.html | 6 +- .../0061-allocator-inside-of-runtime.html | 6 +- searchindex.js | 2 +- searchindex.json | 2 +- ...04-remove-unnecessary-allocator-usage.html | 2 +- ...namic-pricing-for-bulk-coretime-sales.html | 2 +- ...09-improved-net-light-client-requests.html | 2 +- stale/0010-burn-coretime-revenue.html | 6 +- ...ath-to-account-creation-on-asset-hubs.html | 392 ----------- ...uilder-and-core-runtime-apis-for-mbms.html | 6 +- stale/0015-market-design-revisit.html | 2 +- ...irmation-period-duration-modification.html | 2 +- ...ction-voting-delegation-modifications.html | 2 +- .../0043-storage-proof-size-hostfunction.html | 2 +- stale/0048-session-keys-runtime-api.html | 2 +- stale/0054-remove-heap-pages.html | 8 +- .../0059-nodes-capabilities-discovery.html | 12 +- 38 files changed, 285 insertions(+), 841 deletions(-) delete 
mode 100644 stale/0011-add-new-path-to-account-creation-on-asset-hubs.html rename {proposed => stale}/0059-nodes-capabilities-discovery.html (89%) diff --git a/404.html b/404.html index e2ab128..15d4d78 100644 --- a/404.html +++ b/404.html @@ -91,7 +91,7 @@ diff --git a/approved/0001-agile-coretime.html b/approved/0001-agile-coretime.html index 0f4356e..9d13e6b 100644 --- a/approved/0001-agile-coretime.html +++ b/approved/0001-agile-coretime.html @@ -90,7 +90,7 @@ diff --git a/approved/0005-coretime-interface.html b/approved/0005-coretime-interface.html index 657c9cb..95e1077 100644 --- a/approved/0005-coretime-interface.html +++ b/approved/0005-coretime-interface.html @@ -90,7 +90,7 @@ diff --git a/approved/0007-system-collator-selection.html b/approved/0007-system-collator-selection.html index 97bd2f6..e391702 100644 --- a/approved/0007-system-collator-selection.html +++ b/approved/0007-system-collator-selection.html @@ -90,7 +90,7 @@ diff --git a/approved/0008-parachain-bootnodes-dht.html b/approved/0008-parachain-bootnodes-dht.html index 345c46d..917f576 100644 --- a/approved/0008-parachain-bootnodes-dht.html +++ b/approved/0008-parachain-bootnodes-dht.html @@ -90,7 +90,7 @@ diff --git a/approved/0012-process-for-adding-new-collectives.html b/approved/0012-process-for-adding-new-collectives.html index bbf000a..2199d6c 100644 --- a/approved/0012-process-for-adding-new-collectives.html +++ b/approved/0012-process-for-adding-new-collectives.html @@ -90,7 +90,7 @@ diff --git a/approved/0014-improve-locking-mechanism-for-parachains.html b/approved/0014-improve-locking-mechanism-for-parachains.html index 43deca0..9a94cfd 100644 --- a/approved/0014-improve-locking-mechanism-for-parachains.html +++ b/approved/0014-improve-locking-mechanism-for-parachains.html @@ -90,7 +90,7 @@ diff --git a/approved/0022-adopt-encointer-runtime.html b/approved/0022-adopt-encointer-runtime.html index a45b562..9fdcb85 100644 --- a/approved/0022-adopt-encointer-runtime.html +++ 
b/approved/0022-adopt-encointer-runtime.html @@ -90,7 +90,7 @@ diff --git a/approved/0032-minimal-relay.html b/approved/0032-minimal-relay.html index cba52fd..7a74c5b 100644 --- a/approved/0032-minimal-relay.html +++ b/approved/0032-minimal-relay.html @@ -90,7 +90,7 @@ diff --git a/approved/0050-fellowship-salaries.html b/approved/0050-fellowship-salaries.html index 2c3fd88..35ccf1d 100644 --- a/approved/0050-fellowship-salaries.html +++ b/approved/0050-fellowship-salaries.html @@ -90,7 +90,7 @@ diff --git a/approved/0056-one-transaction-per-notification.html b/approved/0056-one-transaction-per-notification.html index 9ac8c13..dfd70ac 100644 --- a/approved/0056-one-transaction-per-notification.html +++ b/approved/0056-one-transaction-per-notification.html @@ -90,7 +90,7 @@ diff --git a/index.html b/index.html index 1afc31e..e6bc436 100644 --- a/index.html +++ b/index.html @@ -90,7 +90,7 @@ diff --git a/introduction.html b/introduction.html index 1afc31e..e6bc436 100644 --- a/introduction.html +++ b/introduction.html @@ -90,7 +90,7 @@ diff --git a/new/0066-add-smartcontracts-to-assethub.html b/new/0066-add-smartcontracts-to-assethub.html index c63c215..fe29197 100644 --- a/new/0066-add-smartcontracts-to-assethub.html +++ b/new/0066-add-smartcontracts-to-assethub.html @@ -90,7 +90,7 @@ diff --git a/print.html b/print.html index b7d7152..c8e143a 100644 --- a/print.html +++ b/print.html @@ -91,7 +91,7 @@ @@ -4223,119 +4223,6 @@ This information would be used in order to query the runtime API and retrieve th occupying. However, considering it's part of an unimported fork, the validator cannot call a runtime API on that block.

Adding the core_index to the CandidateReceipt would solve this problem and would enable systematic recovery for all dispute scenarios.

-

(source)

-

Table of Contents

- -

RFC-0059: Add a discovery mechanism for nodes based on their capabilities

-
- - - -
Start Date2023-12-18
DescriptionNodes having certain capabilities register themselves in the DHT to be discoverable
AuthorsPierre Krieger
-
-

Summary

-

This RFC proposes to make the mechanism of RFC #8 more generic by introducing the concept of "capabilities".

-

Implementations can implement certain "capabilities", such as serving old block headers or being a parachain bootnode.

-

The discovery mechanism of RFC #8 is extended to be able to discover nodes of specific capabilities.

-

Motivation

-

The Polkadot peer-to-peer network is made of nodes. Not all these nodes are equal. Some nodes store only the headers of recent blocks, some nodes store all the block headers and bodies since the genesis, some nodes store the storage of all blocks since the genesis, and so on.

-

It is currently not possible to know ahead of time (without connecting to it and asking) which nodes have which data available, and it is not easily possible to build a list of nodes that have a specific piece of data available.

-

If you want to download for example the header of block 500, you have to connect to a randomly-chosen node, ask it for block 500, and if it says that it doesn't have the block, disconnect and try another randomly-chosen node. -In certain situations such as downloading the storage of old blocks, nodes that have the information are relatively rare, and finding through trial and error a node that has the data can take a long time.

-

This RFC attempts to solve this problem by giving the possibility to build a list of nodes that are capable of serving specific data.

-

Stakeholders

-

Low-level client developers. -People interested in accessing the archive of the chain.

-

Explanation

-

Reading RFC #8 first might help with comprehension, as this RFC is very similar.

-

Please keep in mind while reading that everything below applies for both relay chains and parachains, except mentioned otherwise.

-

Capabilities

-

This RFC defines a list of so-called capabilities:

- -

In the context of the head of chain provider, the word "recent" means: any not-finalized-yet block that is equal to or an ancestor of a block that it has announced through a block announce, and any finalized block whose height is superior to its current finalized block minus 16. -This does not include blocks that have been pruned because they're not a descendant of its current finalized block. In other words, blocks that aren't a descendant of the current finalized block can be thrown away. -A gap of blocks is required due to race conditions: when a node finalizes a block, it takes some time for its peers to be made aware of this, during which they might send requests concerning older blocks. The exact gap is arbitrary.

-

Substrate is currently by default a head of chain provider. After it has finished warp syncing, it downloads the list of old blocks, after which it becomes a history provider. -If Substrate is instead configured as an archive node, then it downloads the state of all blocks since the genesis, after which it becomes an archive provider, history provider, and head of chain provider. -If block pruning is enabled and the chain is a relay chain, then Substrate unfortunately doesn't implement any of these capabilities, not even head of chain provider. This is considered a bug that should be fixed, see https://github.com/paritytech/polkadot-sdk/issues/2733.

-

DHT provider registration

-

This RFC heavily relies on the functionalities of the Kademlia DHT already in use by Polkadot. You can find a link to the specification here.

-

Implementations that have the history provider capability should register themselves as providers under the key sha256(concat("history", randomness)).

-

Implementations that have the archive provider capability should register themselves as providers under the key sha256(concat("archive", randomness)).

-

Implementations that have the parachain bootnode capability should register themselves as provider under the key sha256(concat(scale_compact(para_id), randomness)), as described in RFC 8.

-

"Register themselves as providers" consists in sending ADD_PROVIDER requests to nodes close to the key, as described in the Content provider advertisement section of the specification.

-

The value of randomness can be found in the randomness field when calling the BabeApi_currentEpoch function.

-

In order to avoid downtimes when the key changes, nodes should also register themselves as a secondary key that uses a value of randomness equal to the randomness field when calling BabeApi_nextEpoch.

-

Implementers should be aware that their implementation of Kademlia might already hash the key before XOR'ing it. The key is not meant to be hashed twice.

-

Implementations must not register themselves if they don't fulfill the capability yet. For example, a node configured to be an archive node but that is still building its archive state in the background must register itself only after it has finished building its archive.

-

Secondary DHTs

-

Implementations that have the history provider capability must also participate in a secondary DHT that comprises only of nodes with that capability. The protocol name of that secondary DHT must be /<genesis-hash>/kad/history.

-

Similarly, implementations that have the archive provider capability must also participate in a secondary DHT that comprises only of nodes with that capability and whose protocol name is /<genesis-hash>/kad/archive.

-

Just like implementations must not register themselves if they don't fulfill their capability yet, they must also not participate in the secondary DHT if they don't fulfill their capability yet.

-

Head of the chain providers

-

Implementations that have the head of the chain provider capability do not register themselves as providers, but instead are the nodes that participate in the main DHT. In other words, they are the nodes that serve requests of the /<genesis_hash>/kad protocol.

-

Any implementation that isn't a head of the chain provider (read: light clients) must not participate in the main DHT. This is already presently the case.

-

Implementations must not participate in the main DHT if they don't fulfill the capability yet. For example, a node that is still in the process of warp syncing must not participate in the main DHT. However, assuming that warp syncing doesn't last more than a few seconds, it is acceptable to ignore this requirement in order to avoid complicating implementations too much.

-

Drawbacks

-

None that I can see.

-

Testing, Security, and Privacy

-

The content of this section is basically the same as the one in RFC 8.

-

This mechanism doesn't add or remove any security by itself, as it relies on existing mechanisms.

-

Due to the way Kademlia works, it would become the responsibility of the 20 Polkadot nodes whose sha256(peer_id) is closest to the key (described in the explanations section) to store the list of nodes that have specific capabilities. -Furthermore, when a large number of providers are registered, only the providers closest to the key are kept, up to a certain implementation-defined limit.

-

For this reason, an attacker can abuse this mechanism by randomly generating libp2p PeerIds until they find the 20 entries closest to the key representing the target capability. They are then in control of the list of nodes with that capability. While doing this can in no way be actually harmful, it could lead to eclipse attacks.

-

Because the key changes periodically and isn't predictable, and assuming that the Polkadot DHT is sufficiently large, it is not realistic for an attack like this to be maintained in the long term.

-

Performance, Ergonomics, and Compatibility

-

Performance

-

The DHT mechanism generally has a low overhead, especially given that publishing providers is done only every 24 hours.

-

Doing a Kademlia iterative query then sending a provider record shouldn't take more than around 50 kiB in total of bandwidth for the parachain bootnode.

-

Assuming 1000 nodes with a specific capability, the 20 Polkadot full nodes corresponding to that capability will each receive a sudden spike of a few megabytes of networking traffic when the key rotates. Again, this is relatively negligible. If this becomes a problem, one can add a random delay before a node registers itself to be the provider of the key corresponding to BabeApi_next_epoch.

-

Maybe the biggest uncertainty is the traffic that the 20 Polkadot full nodes will receive from light clients that desire knowing the nodes with a capability. If this ever becomes a problem, this value of 20 is an arbitrary constant that can be increased for more redundancy.

-

Ergonomics

-

Irrelevant.

-

Compatibility

-

Irrelevant.

-

Prior Art and References

-

Unknown.

-

Unresolved Questions

-

While it fundamentally doesn't change much to this RFC, using BabeApi_currentEpoch and BabeApi_nextEpoch might be inappropriate. I'm not familiar enough with good practices within the runtime to have an opinion here. Should it be an entirely new pallet?

- -

This RFC would make it possible to reliably discover archive nodes, which would make it possible to reliably send archive node requests, something that isn't currently possible. This could solve the problem of finding archive RPC node providers by migrating archive-related request to using the native peer-to-peer protocol rather than JSON-RPC.

-

If we ever decide to break backwards compatibility, we could divide the "history" and "archive" capabilities in two, between nodes capable of serving older blocks and nodes capable of serving newer blocks. -We could even add to the peer-to-peer network nodes that are only capable of serving older blocks (by reading from a database) but do not participate in the head of the chain, and that just exist for historical purposes.

(source)

Table of Contents

Detail-heavy explanation of the RFC, suitable for explanation to an implementer of the changeset. This should address corner cases in detail and provide justification behind decisions, and provide rationale for how the design meets the solution requirements.

-

Drawbacks

+

Drawbacks

The allocator inside of the runtime will make the code size bigger, but the increase is not obvious. The allocator inside of the runtime may slow down (or speed up) the runtime; this is still not obvious.

We could ignore these drawbacks since they are not prominent. And the execution efficiency is highly decided by runtime developer. We could not prevent a poor efficiency if developer want to do it.

-

Testing, Security, and Privacy

+

Testing, Security, and Privacy

Keep the legacy allocator runtime test cases, and add new feature to compile test cases for v1 allocator spec. And then update the test asserts.

Update template runtime to enable v1 spec. Once the dev network runs well, it seems that the spec is implemented correctly.

-

Performance, Ergonomics, and Compatibility

-

Performance

+

Performance, Ergonomics, and Compatibility

+

Performance

As stated above, there is no obvious impact on performance. polkadot-sdk could offer a best-practice allocator for all chains, and third parties could also customize it themselves. So the performance could be improved over time.

-

Ergonomics

+

Ergonomics

Only runtime developers are affected: they just need to import a new crate and enable a new feature. It may also be convenient for other wasm-target languages to implement.

-

Compatibility

+

Compatibility

It's 100% compatible. Only some runtime configs and executor configs need to be deprecated.

For support new runtime spec, we MUST upgrade the client binary to support new spec of client part firstly.

We SHALL add an optional primitive crate to enable the version 1 spec and disable the legacy allocator by cargo feature. For the first year, we SHALL disable the v1 by default, and enable it by default starting in the next year.

-

Prior Art and References

+

Prior Art and References

-

Unresolved Questions

+

Unresolved Questions

None at this time.

- +

The content discussed with RFC-0004 is basically orthogonal, but it could still be considered together, and it is preferred that this RFC be implemented first.

This feature could make the Substrate runtime easier to support in other languages and easier to integrate into other ecosystems.

(source)

@@ -4479,16 +4366,16 @@ For the first year, we SHALL disable the v1 by default, and enable it by default AuthorsPierre Krieger -

Summary

+

Summary

Update the runtime-host interface to no longer make use of a host-side allocator.

-

Motivation

+

Motivation

The heap allocation of the runtime is currently controlled by the host using a memory allocator on the host side.

The API of many host functions consists in allocating a buffer. For example, when calling ext_hashing_twox_256_version_1, the host allocates a 32 bytes buffer using the host allocator, and returns a pointer to this buffer to the runtime. The runtime later has to call ext_allocator_free_version_1 on this pointer in order to free the buffer.

Even though no benchmark has been done, it is pretty obvious that this design is very inefficient. To continue with the example of ext_hashing_twox_256_version_1, it would be more efficient to instead write the output hash to a buffer that was allocated by the runtime on its stack and passed by pointer to the function. Allocating a buffer on the stack in the worst case scenario simply consists in decreasing a number, and in the best case scenario is free. Doing so would save many Wasm memory reads and writes by the allocator, and would save a function call to ext_allocator_free_version_1.

Furthermore, the existence of the host-side allocator has become questionable over time. It is implemented in a very naive way, and for determinism and backwards compatibility reasons it needs to be implemented exactly identically in every client implementation. Runtimes make substantial use of heap memory allocations, and each allocation needs to go twice through the runtime <-> host boundary (once for allocating and once for freeing). Moving the allocator to the runtime side, while it would increase the size of the runtime, would be a good idea. But before the host-side allocator can be deprecated, all the host functions that make use of it need to be updated to not use it.

-

Stakeholders

+

Stakeholders

No attempt was made at convincing stakeholders.

-

Explanation

+

Explanation

New host functions

This section contains a list of new host functions to introduce.

(func $ext_storage_read_version_2
@@ -4691,11 +4578,11 @@ The following other host functions are similarly also considered deprecated:

  • ext_allocator_free_version_1
  • ext_offchain_network_state_version_1
  • -

    Drawbacks

    +

    Drawbacks

    This RFC might be difficult to implement in Substrate due to the internal code design. It is not clear to the author of this RFC how difficult it would be.

    Prior Art

    The API of these new functions was heavily inspired by API used by the C programming language.

    -

    Unresolved Questions

    +

    Unresolved Questions

    The changes in this RFC would need to be benchmarked. This involves implementing the RFC and measuring the speed difference.

    It is expected that most host functions are faster or equal speed to their deprecated counterparts, with the following exceptions:

    -

    Drawbacks

    +

    Drawbacks

    None at present.

    -

    Prior Art and References

    +

    Prior Art and References

    This pricing model is based on the requirements from the basic linear solution proposed in RFC-1, which is a simple dynamic pricing model and only used as proof. The present model adds additional considerations to make the model more adaptable under real conditions.

    Future Possibilities

    This RFC, if accepted, shall be implemented in conjunction with RFC-1.

    @@ -4915,9 +4802,9 @@ OLD_PRICE = 1000 AuthorsPierre Krieger -

    Summary

    +

    Summary

    Improve the networking messages that query storage items from the remote, in order to reduce the bandwidth usage and number of round trips of light clients.

    -

    Motivation

    +

    Motivation

    Clients on the Polkadot peer-to-peer network can be divided into two categories: full nodes and light clients. So-called full nodes are nodes that store the content of the chain locally on their disk, while light clients are nodes that don't. In order to access for example the balance of an account, a full node can do a disk read, while a light client needs to send a network message to a full node and wait for the full node to reply with the desired value. This reply is in the form of a Merkle proof, which makes it possible for the light client to verify the exactness of the value.

    Unfortunately, this network protocol is suffering from some issues:

    Once Polkadot and Kusama will have transitioned to state_version = 1, which modifies the format of the trie entries, it will be possible to generate Merkle proofs that contain only the hashes of values in the storage. Thanks to this, it is already possible to prove the existence of a key without sending its entire value (only its hash), or to prove that a value has changed or not between two blocks (by sending just their hashes). Thus, the only reason why aforementioned issues exist is because the existing networking messages don't give the possibility for the querier to query this. This is what this proposal aims at fixing.

    -

    Stakeholders

    +

    Stakeholders

    This is the continuation of https://github.com/w3f/PPPs/pull/10, which itself is the continuation of https://github.com/w3f/PPPs/pull/5.

    -

    Explanation

    +

    Explanation

    The protobuf schema of the networking protocol can be found here: https://github.com/paritytech/substrate/blob/5b6519a7ff4a2d3cc424d78bc4830688f3b184c0/client/network/light/src/schema/light.v1.proto

    The proposal is to modify this protocol in this way:

    @@ -11,6 +11,7 @@ message Request {
    @@ -4987,26 +4874,26 @@ An alternative could have been to specify the child_trie_info for e
     Also note that child tries aren't considered as descendants of the main trie when it comes to the includeDescendants flag. In other words, if the request concerns the main trie, no content coming from child tries is ever sent back.

    This protocol keeps the same maximum response size limit as currently exists (16 MiB). It is not possible for the querier to know in advance whether its query will lead to a reply that exceeds the maximum size. If the reply is too large, the replier should send back only a limited number (but at least one) of requested items in the proof. The querier should then send additional requests for the rest of the items. A response containing none of the requested items is invalid.

    The server is allowed to silently discard some keys of the request if it judges that the number of requested keys is too high. This is in line with the fact that the server might truncate the response.

    -

    Drawbacks

    +

    Drawbacks

    This proposal doesn't handle one specific situation: what if a proof containing a single specific item would exceed the response size limit? For example, if the response size limit was 1 MiB, querying the runtime code (which is typically 1.0 to 1.5 MiB) would be impossible as it's impossible to generate a proof less than 1 MiB. The response size limit is currently 16 MiB, meaning that no single storage item must exceed 16 MiB.

    Unfortunately, because it's impossible to verify a Merkle proof before having received it entirely, parsing the proof in a streaming way is also not possible.

    A way to solve this issue would be to Merkle-ize large storage items, so that a proof could include only a portion of a large storage item. Since this would require a change to the trie format, it is not realistically feasible in a short time frame.

    -

    Testing, Security, and Privacy

    +

    Testing, Security, and Privacy

    The main security consideration concerns the size of replies and the resources necessary to generate them. It is for example easily possible to ask for all keys and values of the chain, which would take a very long time to generate. Since responses to this networking protocol have a maximum size, the replier should truncate proofs that would lead to the response being too large. Note that it is already possible to send a query that would lead to a very large reply with the existing network protocol. The only thing that this proposal changes is that it would make it less complicated to perform such an attack.

Implementers of the replier side should be careful to detect early on when a reply would exceed the maximum reply size, rather than unconditionally generate a reply, as this could take a very large amount of CPU, disk I/O, and memory. Existing implementations might currently be accidentally protected from such an attack thanks to the fact that requests have a maximum size, and thus that the list of keys in the query was bounded. After this proposal, this accidental protection would no longer exist.

    Malicious server nodes might truncate Merkle proofs even when they don't strictly need to, and it is not possible for the client to (easily) detect this situation. However, malicious server nodes can already do undesirable things such as throttle down their upload bandwidth or simply not respond. There is no need to handle unnecessarily truncated Merkle proofs any differently than a server simply not answering the request.

    -

    Performance, Ergonomics, and Compatibility

    -

    Performance

    +

    Performance, Ergonomics, and Compatibility

    +

    Performance

    It is unclear to the author of the RFC what the performance implications are. Servers are supposed to have limits to the amount of resources they use to respond to requests, and as such the worst that can happen is that light client requests become a bit slower than they currently are.

    -

    Ergonomics

    +

    Ergonomics

    Irrelevant.

    -

    Compatibility

    +

    Compatibility

    The prior networking protocol is maintained for now. The older version of this protocol could get removed in a long time.

    -

    Prior Art and References

    +

    Prior Art and References

    None. This RFC is a clean-up of an existing mechanism.

    -

    Unresolved Questions

    +

    Unresolved Questions

    None

    - +

    The current networking protocol could be deprecated in a long time. Additionally, the current "state requests" protocol (used for warp syncing) could also be deprecated in favor of this one.

    (source)

    Table of Contents

    @@ -5027,13 +4914,13 @@ Also note that child tries aren't considered as descendants of the main trie whe AuthorsJonas Gehrlein -

    Summary

    +

    Summary

    The Polkadot UC will generate revenue from the sale of available Coretime. The question then arises: how should we handle these revenues? Broadly, there are two reasonable paths – burning the revenue and thereby removing it from total issuance or divert it to the Treasury. This Request for Comment (RFC) presents arguments favoring burning as the preferred mechanism for handling revenues from Coretime sales.

    -

    Motivation

    +

    Motivation

    How to handle the revenue accrued from Coretime sales is an important economic question that influences the value of DOT and should be properly discussed before deciding for either of the options. Now is the best time to start this discussion.

    -

    Stakeholders

    +

    Stakeholders

    Polkadot DOT token holders.

    -

    Explanation

    +

    Explanation

    This RFC discusses potential benefits of burning the revenue accrued from Coretime sales instead of diverting them to Treasury. Here are the following arguments for it.

    It's in the interest of the Polkadot community to have a consistent and predictable Treasury income, because volatility in the inflow can be damaging, especially in situations when it is insufficient. As such, this RFC operates under the presumption of a steady and sustainable Treasury income flow, which is crucial for the Polkadot community's stability. The assurance of a predictable Treasury income, as outlined in a prior discussion here, or through other equally effective measures, serves as a baseline assumption for this argument.

    Consequently, we need not concern ourselves with this particular issue here. This naturally begs the question - why should we introduce additional volatility to the Treasury by aligning it with the variable Coretime sales? It's worth noting that Coretime revenues often exhibit an inverse relationship with periods when Treasury spending should ideally be ramped up. During periods of low Coretime utilization (indicated by lower revenue), Treasury should spend more on projects and endeavours to increase the demand for Coretime. This pattern underscores that Coretime sales, by their very nature, are an inconsistent and unpredictable source of funding for the Treasury. Given the importance of maintaining a steady and predictable inflow, it's unnecessary to rely on another volatile mechanism. Some might argue that we could have both: a steady inflow (from inflation) and some added bonus from Coretime sales, but burning the revenue would offer further benefits as described below.

    @@ -5048,170 +4935,6 @@ Also note that child tries aren't considered as descendants of the main trie whe

Collective Value Accrual: Following the previous argument, burning the revenue also generates some externality, because it reduces the overall issuance of DOT and thereby increases the value of each remaining token. In contrast to the aforementioned argument, this benefits all token holders collectively and equally. Therefore, I'd consider this the preferable option, because burning lets all token holders participate in Polkadot's success as Coretime usage increases.

    -

    (source)

    -

    Table of Contents

    - -

    RFC-0011: Add New Path to Account Creation on Asset Hubs

    -
    - - - -
    Start Date19 July 2023
    DescriptionProposal for a new secure means of creating an account on Asset Hub.
    AuthorsJoe Petrowski
    -
    -

    Summary

    -

    The Assets pallet includes a notion of asset "sufficiency". Sufficient assets, when transferred to -a non-existent account, will provide a sufficient reference that creates the account. That is, the -asset is sufficient to justify an account's existence, even in lieu of the existential deposit of -DOT.

    -

    While convenient for sufficient assets, the vast majority of assets are not sufficient. This RFC -proposes an opt-in means for users to create accounts from non-sufficient assets by swapping a -portion of the first transfer to acquire the existential deposit of DOT.

    -

    Motivation

    -

    The network can make an asset "sufficient" via governance call. However, the network is still -placing trust in the asset's administrator (which may be a third-party account or a protocol). The -asset's administrator could mint the asset and create many accounts without paying an adequate -storage deposit. For this reason, governance has been extremely strict in granting sufficiency, so -far only doing so to one asset (USDT).

    -

    With the introduction of the Asset Conversion pallet, the Asset Hub can offer a new path to account -creation. The current paths are:

    -
      -
    1. An account can have the existential deposit of DOT;
    2. -
    3. An account can have the minimum balance of a sufficient asset;
    4. -
    5. Someone else can create an account in the context of an asset class by placing a deposit in DOT. -This path is only available to the asset class's Admin or Freezer.
    6. -
    -

    This RFC proposes a fourth path that does not introduce prior steps for either the sender or -receiver of the asset.

    -

    Requirements

    -
      -
    • The system MUST be secure against economic attacks that allow an attacker to create a virtually -unlimited number of accounts.
    • -
    • The system SHOULD allow users to hold and transact in any asset without separately and priorly -acquiring DOT.
    • -
    -

    Stakeholders

    -
      -
    • Polkadot users
    • -
    • Wallet and UI/UX developers
    • -
    -

    Explanation

    -

    By using the Asset Conversion protocol, the system can convert any asset to DOT as long as there is -a path from that asset to DOT. As such, we can rely on the economic security provided by the -existential deposit of DOT by simply converting some amount of the asset being transferred to the -existential deposit.

    -

    This conversion only need happen when the account does not yet exist. When the destination account -does exist, the full amount of the asset can be transferred. This would mean that only the first -asset transfer to an account has some amount debited to acquire the DOT to create the account, but -subsequent transfers would always be in full.

    -

    The main benefit of this approach is that it removes the sender's need to know about the -desination's existence and the recipient's need to "prepare" an account by endowing it.

    -

    The primary tradeoff, of course, is that transactions like "send 10 USDT" could result in fewer -than 10 USDT arriving in the destination account. This can be solved by having the conversion be -opt-in for the sender.

    -

    Because the existential deposit is small (0.1 DOT on Asset Hub), and the user need not interact -with the DOT in any way -- because transaction fee payment can also be handled via Asset Conversion --- many users may find this path convenient in avoiding transfer errors due to non-existent -accounts or asset insufficiency.

    -

    Stripping out all other asset transfer-associated logic, this RFC proposes the following logic:

    -
    #![allow(unused)]
    -fn main() {
    -fn transfer(
    -    origin: OriginFor,
    -    asset: AssetId,
    -    destination: AccountId,
    -    amount: Balance,
    -    create_destination: bool,
    -    ..
    -) -> DispatchResult {
    -    let from = ensure_signed(origin)?;
    -    let details = Asset::<T, I>::get(&id).ok_or(Error::<T, I>::Unknown)?;
    -    if destination.exists() || !create_destination || details.sufficient {
    -        // Either the destination already exists (holds ED of DOT), the user does not want to create
    -        // the destination account, or the asset class is sufficient. We can just transfer the
    -        // asset as normal.
    -        Self::do_transfer(asset, from, destination, amount, ..)?;
    -    } else {
    -        // The destination does not exist and the user has opted in to create it via a swap.
    -        //
    -        // We will try to swap the asset provided for the existential deposit, depositing the ED in
    -        // the destination account. If the asset does not have an Asset Conversion pair with DOT or
    -        // the asset amount isn't enough to acquire the existential deposit, this will fail. But we
    -        // generally think (a) pairs will exist, and (b) the ED is small and UIs can easily verify
    -        // that this should succeed, so failures should be rare.
    -        //
    -        // The swap returns the amount of the asset consumed to acquire the ED.
    -        let consumed = Swap::swap_tokens_for_exact_tokens(
    -            from,                // sender
    -            vec![asset, dot],    // path, where `dot` is Multilocation {parents: 1, interior: Here}
    -            existential_deposit, // amount_out, we need the ED for the account
    -            destination,         // send_to
    -            ..
    -        )?;
    -        // We used some asset for the swap, so we have to subtract that from the amount.
    -        let remaining_asset_amount = amount.saturating_sub(consumed);
    -        // Now we transfer whatever amount is left, knowing that the destination account exists.
    -        // This could still fail if the remaining amount is less than the minimum balance required
    -        // by the asset class.
    -        Self::do_transfer(asset, from, destination, remaining_asset_amount, ..)?;
    -    }
    -}
    -}
    -

    Drawbacks

    -

    This solution would automatically convert some amount of another asset to DOT when acquiring DOT -was perhaps not the recipient's intent. However, this is opt-in.

    -

    Testing, Security, and Privacy

    -

    An attacker that wanted to bloat state by sending worthless assets to many new accounts would need -to put the DOT into an Asset Conversion pool with the asset (thereby making the asset not -worthless with respect to DOT). This would provide the same cost and economic security as just -sending the existential deposit of DOT to all the new accounts. This approach is no less secure -than the DOT-only existential deposit system.

    -

    This proposal introduces no privacy enhancements or reductions.

    -

    Performance, Ergonomics, and Compatibility

    -

    Performance

    -

    The function to transfer assets will need to charge a larger weight at dispatch to account for the -possibility of needing to perform a swap for DOT. It could return any unused weight.

    -

    The implementation could also include witness data as to the destination account's existence so -that the block builder can appropriately budget for the weight.

    -

    Ergonomics

    -

    This proposal would benefit the ergonomics of the system for end users by allowing all assets to -create destination accounts when needed.

    -

    Compatibility

    -

    This change would require changes to the Assets pallet to add the new account creation path.

    -

    Prior Art and References

    -

    Discussions with:

    -
      -
    • SR Labs auditors, in particular Jakob Lell and Louis Merlin
    • -
    • The monthly Asset Conversion ecosystem call, particular inspiration from Jakub Gregus
    • -
    -

    Unresolved Questions

    -

    None at this time.

    - -

    Not applicable.

    (source)

    Table of Contents

      @@ -5249,11 +4972,11 @@ create destination accounts when needed.

      AuthorsOliver Tale-Yazdi -

      Summary

      +

      Summary

      Introduces breaking changes to the BlockBuilder and Core runtime APIs.
      A new function BlockBuilder::last_inherent is introduced and the return value of Core::initialize_block is changed to an enum.
      The versions of both APIs are bumped; BlockBuilder to 7 and Core to 5.

      -

      Motivation

      +

      Motivation

      There are three main features that motivate for this RFC:

      1. Multi-Block-Migrations: These make it possible to split a migration over multiple blocks.
      2. @@ -5265,7 +4988,7 @@ The versions of both APIs are bumped; BlockBuilder to 7 and C
      3. The runtime can tell the block author to not include any transactions in the block.
      4. The runtime can execute logic right after all pallet-provided inherents have been applied.
      -

      Stakeholders

      +

      Stakeholders

      • Substrate Maintainers: They have to implement this, including tests, audit and maintenance burden.
      • @@ -5273,7 +4996,7 @@ maintenance burden.
      • Polkadot Parachain Teams: They also have to adapt to the breaking changes but then eventually have multi-block migrations available.
      -

      Explanation

      +

      Explanation

      Core::initialize_block

      This runtime API function is changed from returning () to ExtrinsicInclusionMode:

      #![allow(unused)]
      @@ -5293,26 +5016,26 @@ multi-block migrations available.
       

      1. Multi-Block-Migrations: The runtime is being put into lock-down mode for the duration of the migration process by returning OnlyInherents from initialize_block. This ensures that no user provided transaction can interfere with the migration process. It is absolutely necessary to ensure this, since otherwise a transaction could call into un-migrated storage and violate storage invariants. The entry-point for the MBM logic is last_inherent. This is a good spot, because any data that is touched in inherents, is not MBM-migratable anyway. It could also be done before all other inherents or at the end of the block in finalize_block, but there is no downside from doing it in last_inherent and the other two features are in favour of this.

      2. poll becomes possible by using last_inherent as entry-point. It would not be possible to use a pallet inherent like System::last_inherent to achieve this for two reasons. First is that pallets do not have access to AllPalletsWithSystem that is required to invoke the poll hook on all pallets. Second is that the runtime does currently not enforce an order of inherents.

      3. System::PostInherents can be done in the same manner as poll.

      -

      Drawbacks

      +

      Drawbacks

      As noted in the review comments: this cements some assumptions about the order of inherents into the BlockBuilder traits. It was criticized for being too rigid in its assumptions.

      -

      Testing, Security, and Privacy

      +

      Testing, Security, and Privacy

      Compliance of a block author can be tested by adding specific code to the last_inherent hook and checking that it always executes. The new logic of initialize_block can be tested by checking that the block-builder will skip transactions and optional hooks when OnlyInherents is returned.

      Security: n/a

      Privacy: n/a

      -

      Performance, Ergonomics, and Compatibility

      -

      Performance

      +

      Performance, Ergonomics, and Compatibility

      +

      Performance

      The performance overhead is minimal in the sense that no clutter was added after fulfilling the requirements. A slight performance penalty is expected from invoking last_inherent once per block.

      -

      Ergonomics

      +

      Ergonomics

      The new interface allows for more extensible runtime logic. In the future, this will be utilized for multi-block-migrations which should be a huge ergonomic advantage for parachain developers.

      -

      Compatibility

      +

      Compatibility

      The advice here is OPTIONAL and outside of the RFC. To not degrade user experience, it is recommended to ensure that an updated node can still import historic blocks.

      -

      Prior Art and References

      +

      Prior Art and References

      The RFC is currently being implemented in polkadot-sdk#1781. Related issues and merge requests:

      -

      Unresolved Questions

      +

      Unresolved Questions

      Please suggest a better name for BlockExecutiveMode. We already tried: RuntimeExecutiveMode, ExtrinsicInclusionMode. The names of the modes Normal and Minimal were also called AllExtrinsics and OnlyInherents, so if you have naming preferences; please post them.
      => renamed to ExtrinsicInclusionMode

      Is post_inherents more consistent instead of last_inherent? Then we should change it.
      => renamed to last_inherent

      - +

      The long-term future here is to move the block building logic into the runtime. Currently there is a tight dance between the block author and the runtime; the author has to call into different runtime functions in quick succession and exact order. Any misstep causes the built block to be invalid.
      This can be unified and simplified by moving both parts of the logic into the runtime.

      (source)

      @@ -5360,9 +5083,9 @@ This can be unified and simplified by moving both parts of the logic into the ru AuthorsJonas Gehrlein -

      Summary

      +

      Summary

      This document is a proposal for restructuring the bulk markets in the Polkadot UC's coretime allocation system to improve efficiency and fairness. The proposal suggests separating the BULK_PERIOD into MARKET_PERIOD and RENEWAL_PERIOD, allowing for a market-driven price discovery through a clearing price Dutch auction during the MARKET_PERIOD followed by renewal offers at the MARKET_PRICE during the RENEWAL_PERIOD. The new system ensures synchronicity between renewal and market prices, fairness among all current tenants, and efficient price discovery, while preserving price caps to provide security for current tenants. It seeks to start a discussion about the possibility of long-term leases.

      -

      Motivation

      +

      Motivation

      While the initial RFC-1 has provided a robust framework for Coretime allocation within the Polkadot UC, this proposal builds upon its strengths and uses many provided building blocks to address some areas that could be further improved.

      In particular, this proposal introduces the following changes:

        @@ -5382,14 +5105,14 @@ This can be unified and simplified by moving both parts of the logic into the ru

      The premise of this proposal is to reduce complexity by introducing a common price (that develops relative to capacity consumption of Polkadot UC), while still allowing for market forces to add efficiency. Long-term lease owners still receive priority IF they can pay (close to) the market price. This prevents a situation where the renewal price significantly diverges from market prices, which would allow for core captures. While maximum price increase certainty might seem contradictory to efficient price discovery, the proposed model aims to balance these elements, utilizing market forces to determine the price and allocate cores effectively within certain bounds. It must be stated, that potential price increases remain predictable (in the worst-case) but could be higher than in the originally proposed design. The argument remains, however, that we need to allow market forces to affect all prices for an efficient Coretime pricing and allocation.

      Ultimately, the framework proposed here adheres to all requirements stated in RFC-1.

      -

      Stakeholders

      +

      Stakeholders

      Primary stakeholder sets are:

      • Protocol researchers and developers, largely represented by the Polkadot Fellowship and Parity Technologies' Engineering division.
      • Polkadot Parachain teams both present and future, and their users.
      • Polkadot DOT token holders.
      -

      Explanation

      +

      Explanation

      Bulk Markets

      The BULK_PERIOD has been restructured into two primary segments: the MARKET_PERIOD and RENEWAL_PERIOD, along with an auxiliary SETTLEMENT_PERIOD. This latter period doesn't necessitate any actions from the coretime system chain, but it facilitates a more efficient allocation of coretime in secondary markets. A significant departure from the original proposal lies in the timing of renewals, which now occur post-market phase. This adjustment aims to harmonize renewal prices with their market counterparts, ensuring a more consistent and equitable pricing model.

      Market Period (14 days)

      @@ -5436,12 +5159,12 @@ This can be unified and simplified by moving both parts of the logic into the ru
      • Long-term Coretime: The Polkadot UC is undergoing a transition from two-year leases without an instantaneous market to a model encompassing instantaneous and one-month leases. This shift seems to pivot from one extreme to another. While the introduction of short-term leases, both instantaneous and for one month, is a constructive move to lower barriers to entry and promote experimentation, it seems to be the case that established projects might benefit from more extended lease options. We could consider offering another product, such as a six-month Coretime lease, using the same mechanism described herein. Although the majority of leases would still be sold on a one-month basis, the addition of this option would enhance market efficiency as it would strengthen the impact of a secondary market.
      -

      Drawbacks

      +

      Drawbacks

      There are trade-offs that arise from this proposal, compared to the initial model. The most notable one is that here, I prioritize requirement 6 over requirement 2. The price, in the very "worst-case" (meaning a huge explosion in demand for coretime) could lead to a much larger increase of prices in Coretime. From an economic perspective, this (rare edge case) would also mean that we'd vastly underprice Coretime in the original model, leading to highly inefficient allocations.

      -

      Prior Art and References

      +

      Prior Art and References

      This RFC builds extensively on the available ideas put forward in RFC-1.

      Additionally, I want to express a special thanks to Samuel Haefner and Shahar Dobzinski for fruitful discussions and helping me structure my thoughts.

      -

      Unresolved Questions

      +

      Unresolved Questions

      The technical feasibility needs to be assessed.

      (source)

      Table of Contents

      @@ -5473,9 +5196,9 @@ This can be unified and simplified by moving both parts of the logic into the ru AuthorsChaosDAO -

      Summary

      +

      Summary

      This RFC proposes a change to the duration of the confirmation period for the treasurer track from 3 hours to at least 48 hours.

      -

      Motivation

      +

      Motivation

      Track parameters for Polkadot OpenGov should be configured in a way that their "difficulty" increases relative to the power associated with their respective origin. When we look at the confirmation periods for treasury based tracks, we can see that this is clearly the case - with the one notable exception to the trend being the treasurer track:

      @@ -5488,7 +5211,7 @@ This can be unified and simplified by moving both parts of the logic into the ru

      The confirmation period is one of the last lines of defence for the collective Polkadot stakeholders to react to a potentially bad referendum and vote NAY in order for its confirmation period to be aborted.

      Since the power / privilege level of the treasurer track is greater than that of the big spender track – their confirmation period should be either equal, or the treasurer track's should be higher (note: currently the big spender track has a longer confirmation period than even the root track).

      -

      Stakeholders

      +

      Stakeholders

      The primary stakeholders of this RFC are:

      • DOT token holders – as this affects the protocol's treasury
      • @@ -5498,17 +5221,17 @@ This can be unified and simplified by moving both parts of the logic into the ru
      • Leemo - expressed interest to change this parameter
      • Paradox - expressed interest to change this parameter
      -

      Explanation

      +

      Explanation

      This RFC proposes to change the duration of the confirmation period for the treasurer track. In order to achieve that, the confirm_period parameter for the treasurer track in runtime/polkadot/src/governance/tracks.rs must be changed.

      Currently it is set to confirm_period: 3 * HOURS

      It should be changed to confirm_period: 48 * HOURS as a minimum.

      It may make sense for it to be changed to a value greater than 48 hours since the treasurer track has more power than the big spender track (48 hour confirmation period); however, the root track's confirmation period is 24 hours. 48 hours may be on the upper bounds of a trade-off between security and flexibility.

      -

      Drawbacks

      +

      Drawbacks

      The drawback of changing the treasurer track's confirmation period would be that the lifecycle of a referendum submitted on the treasurer track would ultimately be longer. However, the security of the protocol and its treasury should take priority here.

      -

      Testing, Security, and Privacy

      +

      Testing, Security, and Privacy

      This change will enhance / improve the security of the protocol as it relates to its treasury. The confirmation period is one of the last lines of defence for the collective Polkadot stakeholders to react to a potentially bad referendum and vote NAY in order for its confirmation period to be aborted. It makes sense for the treasurer track's confirmation period duration to be either equal to, or higher than, the big spender track confirmation period.

      -

      Performance, Ergonomics, and Compatibility

      -

      Performance

      +

      Performance, Ergonomics, and Compatibility

      +

      Performance

      This is a simple change (code wise) which should not affect the performance of the Polkadot protocol, outside of increasing the duration of the confirmation period on the treasurer track.

      Ergonomics & Compatibility

      If the proposal alters exposed interfaces to developers or end-users, which types of usage patterns have been optimized for?

      @@ -5518,14 +5241,14 @@ This can be unified and simplified by moving both parts of the logic into the ru
    • Polkassembly - directly uses on-chain data via rpc to fetch trackInfo so the change will be automatically reflected.
    • SubSquare - scan script will update their app to the latest parameters and it will be automatically reflected in their app.
    • -

      Prior Art and References

      +

      Prior Art and References

      N/A

      -

      Unresolved Questions

      +

      Unresolved Questions

      The proposed change to the confirmation period duration for the treasurer track is to set it to 48 hours. This is equal to the current confirmation period for the big spender track.

      Typically it seems that track parameters increase in difficulty (duration, etc.) based on the power level of their associated origin.

      The longest confirmation period is that of the big spender, at 48 hours. There may be value in discussing whether or not the treasurer track confirmation period should be longer than 48 hours – a discussion of the trade-offs between security vs flexibility/agility.

      As a side note, the root track confirmation period is 24 hours.

      - +

      This RFC hopefully reminds the greater Polkadot community that it is possible to submit changes to the parameters of Polkadot OpenGov, and the greater protocol as a whole through the RFC process.

      (source)

      Table of Contents

      @@ -5557,7 +5280,7 @@ This can be unified and simplified by moving both parts of the logic into the ru
      Track DescriptionConfirmation Period Duration
      Small Tipper10 Min
      ChaosDAO
      -

      Summary

      +

      Summary

      This RFC proposes to make modifications to voting power delegations as part of the Conviction Voting pallet. The changes being proposed include:

      1. Allow a Delegator to vote independently of their Delegate if they so desire.
      2. @@ -5565,7 +5288,7 @@ This can be unified and simplified by moving both parts of the logic into the ru
      3. Make a change so that when a delegate votes abstain their delegated votes also vote abstain.
      4. Allow a Delegator to delegate/ undelegate their votes for all tracks with a single call.
      -

      Motivation

      +

      Motivation

      It has become clear since the launch of OpenGov that there are a few common tropes which pop up time and time again:

      1. The frequency of referenda is often too high for network participants to have sufficient time to review, comprehend, and ultimately vote on each individual referendum. This means that these network participants end up being inactive in on-chain governance.
      2. @@ -5573,13 +5296,13 @@ This can be unified and simplified by moving both parts of the logic into the ru
      3. Delegating votes for all tracks currently requires long batched calls which result in high fees for the Delegator - resulting in a reluctance from many to delegate their votes.

      We believe (based on feedback from token holders with a larger stake in the network) that if there were some changes made to delegation mechanics, these larger stake holders would be more likely to delegate their voting power to active network participants – thus greatly increasing the support turnout.

      -

      Stakeholders

      +

      Stakeholders

      The primary stakeholders of this RFC are:

      • The Polkadot Technical Fellowship who will have to research and implement the technical aspects of this RFC
      • DOT token holders in general
      -

      Explanation

      +

      Explanation

      This RFC proposes to make 4 changes to the convictionVoting pallet logic in order to improve the user experience of those delegating their voting power to another account.

      1. @@ -5595,21 +5318,21 @@ This can be unified and simplified by moving both parts of the logic into the ru

        Allow a Delegator to delegate/ undelegate their votes for all tracks with a single call - in order to delegate votes across all tracks, a user must batch 15 calls - resulting in high costs for delegation. A single call for delegate_all/ undelegate_all would reduce the complexity and therefore costs of delegations considerably for prospective Delegators.

      -

      Drawbacks

      +

      Drawbacks

      We do not foresee any drawbacks by implementing these changes. If anything we believe that this should help to increase overall voter turnout (via the means of delegation) which we see as a net positive.

      -

      Testing, Security, and Privacy

      +

      Testing, Security, and Privacy

      We feel that the Polkadot Technical Fellowship would be the most competent collective to identify the testing requirements for the ideas presented in this RFC.

      -

      Performance, Ergonomics, and Compatibility

      -

      Performance

      +

      Performance, Ergonomics, and Compatibility

      +

      Performance

      This change may add extra chain storage requirements on Polkadot, especially with respect to nested delegations.

      Ergonomics & Compatibility

      The change to add nested delegations may affect governance interfaces such as Nova Wallet who will have to apply changes to their indexers to support nested delegations. It may also affect the Polkadot Delegation Dashboard as well as Polkassembly & SubSquare.

      We want to highlight the importance for ecosystem builders to create a mechanism for indexers and wallets to be able to understand that changes have occurred such as increasing the pallet version, etc.

      -

      Prior Art and References

      +

      Prior Art and References

      N/A

      -

      Unresolved Questions

      +

      Unresolved Questions

      N/A

      - +

      Additionally we would like to re-open the conversation about the potential for there to be free delegations. This was discussed by Dr Gavin Wood at Sub0 2022 and we feel like this would go a great way towards increasing the amount of network participants that are delegating: https://youtu.be/hSoSA6laK3Q?t=526

      Overall, we strongly feel that delegations are a great way to increase voter turnout, and the ideas presented in this RFC would hopefully help in that aspect.

      (source)

      @@ -5639,9 +5362,9 @@ This can be unified and simplified by moving both parts of the logic into the ru AuthorsSebastian Kunert -

      Summary

      +

      Summary

      This RFC proposes a new host function for parachains, storage_proof_size. It shall provide the size of the currently recorded storage proof to the runtime. Runtime authors can use the proof size to improve block utilization by retroactively reclaiming unused storage weight.

      -

      Motivation

      +

      Motivation

      The number of extrinsics that are included in a parachain block is limited by two constraints: execution time and proof size. FRAME weights cover both concepts, and block-builders use them to decide how many extrinsics to include in a block. However, these weights are calculated ahead of time by benchmarking on a machine with reference hardware. The execution-time properties of the state-trie and its storage items are unknown at benchmarking time. Therefore, we make some assumptions about the state-trie:

      • Trie Depth: We assume a trie depth to account for intermediary nodes.
      • @@ -5650,12 +5373,12 @@ This can be unified and simplified by moving both parts of the logic into the ru

        These pessimistic assumptions lead to an overestimation of storage weight, negatively impacting block utilization on parachains.

        In addition, the current model does not account for multiple accesses to the same storage items. While these repetitive accesses will not increase storage-proof size, the runtime-side weight monitoring will account for them multiple times. Since the proof size is completely opaque to the runtime, we can not implement retroactive storage weight correction.

        A solution must provide a way for the runtime to track the exact storage-proof size consumed on a per-extrinsic basis.

        -

        Stakeholders

        +

        Stakeholders

        • Parachain Teams: They MUST include this host function in their runtime and node.
        • Light-client Implementors: They SHOULD include this host function in their runtime and node.
        -

        Explanation

        +

        Explanation

        This RFC proposes a new host function that exposes the storage-proof size to the runtime. As a result, runtimes can implement storage weight reclaiming mechanisms that improve block utilization.

        This RFC proposes the following host function signature:

        #![allow(unused)]
        @@ -5663,14 +5386,14 @@ This can be unified and simplified by moving both parts of the logic into the ru
         fn ext_storage_proof_size_version_1() -> u64;
         }

        The host function MUST return an unsigned 64-bit integer value representing the current proof size. In block-execution and block-import contexts, this function MUST return the current size of the proof. To achieve this, parachain node implementors need to enable proof recording for block imports. In other contexts, this function MUST return 18446744073709551615 (u64::MAX), which represents disabled proof recording.

        -

        Performance, Ergonomics, and Compatibility

        -

        Performance

        +

        Performance, Ergonomics, and Compatibility

        +

        Performance

        Parachain nodes need to enable proof recording during block import to correctly implement the proposed host function. Benchmarking conducted with balance transfers has shown a performance reduction of around 0.6% when proof recording is enabled.

        -

        Ergonomics

        +

        Ergonomics

        The host function proposed in this RFC allows parachain runtime developers to keep track of the proof size. Typical usage patterns would be to keep track of the overall proof size or the difference between subsequent calls to the host function.

        -

        Compatibility

        +

        Compatibility

        Parachain teams will need to include this host function to upgrade.

        -

        Prior Art and References

        +

        Prior Art and References

        • Pull Request including proposed host function: PoV Reclaim (Clawback) Node Side.
        • Issue with discussion: [FRAME core] Clawback PoV Weights For Dispatchables
        • @@ -5706,24 +5429,24 @@ This can be unified and simplified by moving both parts of the logic into the ru AuthorsBastian Köcher -

          Summary

          +

          Summary

          When rotating/generating the SessionKeys of a node, the node calls into the runtime using the SessionKeys::generate_session_keys runtime api. This runtime api function needs to be changed to add an extra parameter owner and to change the return value to also include the proof of ownership. The owner should be the account id of the account setting the SessionKeys on chain to allow the on chain logic the verification of the proof. The on chain logic is then able to proof the possession of the private keys of the SessionKeys using the proof.

          -

          Motivation

          +

          Motivation

          When a user sets new SessionKeys on chain the chain can currently not ensure that the user actually has control over the private keys of the SessionKeys. With the RFC applied the chain is able to ensure that the user actually is in possession of the private keys.

          -

          Stakeholders

          +

          Stakeholders

          • Polkadot runtime implementors
          • Polkadot node implementors
          • Validator operators
          -

          Explanation

          +

          Explanation

          We are first going to explain the proof format being used:

          #![allow(unused)]
           fn main() {
          @@ -5756,26 +5479,26 @@ actual exported function signature looks like:

          already gets the proof passed as Vec<u8>. This proof needs to be decoded to the actual Proof type as explained above. The proof and the SCALE encoded account_id of the sender are used to verify the ownership of the SessionKeys.

          -

          Drawbacks

          +

          Drawbacks

          Validator operators need to pass the their account id when rotating their session keys in a node. This will require updating some high level docs and making users familiar with the slightly changed ergonomics.

          -

          Testing, Security, and Privacy

          +

          Testing, Security, and Privacy

          Testing of the new changes is quite easy as it only requires passing an appropriate owner for the current testing context. The changes to the proof generation and verification got audited to ensure they are correct.

          -

          Performance, Ergonomics, and Compatibility

          -

          Performance

          +

          Performance, Ergonomics, and Compatibility

          +

          Performance

          Does not have any impact on the overall performance, only setting SessionKeys will require more weight.

          -

          Ergonomics

          +

          Ergonomics

          If the proposal alters exposed interfaces to developers or end-users, which types of usage patterns have been optimized for?

          -

          Compatibility

          +

          Compatibility

          Introduces a new version of the SessionKeys runtime api. Thus, nodes should be updated before a runtime is enacted that contains these changes otherwise they will fail to generate session keys.

          -

          Prior Art and References

          +

          Prior Art and References

          None.

          -

          Unresolved Questions

          +

          Unresolved Questions

          None.

          - +

          Substrate implementation of the RFC.

          (source)

          Table of Contents

          @@ -5808,16 +5531,16 @@ a runtime is enacted that contains these changes otherwise they will fail to gen AuthorsPierre Krieger -

          Summary

          +

          Summary

          Rather than enforce a limit to the total memory consumption on the client side by loading the value at :heappages, enforce that limit on the runtime side.

          -

          Motivation

          +

          Motivation

          From the early days of Substrate up until recently, the runtime was present in two forms: the wasm runtime (wasm bytecode passed through an interpreter) and the native runtime (native code directly run by the client).

          Since the wasm runtime has a lower amount of available memory (4 GiB maximum) compared to the native runtime, and in order to ensure sure that the wasm and native runtimes always produce the same outcome, it was necessary to clamp the amount of memory available to both runtimes to the same value.

          In order to achieve this, a special storage key (a "well-known" key) :heappages was introduced and represents the number of "wasm pages" (one page equals 64kiB) of memory that are available to the memory allocator of the runtimes. If this storage key is absent, it defaults to 2048, which is 128 MiB.

          The native runtime has since then been disappeared, but the concept of "heap pages" still exists. This RFC proposes a simplification to the design of Polkadot by removing the concept of "heap pages" as is currently known, and proposes alternative ways to achieve the goal of limiting the amount of memory available.

          -

          Stakeholders

          +

          Stakeholders

          Client implementers and low-level runtime developers.

          -

          Explanation

          +

          Explanation

          This RFC proposes the following changes to the client:

          • The client no longer considers :heappages as special.
          • @@ -5843,28 +5566,141 @@ a runtime is enacted that contains these changes otherwise they will fail to gen

          Each parachain can choose the option that they prefer, but the author of this RFC strongly suggests either option C or B.

          -

          Drawbacks

          +

          Drawbacks

          In case of path A, there is one situation where the behaviour pre-RFC is not equivalent to the one post-RFC: when a host function that performs an allocation (for example ext_storage_get) is called, without this RFC this allocation might fail due to reaching the maximum heap pages, while after this RFC this will always succeed. This is most likely not a problem, as storage values aren't supposed to be larger than a few megabytes at the very maximum.

          In the unfortunate event where the runtime runs out of memory, path B would make it more difficult to relax the memory limit, as we would need to re-upload the entire Wasm, compared to updating only :heappages in path A or before this RFC. In the case where the runtime runs out of memory only in the specific event where the Wasm runtime is modified, this could brick the chain. However, this situation is no different than the thousands of other ways that a bug in the runtime can brick a chain, and there's no reason to be particularily worried about this situation in particular.

          -

          Testing, Security, and Privacy

          +

          Testing, Security, and Privacy

          This RFC would reduce the chance of a consensus issue between clients. The :heappages are a rather obscure feature, and it is not clear what happens in some corner cases such as the value being too large (error? clamp?) or malformed. This RFC would completely erase these questions.

          -

          Performance, Ergonomics, and Compatibility

          -

          Performance

          +

          Performance, Ergonomics, and Compatibility

          +

          Performance

          In case of path A, it is unclear how performances would be affected. Path A consists in moving client-side operations to the runtime without changing these operations, and as such performance differences are expected to be minimal. Overall, we're talking about one addition/subtraction per malloc and per free, so this is more than likely completely negligible.

          In case of path B and C, the performance gain would be a net positive, as this RFC strictly removes things.

          -

          Ergonomics

          +

          Ergonomics

          This RFC would isolate the client and runtime more from each other, making it a bit easier to reason about the client or the runtime in isolation.

          -

          Compatibility

          +

          Compatibility

          Not a breaking change. The runtime-side changes can be applied immediately (without even having to wait for changes in the client), then as soon as the runtime is updated, the client can be updated without any transition period. One can even consider updating the client before the runtime, as it corresponds to path C.

          -

          Prior Art and References

          +

          Prior Art and References

          None.

          -

          Unresolved Questions

          +

          Unresolved Questions

          None.

          - +

          This RFC follows the same path as https://github.com/polkadot-fellows/RFCs/pull/4 by scoping everything related to memory allocations to the runtime.

          +

          (source)

          +

          Table of Contents

          + +

          RFC-0059: Add a discovery mechanism for nodes based on their capabilities

          +
          + + + +
          Start Date2023-12-18
          DescriptionNodes having certain capabilities register themselves in the DHT to be discoverable
          AuthorsPierre Krieger
          +
          +

          Summary

          +

          This RFC proposes to make the mechanism of RFC #8 more generic by introducing the concept of "capabilities".

          +

          Implementations can implement certain "capabilities", such as serving old block headers or being a parachain bootnode.

          +

          The discovery mechanism of RFC #8 is extended to be able to discover nodes of specific capabilities.

          +

          Motivation

          +

          The Polkadot peer-to-peer network is made of nodes. Not all these nodes are equal. Some nodes store only the headers of recently blocks, some nodes store all the block headers and bodies since the genesis, some nodes store the storage of all blocks since the genesis, and so on.

          +

          It is currently not possible to know ahead of time (without connecting to it and asking) which nodes have which data available, and it is not easily possible to build a list of nodes that have a specific piece of data available.

          +

          If you want to download for example the header of block 500, you have to connect to a randomly-chosen node, ask it for block 500, and if it says that it doesn't have the block, disconnect and try another randomly-chosen node. +In certain situations such as downloading the storage of old blocks, nodes that have the information are relatively rare, and finding through trial and error a node that has the data can take a long time.

          +

          This RFC attempts to solve this problem by giving the possibility to build a list of nodes that are capable of serving specific data.

          +

          Stakeholders

          +

          Low-level client developers. +People interested in accessing the archive of the chain.

          +

          Explanation

          +

          Reading RFC #8 first might help with comprehension, as this RFC is very similar.

          +

          Please keep in mind while reading that everything below applies for both relay chains and parachains, except mentioned otherwise.

          +

          Capabilities

          +

          This RFC defines a list of so-called capabilities:

          +
            +
          • Head of chain provider. An implementation with this capability must be able to serve to other nodes block headers, block bodies, justifications, calls proofs, and storage proofs of "recent" (see below) blocks, and, for relay chains, to serve to other nodes warp sync proofs where the starting block is a session change block and must participate in Grandpa and Beefy gossip.
          • +
          • History provider. An implementation with this capability must be able to serve to other nodes block headers and block bodies of any block since the genesis, and must be able to serve to other nodes justifications of any session change block since the genesis up until and including their currently finalized block.
          • +
          • Archive provider. This capability is a superset of History provider. In addition to the requirements of History provider, an implementation with this capability must be able to serve call proofs and storage proof requests of any block since the genesis up until and including their currently finalized block.
          • +
          • Parachain bootnode (only for relay chains). An implementation with this capability must be able to serve the network request described in RFC 8.
          • +
          +

          In the context of the head of chain provider, the word "recent" means: any not-finalized-yet block that is equal to or an ancestor of a block that it has announced through a block announce, and any finalized block whose height is superior to its current finalized block minus 16. +This does not include blocks that have been pruned because they're not a descendant of its current finalized block. In other words, blocks that aren't a descendant of the current finalized block can be thrown away. +A gap of blocks is required due to race conditions: when a node finalizes a block, it takes some time for its peers to be made aware of this, during which they might send requests concerning older blocks. The exact gap is arbitrary.

          +

          Substrate is currently by default a head of chain provider provider. After it has finished warp syncing, it downloads the list of old blocks, after which it becomes a history provider. +If Substrate is instead configured as an archive node, then it downloads the state of all blocks since the genesis, after which it becomes an archive provider, history provider, and head of chain provider. +If blocks pruning is enabled and the chain is a relay chain, then Substrate unfortunately doesn't implement any of these capabilities, not even head of chain provider. This is considered as a bug that should be fixed, see https://github.com/paritytech/polkadot-sdk/issues/2733.

          +

          DHT provider registration

          +

          This RFC heavily relies on the functionalities of the Kademlia DHT already in use by Polkadot. You can find a link to the specification here.

          +

          Implementations that have the history provider capability should register themselves as providers under the key sha256(concat("history", randomness)).

          +

          Implementations that have the archive provider capability should register themselves as providers under the key sha256(concat("archive", randomness)).

          +

          Implementations that have the parachain bootnode capability should register themselves as provider under the key sha256(concat(scale_compact(para_id), randomness)), as described in RFC 8.

          +

          "Register themselves as providers" consists in sending ADD_PROVIDER requests to nodes close to the key, as described in the Content provider advertisement section of the specification.

          +

          The value of randomness can be found in the randomness field when calling the BabeApi_currentEpoch function.

          +

          In order to avoid downtimes when the key changes, nodes should also register themselves as a secondary key that uses a value of randomness equal to the randomness field when calling BabeApi_nextEpoch.

          +

          Implementers should be aware that their implementation of Kademlia might already hash the key before XOR'ing it. The key is not meant to be hashed twice.

          +

          Implementations must not register themselves if they don't fulfill the capability yet. For example, a node configured to be an archive node but that is still building its archive state in the background must register itself only after it has finished building its archive.

          +

          Secondary DHTs

          +

          Implementations that have the history provider capability must also participate in a secondary DHT that comprises only of nodes with that capability. The protocol name of that secondary DHT must be /<genesis-hash>/kad/history.

          +

          Similarly, implementations that have the archive provider capability must also participate in a secondary DHT that comprises only of nodes with that capability and whose protocol name is /<genesis-hash>/kad/archive.

          +

          Just like implementations must not register themselves if they don't fulfill their capability yet, they must also not participate in the secondary DHT if they don't fulfill their capability yet.

          +

          Head of the chain providers

          +

          Implementations that have the head of the chain provider capability do not register themselves as providers, but instead are the nodes that participate in the main DHT. In other words, they are the nodes that serve requests of the /<genesis_hash>/kad protocol.

          +

          Any implementation that isn't a head of the chain provider (read: light clients) must not participate in the main DHT. This is already presently the case.

          +

          Implementations must not participate in the main DHT if they don't fulfill the capability yet. For example, a node that is still in the process of warp syncing must not participate in the main DHT. However, assuming that warp syncing doesn't last more than a few seconds, it is acceptable to ignore this requirement in order to avoid complicating implementations too much.

          +

          Drawbacks

          +

          None that I can see.

          +

          Testing, Security, and Privacy

          +

          The content of this section is basically the same as the one in RFC 8.

          +

          This mechanism doesn't add or remove any security by itself, as it relies on existing mechanisms.

          +

          Due to the way Kademlia works, it would become the responsibility of the 20 Polkadot nodes whose sha256(peer_id) is closest to the key (described in the explanations section) to store the list of nodes that have specific capabilities. +Furthermore, when a large number of providers are registered, only the providers closest to the key are kept, up to a certain implementation-defined limit.

          +

          For this reason, an attacker can abuse this mechanism by randomly generating libp2p PeerIds until they find the 20 entries closest to the key representing the target capability. They are then in control of the list of nodes with that capability. While doing this can in no way be actually harmful, it could lead to eclipse attacks.

          +

          Because the key changes periodically and isn't predictable, and assuming that the Polkadot DHT is sufficiently large, it is not realistic for an attack like this to be maintained in the long term.

          +

          Performance, Ergonomics, and Compatibility

          +

          Performance

          +

          The DHT mechanism generally has a low overhead, especially given that publishing providers is done only every 24 hours.

          +

          Doing a Kademlia iterative query then sending a provider record shouldn't take more than around 50 kiB in total of bandwidth for the parachain bootnode.

          +

          Assuming 1000 nodes with a specific capability, the 20 Polkadot full nodes corresponding to that capability will each receive a sudden spike of a few megabytes of networking traffic when the key rotates. Again, this is relatively negligible. If this becomes a problem, one can add a random delay before a node registers itself to be the provider of the key corresponding to BabeApi_next_epoch.

          +

          Maybe the biggest uncertainty is the traffic that the 20 Polkadot full nodes will receive from light clients that desire knowing the nodes with a capability. If this every becomes a problem, this value of 20 is an arbitrary constant that can be increased for more redundancy.

          +

          Ergonomics

          +

          Irrelevant.

          +

          Compatibility

          +

          Irrelevant.

          +

          Prior Art and References

          +

          Unknown.

          +

          Unresolved Questions

          +

          While it fundamentally doesn't change much to this RFC, using BabeApi_currentEpoch and BabeApi_nextEpoch might be inappropriate. I'm not familiar enough with good practices within the runtime to have an opinion here. Should it be an entirely new pallet?

          + +

          This RFC would make it possible to reliably discover archive nodes, which would make it possible to reliably send archive node requests, something that isn't currently possible. This could solve the problem of finding archive RPC node providers by migrating archive-related request to using the native peer-to-peer protocol rather than JSON-RPC.

          +

          If we ever decide to break backwards compatibility, we could divide the "history" and "archive" capabilities in two, between nodes capable of serving older blocks and nodes capable of serving newer blocks. +We could even add to the peer-to-peer network nodes that are only capable of serving older blocks (by reading from a database) but do not participate in the head of the chain, and that just exist for historical purposes.

          diff --git a/proposed/000x-lowering-deposits-assethub.html b/proposed/000x-lowering-deposits-assethub.html index dba3c83..1e492fc 100644 --- a/proposed/000x-lowering-deposits-assethub.html +++ b/proposed/000x-lowering-deposits-assethub.html @@ -90,7 +90,7 @@ diff --git a/proposed/0026-sassafras-consensus.html b/proposed/0026-sassafras-consensus.html index 5b5816d..ea77ed3 100644 --- a/proposed/0026-sassafras-consensus.html +++ b/proposed/0026-sassafras-consensus.html @@ -90,7 +90,7 @@ diff --git a/proposed/0034-xcm-absolute-location-account-derivation.html b/proposed/0034-xcm-absolute-location-account-derivation.html index 9c31186..256d595 100644 --- a/proposed/0034-xcm-absolute-location-account-derivation.html +++ b/proposed/0034-xcm-absolute-location-account-derivation.html @@ -90,7 +90,7 @@ diff --git a/proposed/0042-extrinsics-state-version.html b/proposed/0042-extrinsics-state-version.html index 42a5c0e..ef329fb 100644 --- a/proposed/0042-extrinsics-state-version.html +++ b/proposed/0042-extrinsics-state-version.html @@ -90,7 +90,7 @@ diff --git a/proposed/0044-rent-based-registration.html b/proposed/0044-rent-based-registration.html index 85c1eba..a0bd21a 100644 --- a/proposed/0044-rent-based-registration.html +++ b/proposed/0044-rent-based-registration.html @@ -90,7 +90,7 @@ diff --git a/proposed/0046-metadata-for-offline-signers.html b/proposed/0046-metadata-for-offline-signers.html index aeaf11e..3eecb77 100644 --- a/proposed/0046-metadata-for-offline-signers.html +++ b/proposed/0046-metadata-for-offline-signers.html @@ -90,7 +90,7 @@ diff --git a/proposed/0047-assignment-of-availability-chunks.html b/proposed/0047-assignment-of-availability-chunks.html index 36dbbdf..b8fa279 100644 --- a/proposed/0047-assignment-of-availability-chunks.html +++ b/proposed/0047-assignment-of-availability-chunks.html @@ -90,7 +90,7 @@ @@ -467,7 +467,7 @@ dispute scenarios.

          - @@ -481,7 +481,7 @@ dispute scenarios.

          - diff --git a/proposed/0061-allocator-inside-of-runtime.html b/proposed/0061-allocator-inside-of-runtime.html index 6e9bb07..d422afc 100644 --- a/proposed/0061-allocator-inside-of-runtime.html +++ b/proposed/0061-allocator-inside-of-runtime.html @@ -90,7 +90,7 @@ @@ -294,7 +294,7 @@ For the first year, we SHALL disable the v1 by default, and enable it by default