From c4d3846ce20f99cb621ea21fe2a500aa1694fc99 Mon Sep 17 00:00:00 2001 From: "paritytech-rfc-bot[bot]" Date: Tue, 15 Oct 2024 13:19:17 +0000 Subject: [PATCH] deploy: d39ca4b893db86d1a20980721317aedb21f62863 --- 404.html | 2 +- approved/0001-agile-coretime.html | 2 +- approved/0005-coretime-interface.html | 2 +- approved/0007-system-collator-selection.html | 2 +- approved/0008-parachain-bootnodes-dht.html | 2 +- approved/0010-burn-coretime-revenue.html | 2 +- ...12-process-for-adding-new-collectives.html | 2 +- ...uilder-and-core-runtime-apis-for-mbms.html | 2 +- ...rove-locking-mechanism-for-parachains.html | 2 +- approved/0022-adopt-encointer-runtime.html | 2 +- approved/0026-sassafras-consensus.html | 2 +- approved/0032-minimal-relay.html | 2 +- approved/0042-extrinsics-state-version.html | 2 +- .../0043-storage-proof-size-hostfunction.html | 2 +- approved/0045-nft-deposits-asset-hub.html | 2 +- ...047-assignment-of-availability-chunks.html | 2 +- approved/0048-session-keys-runtime-api.html | 2 +- approved/0050-fellowship-salaries.html | 2 +- ...0056-one-transaction-per-notification.html | 2 +- .../0059-nodes-capabilities-discovery.html | 2 +- approved/0078-merkleized-metadata.html | 2 +- ...-general-transaction-extrinsic-format.html | 2 +- approved/0091-dht-record-creation-time.html | 2 +- approved/0097-unbonding_queue.html | 2 +- .../0099-transaction-extension-version.html | 2 +- .../0100-xcm-multi-type-asset-transfer.html | 2 +- ...-xcm-transact-remove-max-weight-param.html | 2 +- .../0103-introduce-core-index-commitment.html | 2 +- approved/0105-xcm-improved-fee-mechanism.html | 2 +- approved/0107-xcm-execution-hints.html | 2 +- approved/0108-xcm-remove-testnet-ids.html | 6 +- .../0122-alias-origin-on-asset-transfers.html | 12 +- index.html | 2 +- introduction.html | 2 +- ...storage-location-for-runtime-upgrades.html | 2 +- print.html | 1057 +++++++++-------- proposed/0000-rewards.html | 2 +- ...2-offchain-parachain-runtime-upgrades.html | 2 +- 
proposed/0106-xcm-remove-fees-mode.html | 2 +- proposed/0111-pure-proxy-replication.html | 2 +- ...-state-response-message-in-state-sync.html | 6 +- .../0117-unbrick-collective.html | 49 +- ...enda-confirmation-by-candle-mechanism.html | 6 +- proposed/0121-iterable-referenda-tracks.html | 6 +- ...ust Tipper Track Confirmation Periods.html | 6 +- searchindex.js | 2 +- searchindex.json | 2 +- ...04-remove-unnecessary-allocator-usage.html | 6 +- ...namic-pricing-for-bulk-coretime-sales.html | 2 +- ...09-improved-net-light-client-requests.html | 2 +- stale/0015-market-design-revisit.html | 2 +- ...-absolute-location-account-derivation.html | 2 +- ...ction-voting-delegation-modifications.html | 2 +- stale/0044-rent-based-registration.html | 2 +- stale/0054-remove-heap-pages.html | 2 +- stale/0070-x-track-kusamanetwork.html | 2 +- stale/0073-referedum-deposit-track.html | 2 +- stale/0074-stateful-multisig-pallet.html | 2 +- ...gth-of-identity-pgp-fingerprint-value.html | 2 +- ...t-purchaser-reputation-reserved-cores.html | 2 +- stale/0089-flexible-inflation.html | 2 +- ...0xx-secondary-marketplace-for-regions.html | 2 +- .../00xx-smart-contracts-coretime-chain.html | 2 +- stale/0114-secp256r1-hostfunction.html | 6 +- stale/TODO-stale-nomination-reward-curve.html | 6 +- 65 files changed, 648 insertions(+), 626 deletions(-) rename {proposed => approved}/0122-alias-origin-on-asset-transfers.html (73%) rename {stale => proposed}/0117-unbrick-collective.html (65%) diff --git a/404.html b/404.html index c332801..0ab0bce 100644 --- a/404.html +++ b/404.html @@ -91,7 +91,7 @@ diff --git a/approved/0001-agile-coretime.html b/approved/0001-agile-coretime.html index 204b4b8..b282465 100644 --- a/approved/0001-agile-coretime.html +++ b/approved/0001-agile-coretime.html @@ -90,7 +90,7 @@ diff --git a/approved/0005-coretime-interface.html b/approved/0005-coretime-interface.html index 170f95d..050d693 100644 --- a/approved/0005-coretime-interface.html +++ 
b/approved/0005-coretime-interface.html @@ -90,7 +90,7 @@ diff --git a/approved/0007-system-collator-selection.html b/approved/0007-system-collator-selection.html index 31762d2..2d319a5 100644 --- a/approved/0007-system-collator-selection.html +++ b/approved/0007-system-collator-selection.html @@ -90,7 +90,7 @@ diff --git a/approved/0008-parachain-bootnodes-dht.html b/approved/0008-parachain-bootnodes-dht.html index 0022996..d3b9238 100644 --- a/approved/0008-parachain-bootnodes-dht.html +++ b/approved/0008-parachain-bootnodes-dht.html @@ -90,7 +90,7 @@ diff --git a/approved/0010-burn-coretime-revenue.html b/approved/0010-burn-coretime-revenue.html index d7a36cb..3482efd 100644 --- a/approved/0010-burn-coretime-revenue.html +++ b/approved/0010-burn-coretime-revenue.html @@ -90,7 +90,7 @@ diff --git a/approved/0012-process-for-adding-new-collectives.html b/approved/0012-process-for-adding-new-collectives.html index 4773d12..c1d0698 100644 --- a/approved/0012-process-for-adding-new-collectives.html +++ b/approved/0012-process-for-adding-new-collectives.html @@ -90,7 +90,7 @@ diff --git a/approved/0013-prepare-blockbuilder-and-core-runtime-apis-for-mbms.html b/approved/0013-prepare-blockbuilder-and-core-runtime-apis-for-mbms.html index 0c8d387..155fa59 100644 --- a/approved/0013-prepare-blockbuilder-and-core-runtime-apis-for-mbms.html +++ b/approved/0013-prepare-blockbuilder-and-core-runtime-apis-for-mbms.html @@ -90,7 +90,7 @@ diff --git a/approved/0014-improve-locking-mechanism-for-parachains.html b/approved/0014-improve-locking-mechanism-for-parachains.html index 140d315..ebd5b1b 100644 --- a/approved/0014-improve-locking-mechanism-for-parachains.html +++ b/approved/0014-improve-locking-mechanism-for-parachains.html @@ -90,7 +90,7 @@ diff --git a/approved/0022-adopt-encointer-runtime.html b/approved/0022-adopt-encointer-runtime.html index 8ce7e08..09609ec 100644 --- a/approved/0022-adopt-encointer-runtime.html +++ b/approved/0022-adopt-encointer-runtime.html @@ 
-90,7 +90,7 @@ diff --git a/approved/0026-sassafras-consensus.html b/approved/0026-sassafras-consensus.html index 2eccfc7..62d36a3 100644 --- a/approved/0026-sassafras-consensus.html +++ b/approved/0026-sassafras-consensus.html @@ -90,7 +90,7 @@ diff --git a/approved/0032-minimal-relay.html b/approved/0032-minimal-relay.html index e534b39..7ec8333 100644 --- a/approved/0032-minimal-relay.html +++ b/approved/0032-minimal-relay.html @@ -90,7 +90,7 @@ diff --git a/approved/0042-extrinsics-state-version.html b/approved/0042-extrinsics-state-version.html index 26b684d..2018742 100644 --- a/approved/0042-extrinsics-state-version.html +++ b/approved/0042-extrinsics-state-version.html @@ -90,7 +90,7 @@ diff --git a/approved/0043-storage-proof-size-hostfunction.html b/approved/0043-storage-proof-size-hostfunction.html index afc1322..aaf5c42 100644 --- a/approved/0043-storage-proof-size-hostfunction.html +++ b/approved/0043-storage-proof-size-hostfunction.html @@ -90,7 +90,7 @@ diff --git a/approved/0045-nft-deposits-asset-hub.html b/approved/0045-nft-deposits-asset-hub.html index 16eeca4..4e62e1f 100644 --- a/approved/0045-nft-deposits-asset-hub.html +++ b/approved/0045-nft-deposits-asset-hub.html @@ -90,7 +90,7 @@ diff --git a/approved/0047-assignment-of-availability-chunks.html b/approved/0047-assignment-of-availability-chunks.html index 4a1b6e1..b58442d 100644 --- a/approved/0047-assignment-of-availability-chunks.html +++ b/approved/0047-assignment-of-availability-chunks.html @@ -90,7 +90,7 @@ diff --git a/approved/0048-session-keys-runtime-api.html b/approved/0048-session-keys-runtime-api.html index b9475b5..f73f7ba 100644 --- a/approved/0048-session-keys-runtime-api.html +++ b/approved/0048-session-keys-runtime-api.html @@ -90,7 +90,7 @@ diff --git a/approved/0050-fellowship-salaries.html b/approved/0050-fellowship-salaries.html index 330a5de..58c7ea1 100644 --- a/approved/0050-fellowship-salaries.html +++ b/approved/0050-fellowship-salaries.html @@ -90,7 +90,7 @@ diff 
--git a/approved/0056-one-transaction-per-notification.html b/approved/0056-one-transaction-per-notification.html index 4e268d8..1ebcc15 100644 --- a/approved/0056-one-transaction-per-notification.html +++ b/approved/0056-one-transaction-per-notification.html @@ -90,7 +90,7 @@ diff --git a/approved/0059-nodes-capabilities-discovery.html b/approved/0059-nodes-capabilities-discovery.html index 397a71e..4a3bbc4 100644 --- a/approved/0059-nodes-capabilities-discovery.html +++ b/approved/0059-nodes-capabilities-discovery.html @@ -90,7 +90,7 @@ diff --git a/approved/0078-merkleized-metadata.html b/approved/0078-merkleized-metadata.html index 870869a..5013da6 100644 --- a/approved/0078-merkleized-metadata.html +++ b/approved/0078-merkleized-metadata.html @@ -90,7 +90,7 @@ diff --git a/approved/0084-general-transaction-extrinsic-format.html b/approved/0084-general-transaction-extrinsic-format.html index 9fa6690..256b209 100644 --- a/approved/0084-general-transaction-extrinsic-format.html +++ b/approved/0084-general-transaction-extrinsic-format.html @@ -90,7 +90,7 @@ diff --git a/approved/0091-dht-record-creation-time.html b/approved/0091-dht-record-creation-time.html index 3e65a3b..c078abc 100644 --- a/approved/0091-dht-record-creation-time.html +++ b/approved/0091-dht-record-creation-time.html @@ -90,7 +90,7 @@ diff --git a/approved/0097-unbonding_queue.html b/approved/0097-unbonding_queue.html index fdbc40c..ce2be49 100644 --- a/approved/0097-unbonding_queue.html +++ b/approved/0097-unbonding_queue.html @@ -90,7 +90,7 @@ diff --git a/approved/0099-transaction-extension-version.html b/approved/0099-transaction-extension-version.html index d74bcc1..aa12c35 100644 --- a/approved/0099-transaction-extension-version.html +++ b/approved/0099-transaction-extension-version.html @@ -90,7 +90,7 @@ diff --git a/approved/0100-xcm-multi-type-asset-transfer.html b/approved/0100-xcm-multi-type-asset-transfer.html index c8bcc2f..370a74c 100644 --- 
a/approved/0100-xcm-multi-type-asset-transfer.html +++ b/approved/0100-xcm-multi-type-asset-transfer.html @@ -90,7 +90,7 @@ diff --git a/approved/0101-xcm-transact-remove-max-weight-param.html b/approved/0101-xcm-transact-remove-max-weight-param.html index 6e0adec..7ecd83e 100644 --- a/approved/0101-xcm-transact-remove-max-weight-param.html +++ b/approved/0101-xcm-transact-remove-max-weight-param.html @@ -90,7 +90,7 @@ diff --git a/approved/0103-introduce-core-index-commitment.html b/approved/0103-introduce-core-index-commitment.html index 802ad67..65d11cf 100644 --- a/approved/0103-introduce-core-index-commitment.html +++ b/approved/0103-introduce-core-index-commitment.html @@ -90,7 +90,7 @@ diff --git a/approved/0105-xcm-improved-fee-mechanism.html b/approved/0105-xcm-improved-fee-mechanism.html index bfd6cf6..51e6a9e 100644 --- a/approved/0105-xcm-improved-fee-mechanism.html +++ b/approved/0105-xcm-improved-fee-mechanism.html @@ -90,7 +90,7 @@ diff --git a/approved/0107-xcm-execution-hints.html b/approved/0107-xcm-execution-hints.html index 7cb102b..9d29066 100644 --- a/approved/0107-xcm-execution-hints.html +++ b/approved/0107-xcm-execution-hints.html @@ -90,7 +90,7 @@ diff --git a/approved/0108-xcm-remove-testnet-ids.html b/approved/0108-xcm-remove-testnet-ids.html index 4c74ce0..08e8323 100644 --- a/approved/0108-xcm-remove-testnet-ids.html +++ b/approved/0108-xcm-remove-testnet-ids.html @@ -90,7 +90,7 @@ @@ -245,7 +245,7 @@ using NetworkId::ByGenesis.

- @@ -259,7 +259,7 @@ using NetworkId::ByGenesis.

- diff --git a/proposed/0122-alias-origin-on-asset-transfers.html b/approved/0122-alias-origin-on-asset-transfers.html similarity index 73% rename from proposed/0122-alias-origin-on-asset-transfers.html rename to approved/0122-alias-origin-on-asset-transfers.html index bcf9968..a6d4bdd 100644 --- a/proposed/0122-alias-origin-on-asset-transfers.html +++ b/approved/0122-alias-origin-on-asset-transfers.html @@ -90,7 +90,7 @@ @@ -174,7 +174,7 @@
-

(source)

+

(source)

Table of Contents

diff --git a/index.html b/index.html index ff19e9c..d8843df 100644 --- a/index.html +++ b/index.html @@ -90,7 +90,7 @@ diff --git a/introduction.html b/introduction.html index ff19e9c..d8843df 100644 --- a/introduction.html +++ b/introduction.html @@ -90,7 +90,7 @@ diff --git a/new/0123-pending-code-as-storage-location-for-runtime-upgrades.html b/new/0123-pending-code-as-storage-location-for-runtime-upgrades.html index e385583..48f70e0 100644 --- a/new/0123-pending-code-as-storage-location-for-runtime-upgrades.html +++ b/new/0123-pending-code-as-storage-location-for-runtime-upgrades.html @@ -90,7 +90,7 @@ diff --git a/print.html b/print.html index cb59cc0..b93d477 100644 --- a/print.html +++ b/print.html @@ -91,7 +91,7 @@ @@ -1066,6 +1066,182 @@ for compression.

None.

None.

+

(source)

+

Table of Contents

+ +

RFC-0117: The Unbrick Collective

+
+ + + +
Start Date22 August 2024
DescriptionThe Unbrick Collective aims to help teams rescuing a para once it stops producing blocks
AuthorsBryan Chen, Pablo Dorado
+
+

Summary

+

A followup of the RFC-0014. This RFC proposes adding a new collective to the Polkadot Collectives +Chain: The Unbrick Collective, as well as improvements in the mechanisms that will allow teams +operating paras that had stopped producing blocks to be assisted, in order to restore the production +of blocks of these paras.

+

Motivation

+

Since the initial launch of Polkadot parachains, there have been many incidents causing parachains +to stop producing new blocks (therefore, being bricked) and many occurrences that require +Polkadot governance to update the parachain head state/wasm. This can be due to many reasons ranging +from incorrectly registering the initial head state, inability to use sudo key, bad runtime +migration, bad weight configuration, and bugs in the development of the Polkadot SDK.

+

Currently, when the para is not unlocked in the paras registrar1, the Root origin is required to +perform such actions, involving the governance process to invoke this origin, which can be very +resource expensive for the teams. The long voting and enactment times also could result in significant +damage to the parachain and users.

+

Finally, other instances of governance that might enact a call using the Root origin (like the +Polkadot Fellowship), due to the nature of their mission, are not fit to carry these kind of tasks.

+

In consequence, the idea of an Unbrick Collective that can provide assistance to para teams when +they brick and further protection against future halts is reasonable enough.

+

Stakeholders

+
    +
  • Parachain teams
  • +
  • Parachain users
  • +
  • OpenGov users
  • +
  • Polkadot Fellowship
  • +
+

Explanation

+

The Collective

+

The Unbrick Collective is defined as an unranked collective of members, not paid by the Polkadot +Treasury. Its main goal is to serve as a point of contact and assistance for enacting the actions +needed to unbrick a para. Such actions are:

+
    +
  • Updating the Parachain Verification Function (a.k.a. a new WASM) of a para.
  • +
  • Updating the head state of a para.
  • +
  • A combination of the above.
  • +
+

In order to ensure these changes are safe enough for the network, actions enacted by the Unbrick +Collective must be whitelisted via similar mechanisms followed by collectives like the Polkadot +Fellowship. This will prevent unintended, not overseen changes on other paras to occur.

+

Also, teams might opt-in to delegate handling their para in the registry to the Collective. This +allows to perform similar actions using the paras registrar, allowing for a shorter path to unbrick a +para.

+

Initially, the unbrick collective has powers similar to a parachain's own sudo, but permits more +decentralized control. In the future, Polkadot shall provide functionality like SPREE or JAM that +exceeds sudo permissions, so the unbrick collective cannot modify those state roots or code.

+

The Unbrick Process

+
flowchart TD
+    A[Start] 
+
+    A -- Bricked --> C[Request para unlock via Root]
+    C -- Approved --> Y
+    C -- Rejected --> A
+    
+    D[unbrick call proposal on WhitelistedUnbrickCaller]
+    E[whitelist call proposal on the Unbrick governance]
+    E -- call whitelisted --> F[unbrick call enacted]
+    D -- unbrick called --> F
+    F --> Y
+
+    A -- Not bricked --> O[Opt-in to the Collective]
+    O -- Bricked --> D
+    O -- Bricked --> E
+
+    Y[update PVF / head state] -- Unbricked --> Z[End]
+
+

Initially, a para team has two paths to handle a potential unbrick of their para in the case it +stops producing blocks.

+
    +
  1. Opt-in to the Unbrick Collective: This is done by delegating the handling of the para +in the paras registrar to an origin related to the Collective. This doesn't require unlocking +the para. This way, the collective is enabled to perform changes in the paras module, after +the Unbrick Process proceeds.
  2. +
  3. Request a Para Unlock: In case the para hasn't delegated its handling in the paras +registrar, it'll be still possible for the para team to submit a proposal to unlock the para, +which can be assisted by the Collective. However, this involves submitting a proposal to the Root +governance origin.
  4. +
+

Belonging to the Collective

+

The collective will be initially created without members (no seeding). There will be additional +governance proposals to setup the seed members.

+

The origins able to modify the members of the collective are:

+
    +
  • The Fellows track in the Polkadot Fellowship.
  • +
  • Root track in the Relay.
  • +
  • More than two thirds of the existing Unbrick Collective.
  • +
+

The members are responsible to verify the technical details of the unbrick requests (i.e. the hash +of the new PVF being set). Therefore, they must have the technical capacity to perform such tasks.

+

Suggested requirements to become a member are the following:

+
    +
  • Rank 3 or above in the Polkadot Fellowship.
  • +
  • Being a CTO or Technical Lead in a para team that has opted-in to delegate the Unbrick Collective +to manage the PVF/head state of the para.
  • +
+

Drawbacks

+

The ability to modify the Head State and/or the PVF of a para means a possibility to perform +arbitrary modifications of it (i.e. take control the native parachain token or any bridged assets +in the para).

+

This could introduce a new attack vector, and therefore, such great power needs to be handled +carefully.

+

Testing, Security, and Privacy

+

The implementation of this RFC will be tested on testnets (Rococo and Westend) first.

+

An audit will be required to ensure the implementation doesn't introduce unwanted side effects.

+

There are no privacy related concerns.

+

Performance, Ergonomics, and Compatibility

+

Performance

+

This RFC should not introduce any performance impact.

+

Ergonomics

+

This RFC should improve the experience for new and existing parachain teams, lowering the barrier +to unbrick a stalled para.

+

Compatibility

+

This RFC is fully compatible with existing interfaces.

+

Prior Art and References

+ +

Unresolved Questions

+
    +
  • What are the parameters for the WhitelistedUnbrickCaller track?
  • +
  • Any other methods that shall be updated to accept Unbrick origin?
  • +
  • Any other requirements to become a member?
  • +
  • We would like to keep this simple, so no funding support from the Polkadot treasury. But do we +want to compensate the members somehow? i.e. Allow parachain teams to donate to the collective.
  • +
  • We hope SPREE/JAM would be carefully audited for misuse risks before being
    +provided to parachain teams, but could the unbrick collective have an elections
    +that warranted trust beyond sudo powers?
  • +
  • An auditing framework/collective makes sense for parachain code upgrades, but
    +could also strengthen the unbrick collective.
  • +
  • Do we want to have this collective offer additional technical support to help bricked parachains? +i.e. help debug the code, create the rescue plan, create postmortem report, provide resources on +how to avoid getting bricked
  • +
+ +
1 +

The paras registrar refers to a pallet in the Relay, responsible to gather registration info +of the paras, the locked/unlocked state, and the manager info.

+
+

(source)

Table of Contents

-

Drawbacks

+

Drawbacks

This RFC might be difficult to implement in Substrate due to the internal code design. It is not clear to the author of this RFC how difficult it would be.

Prior Art

The API of these new functions was heavily inspired by API used by the C programming language.

-

Unresolved Questions

+

Unresolved Questions

The changes in this RFC would need to be benchmarked. This involves implementing the RFC and measuring the speed difference.

It is expected that most host functions are faster or equal speed to their deprecated counterparts, with the following exceptions:

    @@ -7228,10 +7404,10 @@ This would remove the possibility to synchronize older blocks, which is probably LicenseMIT -

    Summary

    +

    Summary

    This RFC proposes a dynamic pricing model for the sale of Bulk Coretime on the Polkadot UC. The proposed model updates the regular price of cores for each sale period, by taking into account the number of cores sold in the previous sale, as well as a limit of cores and a target number of cores sold. It ensures a minimum price and limits price growth to a maximum price increase factor, while also giving governance control over the steepness of the price change curve. It allows governance to address challenges arising from changing market conditions and should offer predictable and controlled price adjustments.

    Accompanying visualizations are provided at [1].

    -

    Motivation

    +

    Motivation

    RFC-1 proposes periodic Bulk Coretime Sales as a mechanism to sell continuous regions of blockspace (suggested to be 4 weeks in length). A number of Blockspace Regions (compare RFC-1 & RFC-3) are provided for sale to the Broker-Chain each period and shall be sold in a way that provides value-capture for the Polkadot network. The exact pricing mechanism is out of scope for RFC-1 and shall be provided by this RFC.

    A dynamic pricing model is needed. A limited number of Regions are offered for sale each period. The model needs to find the price for a period based on supply and demand of the previous period.

    The model shall give Coretime consumers predictability about upcoming price developments and confidence that Polkadot governance can adapt the pricing model to changing market conditions.

    @@ -7243,7 +7419,7 @@ This would remove the possibility to synchronize older blocks, which is probably
  • The solution SHOULD provide a maximum factor of price increase should the limit of Regions sold per period be reached.
  • The solution should allow governance to control the steepness of the price function
  • -

    Stakeholders

    +

    Stakeholders

    The primary stakeholders of this RFC are:

    • Protocol researchers and developers
    • @@ -7251,7 +7427,7 @@ This would remove the possibility to synchronize older blocks, which is probably
    • Polkadot parachains teams
    • Brokers involved in the trade of Bulk Coretime
    -

    Explanation

    +

    Explanation

    Overview

    The dynamic pricing model sets the new price based on supply and demand in the previous period. The model is a function of the number of Regions sold, piecewise-defined by two power functions.

      @@ -7350,9 +7526,9 @@ SCALE_DOWN = 1 SCALE_UP = 1 OLD_PRICE = 1000 -

      Drawbacks

      +

      Drawbacks

      None at present.

      -

      Prior Art and References

      +

      Prior Art and References

      This pricing model is based on the requirements from the basic linear solution proposed in RFC-1, which is a simple dynamic pricing model and only used as proof. The present model adds additional considerations to make the model more adaptable under real conditions.

      Future Possibilities

      This RFC, if accepted, shall be implemented in conjunction with RFC-1.

      @@ -7391,9 +7567,9 @@ OLD_PRICE = 1000 AuthorsPierre Krieger -

      Summary

      +

      Summary

      Improve the networking messages that query storage items from the remote, in order to reduce the bandwidth usage and number of round trips of light clients.

      -

      Motivation

      +

      Motivation

      Clients on the Polkadot peer-to-peer network can be divided into two categories: full nodes and light clients. So-called full nodes are nodes that store the content of the chain locally on their disk, while light clients are nodes that don't. In order to access for example the balance of an account, a full node can do a disk read, while a light client needs to send a network message to a full node and wait for the full node to reply with the desired value. This reply is in the form of a Merkle proof, which makes it possible for the light client to verify the exactness of the value.

      Unfortunately, this network protocol is suffering from some issues:

        @@ -7403,9 +7579,9 @@ OLD_PRICE = 1000

      Once Polkadot and Kusama will have transitioned to state_version = 1, which modifies the format of the trie entries, it will be possible to generate Merkle proofs that contain only the hashes of values in the storage. Thanks to this, it is already possible to prove the existence of a key without sending its entire value (only its hash), or to prove that a value has changed or not between two blocks (by sending just their hashes). Thus, the only reason why aforementioned issues exist is because the existing networking messages don't give the possibility for the querier to query this. This is what this proposal aims at fixing.

      -

      Stakeholders

      +

      Stakeholders

      This is the continuation of https://github.com/w3f/PPPs/pull/10, which itself is the continuation of https://github.com/w3f/PPPs/pull/5.

      -

      Explanation

      +

      Explanation

      The protobuf schema of the networking protocol can be found here: https://github.com/paritytech/substrate/blob/5b6519a7ff4a2d3cc424d78bc4830688f3b184c0/client/network/light/src/schema/light.v1.proto

      The proposal is to modify this protocol in this way:

      @@ -11,6 +11,7 @@ message Request {
      @@ -7463,24 +7639,24 @@ An alternative could have been to specify the child_trie_info for e
       Also note that child tries aren't considered as descendants of the main trie when it comes to the includeDescendants flag. In other words, if the request concerns the main trie, no content coming from child tries is ever sent back.

      This protocol keeps the same maximum response size limit as currently exists (16 MiB). It is not possible for the querier to know in advance whether its query will lead to a reply that exceeds the maximum size. If the reply is too large, the replier should send back only a limited number (but at least one) of requested items in the proof. The querier should then send additional requests for the rest of the items. A response containing none of the requested items is invalid.

      The server is allowed to silently discard some keys of the request if it judges that the number of requested keys is too high. This is in line with the fact that the server might truncate the response.

      -

      Drawbacks

      +

      Drawbacks

      This proposal doesn't handle one specific situation: what if a proof containing a single specific item would exceed the response size limit? For example, if the response size limit was 1 MiB, querying the runtime code (which is typically 1.0 to 1.5 MiB) would be impossible as it's impossible to generate a proof less than 1 MiB. The response size limit is currently 16 MiB, meaning that no single storage item must exceed 16 MiB.

      Unfortunately, because it's impossible to verify a Merkle proof before having received it entirely, parsing the proof in a streaming way is also not possible.

      A way to solve this issue would be to Merkle-ize large storage items, so that a proof could include only a portion of a large storage item. Since this would require a change to the trie format, it is not realistically feasible in a short time frame.

      -

      Testing, Security, and Privacy

      +

      Testing, Security, and Privacy

      The main security consideration concerns the size of replies and the resources necessary to generate them. It is for example easily possible to ask for all keys and values of the chain, which would take a very long time to generate. Since responses to this networking protocol have a maximum size, the replier should truncate proofs that would lead to the response being too large. Note that it is already possible to send a query that would lead to a very large reply with the existing network protocol. The only thing that this proposal changes is that it would make it less complicated to perform such an attack.

      Implementers of the replier side should be careful to detect early on when a reply would exceed the maximum reply size, rather than unconditionally generate a reply, as this could take a very large amount of CPU, disk I/O, and memory. Existing implementations might currently be accidentally protected from such an attack thanks to the fact that requests have a maximum size, and thus that the list of keys in the query was bounded. After this proposal, this accidental protection would no longer exist.

      Malicious server nodes might truncate Merkle proofs even when they don't strictly need to, and it is not possible for the client to (easily) detect this situation. However, malicious server nodes can already do undesirable things such as throttle down their upload bandwidth or simply not respond. There is no need to handle unnecessarily truncated Merkle proofs any differently than a server simply not answering the request.

      -

      Performance, Ergonomics, and Compatibility

      -

      Performance

      +

      Performance, Ergonomics, and Compatibility

      +

      Performance

      It is unclear to the author of the RFC what the performance implications are. Servers are supposed to have limits to the amount of resources they use to respond to requests, and as such the worst that can happen is that light client requests become a bit slower than they currently are.

      -

      Ergonomics

      +

      Ergonomics

      Irrelevant.

      -

      Compatibility

      +

      Compatibility

      The prior networking protocol is maintained for now. The older version of this protocol could get removed in a long time.

      -

      Prior Art and References

      +

      Prior Art and References

      None. This RFC is a clean-up of an existing mechanism.

      -

      Unresolved Questions

      +

      Unresolved Questions

      None

      The current networking protocol could be deprecated in a long time. Additionally, the current "state requests" protocol (used for warp syncing) could also be deprecated in favor of this one.

      @@ -7512,9 +7688,9 @@ Also note that child tries aren't considered as descendants of the main trie whe AuthorsJonas Gehrlein -

      Summary

      +

      Summary

      This document is a proposal for restructuring the bulk markets in the Polkadot UC's coretime allocation system to improve efficiency and fairness. The proposal suggests separating the BULK_PERIOD into MARKET_PERIOD and RENEWAL_PERIOD, allowing for a market-driven price discovery through a clearing price Dutch auction during the MARKET_PERIOD followed by renewal offers at the MARKET_PRICE during the RENEWAL_PERIOD. The new system ensures synchronicity between renewal and market prices, fairness among all current tenants, and efficient price discovery, while preserving price caps to provide security for current tenants. It seeks to start a discussion about the possibility of long-term leases.

      -

      Motivation

      +

      Motivation

      While the initial RFC-1 has provided a robust framework for Coretime allocation within the Polkadot UC, this proposal builds upon its strengths and uses many provided building blocks to address some areas that could be further improved.

      In particular, this proposal introduces the following changes:

        @@ -7534,14 +7710,14 @@ Also note that child tries aren't considered as descendants of the main trie whe

The premise of this proposal is to reduce complexity by introducing a common price (that develops relative to capacity consumption of Polkadot UC), while still allowing for market forces to add efficiency. Long-term lease owners still receive priority IF they can pay (close to) the market price. This prevents a situation where the renewal price significantly diverges from market prices, which allows for core captures. While maximum price increase certainty might seem contradictory to efficient price discovery, the proposed model aims to balance these elements, utilizing market forces to determine the price and allocate cores effectively within certain bounds. It must be stated that potential price increases remain predictable (in the worst-case) but could be higher than in the originally proposed design. The argument remains, however, that we need to allow market forces to affect all prices for an efficient Coretime pricing and allocation.

Ultimately, the framework proposed here adheres to all requirements stated in RFC-1.

      -

      Stakeholders

      +

      Stakeholders

      Primary stakeholder sets are:

      • Protocol researchers and developers, largely represented by the Polkadot Fellowship and Parity Technologies' Engineering division.
      • Polkadot Parachain teams both present and future, and their users.
      • Polkadot DOT token holders.
      -

      Explanation

      +

      Explanation

      Bulk Markets

      The BULK_PERIOD has been restructured into two primary segments: the MARKET_PERIOD and RENEWAL_PERIOD, along with an auxiliary SETTLEMENT_PERIOD. This latter period doesn't necessitate any actions from the coretime system chain, but it facilitates a more efficient allocation of coretime in secondary markets. A significant departure from the original proposal lies in the timing of renewals, which now occur post-market phase. This adjustment aims to harmonize renewal prices with their market counterparts, ensuring a more consistent and equitable pricing model.

      Market Period (14 days)

      @@ -7588,12 +7764,12 @@ Also note that child tries aren't considered as descendants of the main trie whe
      • Long-term Coretime: The Polkadot UC is undergoing a transition from two-year leases without an instantaneous market to a model encompassing instantaneous and one-month leases. This shift seems to pivot from one extreme to another. While the introduction of short-term leases, both instantaneous and for one month, is a constructive move to lower barriers to entry and promote experimentation, it seems to be the case that established projects might benefit from more extended lease options. We could consider offering another product, such as a six-month Coretime lease, using the same mechanism described herein. Although the majority of leases would still be sold on a one-month basis, the addition of this option would enhance market efficiency as it would strengthen the impact of a secondary market.
      -

      Drawbacks

      +

      Drawbacks

There are trade-offs that arise from this proposal, compared to the initial model. The most notable one is that here, I prioritize requirement 6 over requirement 2. The price, in the very "worst-case" (meaning a huge explosion in demand for coretime) could lead to a much larger increase of prices in Coretime. From an economic perspective, this (rare edge case) would also mean that we'd vastly underprice Coretime in the original model, leading to highly inefficient allocations.

      -

      Prior Art and References

      +

      Prior Art and References

      This RFC builds extensively on the available ideas put forward in RFC-1.

      Additionally, I want to express a special thanks to Samuel Haefner and Shahar Dobzinski for fruitful discussions and helping me structure my thoughts.

      -

      Unresolved Questions

      +

      Unresolved Questions

The technical feasibility needs to be assessed.

      (source)

      Table of Contents

      @@ -7625,16 +7801,16 @@ Also note that child tries aren't considered as descendants of the main trie whe AuthorsGabriel Facco de Arruda -

      Summary

      +

      Summary

      This RFC proposes changes that enable the use of absolute locations in AccountId derivations, which allows protocols built using XCM to have static account derivations in any runtime, regardless of its position in the family hierarchy.

      -

      Motivation

      +

      Motivation

      These changes would allow protocol builders to leverage absolute locations to maintain the exact same derived account address across all networks in the ecosystem, thus enhancing user experience.

      One such protocol, that is the original motivation for this proposal, is InvArch's Saturn Multisig, which gives users a unifying multisig and DAO experience across all XCM connected chains.

      -

      Stakeholders

      +

      Stakeholders

      • Ecosystem developers
      -

      Explanation

      +

      Explanation

      This proposal aims to make it possible to derive accounts for absolute locations, enabling protocols that require the ability to maintain the same derived account in any runtime. This is done by deriving accounts from the hash of described absolute locations, which are static across different destinations.

      The same location can be represented in relative form and absolute form like so:

      #![allow(unused)]
      @@ -7691,25 +7867,25 @@ Also note that child tries aren't considered as descendants of the main trie whe
       

      DescribeFamily

      The DescribeFamily location descriptor is part of the HashedDescription MultiLocation hashing system and exists to describe locations in an easy format for encoding and hashing, so that an AccountId can be derived from this MultiLocation.

      This implementation contains a match statement that does not match against absolute locations, so changes to it involve matching against absolute locations and providing appropriate descriptions for hashing.

      -

      Drawbacks

      +

      Drawbacks

      No drawbacks have been identified with this proposal.

      -

      Testing, Security, and Privacy

      +

      Testing, Security, and Privacy

      Tests can be done using simple unit tests, as this is not a change to XCM itself but rather to types defined in xcm-builder.

      Security considerations should be taken with the implementation to make sure no unwanted behavior is introduced.

      This proposal does not introduce any privacy considerations.

      -

      Performance, Ergonomics, and Compatibility

      -

      Performance

      +

      Performance, Ergonomics, and Compatibility

      +

      Performance

      Depending on the final implementation, this proposal should not introduce much overhead to performance.

      -

      Ergonomics

      +

      Ergonomics

      The ergonomics of this proposal depend on the final implementation details.

      -

      Compatibility

      +

      Compatibility

Backwards compatibility should remain unchanged, although that depends on the final implementation.

      -

      Prior Art and References

      +

      Prior Art and References

• DescribeFamily type: https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/xcm-builder/src/location_conversion.rs#L122
      • WithComputedOrigin type: https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/xcm-builder/src/barriers.rs#L153
      -

      Unresolved Questions

      +

      Unresolved Questions

      Implementation details and overall code is still up to discussion.

      (source)

      Table of Contents

      @@ -7741,7 +7917,7 @@ Also note that child tries aren't considered as descendants of the main trie whe ChaosDAO -

      Summary

      +

      Summary

      This RFC proposes to make modifications to voting power delegations as part of the Conviction Voting pallet. The changes being proposed include:

      1. Allow a Delegator to vote independently of their Delegate if they so desire.
      2. @@ -7749,7 +7925,7 @@ Also note that child tries aren't considered as descendants of the main trie whe
      3. Make a change so that when a delegate votes abstain their delegated votes also vote abstain.
      4. Allow a Delegator to delegate/ undelegate their votes for all tracks with a single call.
      -

      Motivation

      +

      Motivation

      It has become clear since the launch of OpenGov that there are a few common tropes which pop up time and time again:

      1. The frequency of referenda is often too high for network participants to have sufficient time to review, comprehend, and ultimately vote on each individual referendum. This means that these network participants end up being inactive in on-chain governance.
      2. @@ -7757,13 +7933,13 @@ Also note that child tries aren't considered as descendants of the main trie whe
      3. Delegating votes for all tracks currently requires long batched calls which result in high fees for the Delegator - resulting in a reluctance from many to delegate their votes.

      We believe (based on feedback from token holders with a larger stake in the network) that if there were some changes made to delegation mechanics, these larger stake holders would be more likely to delegate their voting power to active network participants – thus greatly increasing the support turnout.

      -

      Stakeholders

      +

      Stakeholders

      The primary stakeholders of this RFC are:

      • The Polkadot Technical Fellowship who will have to research and implement the technical aspects of this RFC
      • DOT token holders in general
      -

      Explanation

      +

      Explanation

      This RFC proposes to make 4 changes to the convictionVoting pallet logic in order to improve the user experience of those delegating their voting power to another account.

      1. @@ -7779,19 +7955,19 @@ Also note that child tries aren't considered as descendants of the main trie whe

        Allow a Delegator to delegate/ undelegate their votes for all tracks with a single call - in order to delegate votes across all tracks, a user must batch 15 calls - resulting in high costs for delegation. A single call for delegate_all/ undelegate_all would reduce the complexity and therefore costs of delegations considerably for prospective Delegators.

      -

      Drawbacks

      +

      Drawbacks

      We do not foresee any drawbacks by implementing these changes. If anything we believe that this should help to increase overall voter turnout (via the means of delegation) which we see as a net positive.

      -

      Testing, Security, and Privacy

      +

      Testing, Security, and Privacy

      We feel that the Polkadot Technical Fellowship would be the most competent collective to identify the testing requirements for the ideas presented in this RFC.

      -

      Performance, Ergonomics, and Compatibility

      -

      Performance

      +

      Performance, Ergonomics, and Compatibility

      +

      Performance

      This change may add extra chain storage requirements on Polkadot, especially with respect to nested delegations.

      Ergonomics & Compatibility

      The change to add nested delegations may affect governance interfaces such as Nova Wallet who will have to apply changes to their indexers to support nested delegations. It may also affect the Polkadot Delegation Dashboard as well as Polkassembly & SubSquare.

      We want to highlight the importance for ecosystem builders to create a mechanism for indexers and wallets to be able to understand that changes have occurred such as increasing the pallet version, etc.

      -

      Prior Art and References

      +

      Prior Art and References

      N/A

      -

      Unresolved Questions

      +

      Unresolved Questions

      N/A

      Additionally we would like to re-open the conversation about the potential for there to be free delegations. This was discussed by Dr Gavin Wood at Sub0 2022 and we feel like this would go a great way towards increasing the amount of network participants that are delegating: https://youtu.be/hSoSA6laK3Q?t=526

      @@ -7835,9 +8011,9 @@ Also note that child tries aren't considered as descendants of the main trie whe AuthorsSergej Sakac -

      Summary

      +

      Summary

      This RFC proposes a new model for a sustainable on-demand parachain registration, involving a smaller initial deposit and periodic rent payments. The new model considers that on-demand chains may be unregistered and later re-registered. The proposed solution also ensures a quick startup for on-demand chains on Polkadot in such cases.

      -

      Motivation

      +

      Motivation

      With the support of on-demand parachains on Polkadot, there is a need to explore a new, more cost-effective model for registering validation code. In the current model, the parachain manager is responsible for reserving a unique ParaId and covering the cost of storing the validation code of the parachain. These costs can escalate, particularly if the validation code is large. We need a better, sustainable model for registering on-demand parachains on Polkadot to help smaller teams deploy more easily.

      This RFC suggests a new payment model to create a more financially viable approach to on-demand parachain registration. In this model, a lower initial deposit is required, followed by recurring payments upon parachain registration.

      This new model will coexist with the existing one-time deposit payment model, offering teams seeking to deploy on-demand parachains on Polkadot a more cost-effective alternative.

      @@ -7851,11 +8027,11 @@ Also note that child tries aren't considered as descendants of the main trie whe
    • The solution MUST allow anyone to pay the rent.
    • The solution MUST prevent the removal of validation code if it could still be required for disputes or approval checking.
    • -

      Stakeholders

      +

      Stakeholders

      • Future Polkadot on-demand Parachains
      -

      Explanation

      +

      Explanation

      This RFC proposes a set of changes that will enable the new rent based approach to registering and storing validation code on-chain. The new model, compared to the current one, will require periodic rent payments. The parachain won't be pruned automatically if the rent is not paid, but by permitting anyone to prune the parachain and rewarding the caller, there will be an incentive for the removal of the validation code.

      On-demand parachains should still be able to utilize the current one-time payment model. However, given the size of the deposit required, it's highly likely that most on-demand parachains will opt for the new rent-based model.

      @@ -7962,25 +8138,25 @@ pub(super) type CheckedCodeHash<T: Config> = StorageMap<_, Twox64Concat, ParaId, ValidationCodeHash>; }

To enable parachain re-registration, we should introduce a new extrinsic in the paras-registrar pallet that allows this. The logic of this extrinsic will be the same as regular registration, with the distinction that it can be called by anyone, and the required deposit will be smaller since it only has to cover the storage of the validation code.

      -

      Drawbacks

      +

      Drawbacks

      This RFC does not alter the process of reserving a ParaId, and therefore, it does not propose reducing it, even though such a reduction could be beneficial.

      Even though this RFC doesn't delve into the specifics of the configuration values for parachain registration but rather focuses on the mechanism, configuring it carelessly could lead to potential problems.

      Since the validation code hash and head data are not removed when the parachain is pruned but only when the deregister extrinsic is called, the T::DataDepositPerByte must be set to a higher value to create a strong enough incentive for removing it from the state.

      -

      Testing, Security, and Privacy

      +

      Testing, Security, and Privacy

      The implementation of this RFC will be tested on Rococo first.

      Proper research should be conducted on setting the configuration values of the new system since these values can have great impact on the network.

      An audit is required to ensure the implementation's correctness.

      The proposal introduces no new privacy concerns.

      -

      Performance, Ergonomics, and Compatibility

      -

      Performance

      +

      Performance, Ergonomics, and Compatibility

      +

      Performance

      This RFC should not introduce any performance impact.

      -

      Ergonomics

      +

      Ergonomics

      This RFC does not affect the current parachains, nor the parachains that intend to use the one-time payment model for parachain registration.

      -

      Compatibility

      +

      Compatibility

      This RFC does not break compatibility.

      -

      Prior Art and References

      +

      Prior Art and References

      Prior discussion on this topic: https://github.com/paritytech/polkadot-sdk/issues/1796

      -

      Unresolved Questions

      +

      Unresolved Questions

      None at this time.

      As noted in this GitHub issue, we want to raise the per-byte cost of on-chain data storage. However, a substantial increase in this cost would make it highly impractical for on-demand parachains to register on Polkadot. @@ -8016,16 +8192,16 @@ This RFC offers an alternative solution for on-demand parachains, ensuring that AuthorsPierre Krieger -

      Summary

      +

      Summary

      Rather than enforce a limit to the total memory consumption on the client side by loading the value at :heappages, enforce that limit on the runtime side.

      -

      Motivation

      +

      Motivation

      From the early days of Substrate up until recently, the runtime was present in two forms: the wasm runtime (wasm bytecode passed through an interpreter) and the native runtime (native code directly run by the client).

Since the wasm runtime has a lower amount of available memory (4 GiB maximum) compared to the native runtime, and in order to ensure that the wasm and native runtimes always produce the same outcome, it was necessary to clamp the amount of memory available to both runtimes to the same value.

      In order to achieve this, a special storage key (a "well-known" key) :heappages was introduced and represents the number of "wasm pages" (one page equals 64kiB) of memory that are available to the memory allocator of the runtimes. If this storage key is absent, it defaults to 2048, which is 128 MiB.

The native runtime has since then disappeared, but the concept of "heap pages" still exists. This RFC proposes a simplification to the design of Polkadot by removing the concept of "heap pages" as is currently known, and proposes alternative ways to achieve the goal of limiting the amount of memory available.

      -

      Stakeholders

      +

      Stakeholders

      Client implementers and low-level runtime developers.

      -

      Explanation

      +

      Explanation

      This RFC proposes the following changes to the client:

      • The client no longer considers :heappages as special.
      • @@ -8051,25 +8227,25 @@ This RFC offers an alternative solution for on-demand parachains, ensuring that

      Each parachain can choose the option that they prefer, but the author of this RFC strongly suggests either option C or B.

      -

      Drawbacks

      +

      Drawbacks

      In case of path A, there is one situation where the behaviour pre-RFC is not equivalent to the one post-RFC: when a host function that performs an allocation (for example ext_storage_get) is called, without this RFC this allocation might fail due to reaching the maximum heap pages, while after this RFC this will always succeed. This is most likely not a problem, as storage values aren't supposed to be larger than a few megabytes at the very maximum.

In the unfortunate event where the runtime runs out of memory, path B would make it more difficult to relax the memory limit, as we would need to re-upload the entire Wasm, compared to updating only :heappages in path A or before this RFC. In the case where the runtime runs out of memory only in the specific event where the Wasm runtime is modified, this could brick the chain. However, this situation is no different than the thousands of other ways that a bug in the runtime can brick a chain, and there's no reason to be particularly worried about this situation in particular.

      -

      Testing, Security, and Privacy

      +

      Testing, Security, and Privacy

      This RFC would reduce the chance of a consensus issue between clients. The :heappages are a rather obscure feature, and it is not clear what happens in some corner cases such as the value being too large (error? clamp?) or malformed. This RFC would completely erase these questions.

      -

      Performance, Ergonomics, and Compatibility

      -

      Performance

      +

      Performance, Ergonomics, and Compatibility

      +

      Performance

      In case of path A, it is unclear how performances would be affected. Path A consists in moving client-side operations to the runtime without changing these operations, and as such performance differences are expected to be minimal. Overall, we're talking about one addition/subtraction per malloc and per free, so this is more than likely completely negligible.

      In case of path B and C, the performance gain would be a net positive, as this RFC strictly removes things.

      -

      Ergonomics

      +

      Ergonomics

      This RFC would isolate the client and runtime more from each other, making it a bit easier to reason about the client or the runtime in isolation.

      -

      Compatibility

      +

      Compatibility

      Not a breaking change. The runtime-side changes can be applied immediately (without even having to wait for changes in the client), then as soon as the runtime is updated, the client can be updated without any transition period. One can even consider updating the client before the runtime, as it corresponds to path C.

      -

      Prior Art and References

      +

      Prior Art and References

      None.

      -

      Unresolved Questions

      +

      Unresolved Questions

      None.

      This RFC follows the same path as https://github.com/polkadot-fellows/RFCs/pull/4 by scoping everything related to memory allocations to the runtime.

      @@ -8109,7 +8285,7 @@ The :heappages are a rather obscure feature, and it is not clear wh AuthorAdam Clay Steeber -

      Summary

      +

      Summary

      This RFC proposes adding a trivial governance track on Kusama to facilitate X (formerly known as Twitter) posts on the @kusamanetwork account. The technical aspect of implementing this in the runtime is very inconsequential and straight-forward, though it might get more technical if the Fellowship wants to regulate this track with a non-existent permission set. If this is implemented it would need to be followed up with:

      @@ -8117,7 +8293,7 @@ with a non-existent permission set. If this is implemented it would need to be f
    • the establishment of specifications for proposing X posts via this track, and
    • the development of tools/processes to ensure that the content contained in referenda enacted in this track would be automatically posted on X.
    • -

      Motivation

      +

      Motivation

      The overall motivation for this RFC is to decentralize the management of the Kusama brand/communication channel to KSM holders. This is necessary in my opinion primarily because of the inactivity of the account in recent history, with posts spanning weeks or months apart. I am currently unaware of who/what entity manages the Kusama X account, but if they are affiliated with Parity or W3F this proposed solution could also offload some of the legal ramifications of making (or not making) @@ -8127,11 +8303,11 @@ and the community becomes totally autonomous in the management of Kusama's X pos that could be offloaded to openGov, provided this proof-of-concept is successful.

      Finally, this RFC is the epitome of experimentation that Kusama is ideal for. This proposal may spark newfound excitement for Kusama and help us realize Kusama's potential for pushing boundaries and trying new unconventional ideas.

      -

      Stakeholders

      +

      Stakeholders

      This idea has not been formalized by any individual (or group of) KSM holder(s). To my knowledge the socialization of this idea is contained entirely in my recent X post here, but it is possible that an idea like this one has been discussed in other places. It appears to me that the ecosystem would welcome a change like this which is why I am taking action to formalize the discussion.

      -

      Explanation

      +

      Explanation

      The implementation of this idea can be broken down into 3 primary phases:

      Phase 1 - Track configurations

      First, we begin with this RFC to ensure all feedback can be discussed and implemented in the proposal. After the Fellowship and the community come to a reasonable @@ -8184,7 +8360,7 @@ to implement them. Here's what would be needed:

    • a UI to allow layman users to propose referenda on this track

    After everything is complete, we can update the Kusama wiki to include documentation on the X post specifications and include links to the tools/UI.

    -

    Drawbacks

    +

    Drawbacks

    The main drawback to this change is that it requires a lot of off-chain coordination. It's easy enough to include the track on Kusama but it's a totally different challenge to make it function as intended. The tools need to be built and the auth tokens need to be managed. It would certainly add an administrative burden to whoever manages the X account since they would either need to run the tools themselves or manage auth tokens.

    @@ -8196,28 +8372,28 @@ If that happens, we risk getting Kusama banned on X!

    agency to manage posts. It wouldn't be decentralized but it would probably be more effective in terms of creating good content.

    Finally, this solution is merely pseudo-decentralization since the X account manager would still have ultimate control of the account. It's decentralized insofar as the auth tokens are given to people actually running the tools; a house of cards is required to facilitate X posts via this track. Not ideal.

    -

    Testing, Security, and Privacy

    +

    Testing, Security, and Privacy

    There's major precedent for configuring tracks on openGov given the amount of power tracks have, so it shouldn't be hard to come up with a sound configuration. That's why I recommend restricting permissions of this track to remarks and batches of remarks, or something equally inconsequential.

    Building the tools for this implementation is really straight-forward and could be audited by Fellowship members, and the community at large, on Github.

    The largest security concern would be the management of Kusama's X account's auth tokens. We would need to ensure that they aren't compromised.

    -

    Performance, Ergonomics, and Compatibility

    -

    Performance

    +

    Performance, Ergonomics, and Compatibility

    +

    Performance

    If a track on Kusama promises users that compliant referenda enacted therein would be posted on Kusama's X account, users would expect that track to perform as promised. If the house of cards tumbles down and a compliant referendum doesn't actually get anything posted, users might think that Kusama is broken or unreliable. This could be damaging to Kusama's image and cause people to question the soundness of other features on Kusama.

    As mentioned in the drawbacks, the performance of this feature would depend on off-chain coordinations. We can reduce the administrative burden of these coordinations by funding third parties with the Treasury to deal with it, but then we're relying on trusting these parties.

    -

    Ergonomics

    +

    Ergonomics

    By adding a new track to Kusama, governance platforms like Polkassembly or Nova Wallet would need to include it on their applications. This shouldn't be too much of a burden or overhead since they've already built the infrastructure for other openGov tracks.

    -

    Compatibility

    +

    Compatibility

    This change wouldn't break any compatibility as far as I know.

    References

    One reference to a similar feature requiring on-chain/off-chain coordination would be the Kappa-Sigma-Mu Society. Nothing on-chain necessarily enforces the rules or facilitates bids, challenges, defenses, etc. However, the Society has managed to maintain itself with integrity to its rules. So I don't think this is totally out of Kusama's scope. But it will require some off-chain effort to maintain.

    -

    Unresolved Questions

    +

    Unresolved Questions

    • Who will develop the tools necessary to implement this feature? How do we select them?
    • How can this idea be better implemented with on-chain/substrate features?
    • @@ -8256,11 +8432,11 @@ out of Kusama's scope. But it will require some off-chain effort to maintain.

      AuthorsJelliedOwl -

      Summary

      +

      Summary

      The current size of the decision deposit on some tracks is too high for many proposers. As a result, those needing to use it have to find someone else willing to put up the deposit for them - and a number of legitimate attempts to use the root track have timed out. This track would provide a more affordable (though slower) route for these holders to use the root track.

      -

      Motivation

      +

      Motivation

      There have been recent attempts to use the Kusama root track which have timed out with no decision deposit placed. Usually, these referenda have been related to parachain registration related issues.

      -

      Explanation

      +

      Explanation

      Propose to address this by adding a new referendum track [22] Referendum Deposit which can place the decision deposit on another referendum. This would require the following changes:

      • [Referenda Pallet] Modify the placeDecisionDesposit function to additionally allow it to be called by root, with root call bypassing the requirements for a deposit payment.
      • @@ -8284,23 +8460,23 @@ out of Kusama's scope. But it will require some off-chain effort to maintain.

        Approval & Support curves: As per the root track, timed to match the decision period
      • Maximum deciding: 10
      -

      Drawbacks

      +

      Drawbacks

      This track would provide a route to starting a root referendum with a much-reduced slashable deposit. This might be undesirable but, assuming the decision deposit cost for this track is still high enough, slashing would still act as a disincentive.

An alternative to this might be to reduce the decision deposit size for some of the more expensive tracks. However, part of the purpose of the high deposit - at least on the root track - is to prevent spamming the limited queue with junk referenda.

      -

      Testing, Security, and Privacy

      +

      Testing, Security, and Privacy

      Will need additional tests case for the modified pallet and runtime. No security or privacy issues.

      -

      Performance, Ergonomics, and Compatibility

      -

      Performance

      +

      Performance, Ergonomics, and Compatibility

      +

      Performance

      No significant performance impact.

      -

      Ergonomics

      +

      Ergonomics

      Only changes related to adding the track. Existing functionality is unchanged.

      -

      Compatibility

      +

      Compatibility

      No compatibility issues.

      -

      Prior Art and References

      +

      Prior Art and References

      -

      Unresolved Questions

      +

      Unresolved Questions

      Feedback on whether my proposed implementation of this is the best way to address the issue - including which calls the track should be allowed to make. Are the track parameters correct or should we use something different? Alternatives would be welcome.

      (source)

      Table of Contents

      @@ -8345,7 +8521,7 @@ out of Kusama's scope. But it will require some off-chain effort to maintain.

      AuthorsAbdelrahman Soliman (Boda) -

      Summary

      +

      Summary

      A pallet to facilitate enhanced multisig accounts. The main enhancement is that we store a multisig account in the state with related info (signers, threshold,..etc). The module affords enhanced control over administrative operations such as adding/removing signers, changing the threshold, account deletion, canceling an existing proposal. Each signer can approve/reject a proposal while still exists. The proposal is not intended for migrating or getting rid of existing multisig. It's to allow both options to coexist.

      For the rest of the RFC, we use the following terms:

        @@ -8353,7 +8529,7 @@ out of Kusama's scope. But it will require some off-chain effort to maintain.

        Stateful Multisig to refer to the proposed pallet.
      • Stateless Multisig to refer to the current multisig pallet in polkadot-sdk.
      -

      Motivation

      +

      Motivation

      Problem

      Entities in the Polkadot ecosystem need to have a way to manage their funds and other operations in a secure and efficient way. Multisig accounts are a common way to achieve this. Entities by definition change over time, members of the entity may change, threshold requirements may change, and the multisig account may need to be deleted. For even more enhanced hierarchical control, the multisig account may need to be controlled by other multisig accounts.

      Current native solutions for multisig operations are less optimal, performance-wise (as we'll explain later in the RFC), and lack fine-grained control over the multisig account.

      @@ -8395,12 +8571,12 @@ DAOs can utilize multisig accounts to ensure that decisions are made collectivel

    and much more...

    -

    Stakeholders

    +

    Stakeholders

    • Polkadot holders
    • Polkadot developers
    -

    Explanation

    +

    Explanation

    I've created the stateful multisig pallet during my studies in Polkadot Blockchain Academy under supervision from @shawntabrizi and @ank4n. After that, I've enhanced it to be fully functional and this is a draft PR#3300 in polkadot-sdk. I'll list all the details and design decisions in the following sections. Note that the PR is not 1-1 exactly to the current RFC as the RFC is a more polished version of the PR after updating based on the feedback and discussions.

    Let's start with a sequence diagram to illustrate the main operations of the Stateful Multisig.

    multisig operations

    @@ -8813,14 +8989,14 @@ pub type PendingProposals<T: Config> = StorageDoubleMap<
  • In case threshold is lower than the number of approvers then the proposal is still valid.
  • In case threshold is higher than the number of approvers then we catch it during execute proposal and error.
-

Drawbacks

+

Drawbacks

  • New pallet to maintain.
-

Testing, Security, and Privacy

+

Testing, Security, and Privacy

Standard audit/review requirements apply.

-

Performance, Ergonomics, and Compatibility

-

Performance

+

Performance, Ergonomics, and Compatibility

+

Performance

Doing a back-of-the-envelope calculation to prove that the stateful multisig is more efficient than the stateless multisig given its smaller footprint size on blocks.

Quick review over the extrinsics for both as it affects the block size:

Stateless Multisig: @@ -8884,13 +9060,13 @@ We have the following extrinsics:

| Stateless | N^2 | Nil | | Stateful | N | N |

So even though the stateful multisig has a larger state size, it's still more efficient in terms of block size and total footprint on the blockchain.

-

Ergonomics

+

Ergonomics

The Stateful Multisig will have better ergonomics for managing multisig accounts for both developers and end-users.

-

Compatibility

+

Compatibility

This RFC is compatible with the existing implementation and can be handled via upgrades and migration. It's not intended to replace the existing multisig pallet.

-

Prior Art and References

+

Prior Art and References

multisig pallet in polkadot-sdk

-

Unresolved Questions

+

Unresolved Questions

  • On account deletion, should we transfer remaining deposits to treasury or remove signers' addition deposits completely and consider it as fees to start with?
@@ -8940,9 +9116,9 @@ Implement call filters. This will allow multisig accounts to only accept certain AuthorsLuke Schoen -

Summary

+

Summary

This proposes to increase the maximum length of PGP Fingerprint values from a 20 bytes/chars limit to a 40 bytes/chars limit.

-

Motivation

+

Motivation

Background

Pretty Good Privacy (PGP) Fingerprints are shorter versions of their corresponding Public Key that may be printed on a business card.

They may be used by someone to validate the correct corresponding Public Key.

@@ -8960,7 +9136,7 @@ Implement call filters. This will allow multisig accounts to only accept certain

Solution Requirements

The maximum length of identity PGP Fingerprint values should be increased from the current 20 bytes/chars limit to at least a 40 bytes/chars limit to support PGP Fingerprints and GPG Fingerprints.

-

Stakeholders

+

Stakeholders

  • Any Polkadot account holder wishing to use a Polkadot on-chain identity for their:
      @@ -8969,28 +9145,28 @@ Implement call filters. This will allow multisig accounts to only accept certain
-

Explanation

+

Explanation

If a user tries to set an on-chain identity by creating an extrinsic using Polkadot.js with identity > setIdentity(info), then if they try to provide their 40 character long PGP Fingerprint or GPG Fingerprint, which is longer than the maximum length of 20 bytes/chars [u8;20], then they will encounter this error:

createType(Call):: Call: failed decoding identity.setIdentity:: Struct: failed on args: {...}:: Struct: failed on pgpFingerprint: Option<[u8;20]>:: Expected input with 20 bytes (160 bits), found 40 bytes
 

Increasing maximum length of identity PGP Fingerprint values from the current 20 bytes/chars limit to at least a 40 bytes/chars limit would overcome these errors and support PGP Fingerprints and GPG Fingerprints, satisfying the solution requirements.

-

Drawbacks

+

Drawbacks

No drawbacks have been identified.

-

Testing, Security, and Privacy

+

Testing, Security, and Privacy

Implementations would be tested for adherence by checking that 40 bytes/chars PGP Fingerprints are supported.

No effect on security or privacy has been identified than already exists.

No implementation pitfalls have been identified.

-

Performance, Ergonomics, and Compatibility

-

Performance

+

Performance, Ergonomics, and Compatibility

+

Performance

It would be an optimization, since the associated exposed interfaces to developers and end-users could start being used.

To minimize additional overhead the proposal suggests a 40 bytes/chars limit since that would at least provide support for PGP Fingerprints, satisfying the solution requirements.

-

Ergonomics

+

Ergonomics

No potential ergonomic optimizations have been identified.

-

Compatibility

+

Compatibility

Updates to Polkadot.js Apps, API and its documentation and those referring to it may be required.

-

Prior Art and References

+

Prior Art and References

No prior articles or references.

-

Unresolved Questions

+

Unresolved Questions

No further questions at this stage.

Relates to RFC entitled "Increase maximum length of identity raw data values from 32 bytes".

@@ -9038,10 +9214,10 @@ Implement call filters. This will allow multisig accounts to only accept certain AuthorsLuke Schoen -

Summary

+

Summary

This proposes to require a slashable deposit in the broker pallet when initially purchasing or renewing Bulk Coretime or Instantaneous Coretime cores.

Additionally, it proposes to record a reputational status based on the behavior of the purchaser, as it relates to their use of Kusama Coretime cores that they purchase, and to possibly reserve a proportion of the cores for prospective purchasers that have an on-chain identity.

-

Motivation

+

Motivation

Background

      There are sales of Kusama Coretime cores that are scheduled to occur later this month by Coretime Marketplace Lastic.xyz initially in limited quantities, and potentially also by RegionX in future that is subject to their Polkadot referendum #582. This poses a risk in that some Kusama Coretime core purchasers may buy Kusama Coretime cores when they have no intention of actually placing a workload on them or leasing them out, which would prevent those that wish to purchase and actually use Kusama Coretime cores from being able to use any cores at all.

Problem

@@ -9073,32 +9249,32 @@ Implement call filters. This will allow multisig accounts to only accept certain

Reputation. To disincentivise certain behaviours, a reputational status indicator could be used to record the historic behavior of the purchaser and whether on-chain judgement has determined they have adequately rectified that behaviour, as it relates to their usage of Kusama Coretime cores that they purchase.

-

Stakeholders

+

Stakeholders

  • Any Kusama account holder wishing to use the Broker pallet in any upcoming Kusama Coretime sales.
  • Any prospective Kusama Coretime purchaser, developer, and user.
  • KSM holders.
-

Drawbacks

-

Performance

+

Drawbacks

+

Performance

The slashable deposit if set too high, may result in an economic impact, where less Kusama Coretime core sales are purchased.

-

Testing, Security, and Privacy

+

Testing, Security, and Privacy

Lack of a slashable deposit in the Broker pallet is a security concern, since it exposes Kusama Coretime sales to potential abuse.

Reserving a proportion of Kusama Coretime sales cores for those with on-chain identities should not be to the exclusion of accounts that wish to remain anonymous or cause cores to be wasted unnecessarily. As such, if cores that are reserved for on-chain identities remain unsold then they should be released to anonymous accounts that are on a waiting list.

No implementation pitfalls have been identified.

-

Performance, Ergonomics, and Compatibility

-

Performance

+

Performance, Ergonomics, and Compatibility

+

Performance

It should improve performance as it reduces the potential for state bloat since there is less risk of undesirable Kusama Coretime sales activity that would be apparent with no requirement for a slashable deposit or there being no reputational risk to purchasers that waste or misuse Kusama Coretime cores.

The solution proposes to minimize the risk of some Kusama Coretime cores not even being used or leased to perform any tasks at all.

It will be important to monitor and manage the slashable deposits, purchaser reputations, and utilization of the proportion of cores that are reserved for accounts with an on-chain identity.

-

Ergonomics

+

Ergonomics

The mechanism for setting a slashable deposit amount, should avoid undue complexity for users.

-

Compatibility

+

Compatibility

Updates to Polkadot.js Apps, API and its documentation and those referring to it may be required.

-

Prior Art and References

+

Prior Art and References

Prior Art

No prior articles.

-

Unresolved Questions

+

Unresolved Questions

None

None

@@ -9133,7 +9309,7 @@ Implement call filters. This will allow multisig accounts to only accept certain AuthorsKian Paimani -

Summary

+

Summary

This RFC proposes a new pallet_inflation to be added to the Polkadot runtime, which improves inflation machinery of the Polkadot relay chain in a number of ways:

    @@ -9143,7 +9319,7 @@ inflation machinery of the Polkadot relay chain in a number of ways:

    systems, as proposed RFC32.
-

Motivation

+

Motivation

The existing inflation logic in the relay chain suffers from a number of drawbacks:

  • It is dated, as the number of parachain slots (and consequently auctions) will soon no longer be a @@ -9176,7 +9352,7 @@ changes to the token holders and researchers and further governance proposals. -

    Stakeholders

    +

    Stakeholders

    This RFC is relevant to the following stakeholders, listed from high to low impact:

    • All token holders who participate in governance, as they can possibly now propose (some degree of) @@ -9186,7 +9362,7 @@ changes may or may not require a particular governance track.
    • the means that this pallet provides.
    • All other token holders.
    -

    Explanation

    +

    Explanation

    Existing Order

    First, let's further elaborate on the existing order. The current inflation logic is deeply nested in pallet_staking, and pallet_staking::Config::EraPayout interface. Through this trait, the @@ -9309,7 +9485,7 @@ prefixed by dynamic_params.

    Whereby FixedAnnualInflationAmount is the fixed absolute value (as opposed to ratio) by which the chain inflates annually, for example 100m DOTs.

    -

    Drawbacks

    +

    Drawbacks

    The following drawbacks are noted:

    1. The solution provided here is possibly an over-engineering, if we want to achieve the goal of @@ -9323,24 +9499,24 @@ of the inflation amount being reliant on eg. the staking-rate. We acknowledge th but given that many PoS inflationary systems rely on the staking rate, we believe it is a reasonable compromise. Such parameters can be ignored if the implementation does not need them.
    -

    Testing, Security, and Privacy

    +

    Testing, Security, and Privacy

    The new pallet_inflation, among its integration into pallet_staking must be thoroughly audited and reviewed by fellows. We also emphasize on simulating the actual inflation logic using the real polkadot state with Chopsticks and try-runtime.

    -

    Performance, Ergonomics, and Compatibility

    +

    Performance, Ergonomics, and Compatibility

    The proposed system in this RFC implies a handful of extra storage reads and writes "per inflation cycle", but given that a reasonable instance of this pallet would probably decide to inflate, e.g. once per day, the performance impact is negligible.

    The drawback section above noted some ergonomic concerns.

    The "New Order" section above notes the compatibility notes with the existing staking and inflation system.

    -

    Prior Art and References

    +

    Prior Art and References

    • Previous updates to the inflation system:
    • pallet_parameters
    • https://forum.polkadot.network/t/adjusting-the-current-inflation-model-to-sustain-treasury-inflow/3301
    -

    Unresolved Questions

    +

    Unresolved Questions

    • Whether the design proposed in this RFC is worthy of the complexity implementing and integrating it? Note that a draft implementation already exists, yet the amount of further work needed to @@ -9395,13 +9571,13 @@ the pallet design as it stands, this is very unlikely.
    • AuthorsAurora Poppyseed, Philip Lucsok -

      Summary

      +

      Summary

      This RFC proposes the addition of a secondary market feature to either the broker pallet or as a separate pallet maintained by Lastic, enabling users to list and purchase regions. This includes creating, purchasing, and removing listings, as well as emitting relevant events and handling associated errors.

      -

      Motivation

      +

      Motivation

      Currently, the broker pallet lacks functionality for a secondary market, which limits users' ability to freely trade regions. This RFC aims to introduce a secure and straightforward mechanism for users to list regions they own for sale and allow other users to purchase these regions.

      While integrating this functionality directly into the broker pallet is one option, another viable approach is to implement it as a separate pallet maintained by Lastic. This separate pallet would have access to the broker pallet and add minimal functionality necessary to support the secondary market.

      Adding smart contracts to the Coretime chain could also address this need; however, this process is expected to be lengthy and complex. We cannot afford to wait for this extended timeline to enable basic secondary market functionality. By proposing either integration into the broker pallet or the creation of a dedicated pallet, we can quickly enhance the flexibility and utility of the broker pallet, making it more user-friendly and valuable.

      -

      Stakeholders

      +

      Stakeholders

      Primary stakeholders include:

      • Developers working on the broker pallet.
      • @@ -9409,7 +9585,7 @@ the pallet design as it stands, this is very unlikely.
      • Users who own regions and wish to trade them.
      • Community members interested in enhancing the broker pallet’s capabilities.
      -

      Explanation

      +

      Explanation

      This RFC introduces the following key features:

      1. @@ -9450,10 +9626,10 @@ the pallet design as it stands, this is very unlikely.
  • -

    Drawbacks

    +

    Drawbacks

    The main drawback of adding the additional complexity directly to the broker pallet is the potential increase in maintenance overhead. Therefore, we propose adding additional functionality as a separate pallet on the Coretime chain. To take the pressure off from implementing these features, implementation along with unit tests would be taken care of by Lastic (Aurora Makovac, Philip Lucsok).

    There are potential risks of security vulnerabilities in the new market functionalities, such as unauthorized region transfers or incorrect balance adjustments. Therefore, extensive security measures would have to be implemented.

    -

    Testing, Security, and Privacy

    +

    Testing, Security, and Privacy

    Testing

    • Comprehensive unit tests need to be provided to ensure the correctness of the new functionalities.
    • @@ -9469,27 +9645,27 @@ the pallet design as it stands, this is very unlikely.
      • The proposal does not introduce new privacy concerns as it only affects region trading functionality within the existing framework.
      -

      Performance, Ergonomics, and Compatibility

      -

      Performance

      +

      Performance, Ergonomics, and Compatibility

      +

      Performance

      • This feature is expected to introduce minimal overhead since it primarily involves read and write operations to storage maps.
      • Efforts will be made to optimize the code to prevent unnecessary computational costs.
      -

      Ergonomics

      +

      Ergonomics

      • The new functions are designed to be intuitive and easy to use, providing clear feedback through events and errors.
      • Documentation and examples will be provided to assist developers and users.
      -

      Compatibility

      +

      Compatibility

      • This proposal does not break compatibility with existing interfaces or previous versions.
      • No migrations are necessary as it introduces new functionality without altering existing features.
      -

      Prior Art and References

      +

      Prior Art and References

      • All related discussions are going to be under this PR.
      -

      Unresolved Questions

      +

      Unresolved Questions

      • Are there additional security measures needed to prevent potential abuses of the new functionalities?
      @@ -9536,12 +9712,12 @@ the pallet design as it stands, this is very unlikely. AuthorsAurora Poppyseed, Phil Lucksok -

      Summary

      +

      Summary

      This RFC proposes the integration of smart contracts on the Coretime chain to enhance flexibility and enable complex decentralized applications, including secondary market functionalities.

      -

      Motivation

      +

      Motivation

      Currently, the Coretime chain lacks the capability to support smart contracts, which limits the range of decentralized applications that can be developed and deployed. By enabling smart contracts, the Coretime chain can facilitate more sophisticated functionalities such as automated region trading, dynamic pricing mechanisms, and other decentralized applications that require programmable logic. This will enhance the utility of the Coretime chain, attract more developers, and create more opportunities for innovation.

      Additionally, while there is a proposal (#885) to allow EVM-compatible contracts on Polkadot’s Asset Hub, the implementation of smart contracts directly on the Coretime chain will provide synchronous interactions and avoid the complexities of asynchronous operations via XCM.

      -

      Stakeholders

      +

      Stakeholders

      Primary stakeholders include:

      • Developers working on the Coretime chain.
      • @@ -9549,7 +9725,7 @@ the pallet design as it stands, this is very unlikely.
      • Community members interested in expanding the capabilities of the Coretime chain.
      • Secondary Coretime marketplaces.
      -

      Explanation

      +

      Explanation

      This RFC introduces the following key components:

      1. @@ -9581,14 +9757,14 @@ the pallet design as it stands, this is very unlikely.
    -

    Drawbacks

    +

    Drawbacks

    There are several drawbacks to consider:

    • Complexity: Adding smart contracts introduces significant complexity to the Coretime chain, which may increase maintenance overhead and the potential for bugs.
    • Performance: The execution of smart contracts can be resource-intensive, potentially affecting the performance of the Coretime chain.
    • Security: Smart contracts are prone to vulnerabilities and exploits, necessitating rigorous security measures and continuous monitoring.
    -

    Testing, Security, and Privacy

    +

    Testing, Security, and Privacy

    Testing

    • Comprehensive unit tests and integration tests should be developed to ensure the correct functionality of smart contracts.
    • @@ -9604,30 +9780,30 @@ the pallet design as it stands, this is very unlikely.
      • The proposal does not introduce new privacy concerns as it extends existing functionalities with programmable logic.
      -

      Performance, Ergonomics, and Compatibility

      -

      Performance

      +

      Performance, Ergonomics, and Compatibility

      +

      Performance

      • The introduction of smart contracts may impact performance due to the additional computational overhead.
      • Optimization techniques, such as efficient gas fee mechanisms and resource management, should be employed to minimize performance degradation.
      -

      Ergonomics

      +

      Ergonomics

      • The new functionality should be designed to be intuitive and easy to use for developers, with comprehensive documentation and examples.
      • Provide developer tools and SDKs to facilitate the creation and deployment of smart contracts.
      -

      Compatibility

      +

      Compatibility

      • This proposal should maintain compatibility with existing interfaces and functionalities of the Coretime chain.
      • Ensure backward compatibility and provide migration paths if necessary.
      -

      Prior Art and References

      +

      Prior Art and References

      • Ethereum’s implementation of smart contracts using Solidity.
      • Polkadot’s Ink! smart contract platform.
      • Existing decentralized applications and use cases on other blockchain platforms.
      • Proposal #885: EVM-compatible contracts on Asset Hub, which highlights the community's interest in integrating smart contracts within the Polkadot ecosystem.
      -

      Unresolved Questions

      +

      Unresolved Questions

      • What specific security measures should be implemented to prevent smart contract vulnerabilities?
      • How can we ensure optimal performance while supporting complex smart contracts?
      • @@ -9674,9 +9850,9 @@ the pallet design as it stands, this is very unlikely. AuthorsRodrigo Quelhas -

        Summary

        +

        Summary

        This RFC proposes a new host function, secp256r1_ecdsa_verify_prehashed, for verifying NIST-P256 signatures. The function takes as input the message hash, r and s components of the signature, and the x and y coordinates of the public key. By providing this function, runtime authors can leverage a more efficient verification mechanism for "secp256r1" elliptic curve signatures, reducing computational costs and improving overall performance.

        -

        Motivation

        +

        Motivation

        “secp256r1” elliptic curve is a standardized curve by NIST which has the same calculations by different input parameters with “secp256k1” elliptic curve. The cost of combined attacks and the security conditions are almost the same for both curves. Adding a host function can provide signature verifications using the “secp256r1” elliptic curve in the runtime and multi-faceted benefits can occur. One important factor is that this curve is widely used and supported in many modern devices such as Apple’s Secure Enclave, Webauthn, Android Keychain which proves the user adoption. Additionally, the introduction of this host function could enable valuable features in the account abstraction which allows more efficient and flexible management of accounts by transaction signs in mobile devices. Most of the modern devices and applications rely on the “secp256r1” elliptic curve. The addition of this host function enables a more efficient verification of device native transaction signing mechanisms. For example:

          @@ -9685,11 +9861,11 @@ Most of the modern devices and applications rely on the “secp256r1” elliptic
        1. Android Keystore: Android Keystore is an API that manages the private keys and signing methods. The private keys are not processed while using Keystore as the applications’ signing method. Also, it can be done in the “Trusted Execution Environment” in the microchip.
        2. Passkeys: Passkeys is utilizing FIDO Alliance and W3C standards. It replaces passwords with cryptographic key-pairs which is also can be used for the elliptic curve cryptography.
        -

        Stakeholders

        +

        Stakeholders

        • Runtime Authors
        -

        Explanation

        +

        Explanation

        This RFC proposes a new host function for runtime authors to leverage a more efficient verification mechanism for "secp256r1" elliptic curve signatures.

        Proposed host function signature:

        #![allow(unused)]
        @@ -9701,188 +9877,23 @@ Most of the modern devices and applications rely on the “secp256r1” elliptic
         ) -> bool;
         }

        The host function MUST return true if the signature is valid or false otherwise.

        -

        Drawbacks

        +

        Drawbacks

        N/A

        -

        Testing, Security, and Privacy

        +

        Testing, Security, and Privacy

        Security

        The changes are not directly affecting the protocol security, parachains are not enforced to use the host function.

        -

        Performance, Ergonomics, and Compatibility

        -

        Performance

        +

        Performance, Ergonomics, and Compatibility

        +

        Performance

        N/A

        -

        Ergonomics

        +

        Ergonomics

        The host function proposed in this RFC allows parachain runtime developers to use a more efficient verification mechanism for "secp256r1" elliptic curve signatures.

        -

        Compatibility

        +

        Compatibility

        Parachain teams will need to include this host function to upgrade.

        -

        Prior Art and References

        +

        Prior Art and References

        -

        (source)

        -

        Table of Contents

        - -

        RFC-0117: The Unbrick Collective

        -
        - - - -
        Start Date22 August 2024
        DescriptionThe Unbrick Collective aims to help teams rescuing a para once it stops producing blocks
        AuthorsBryan Chen, Pablo Dorado
        -
        -

        Summary

        -

        A followup of the RFC-0014. This RFC proposes adding a new collective to the Polkadot Collectives -Chain: The Unbrick Collective, as well as improvements in the mechanisms that will allow teams -operating paras that had stopped producing blocks to be assisted, in order to restore the production -of blocks of these paras.

        -

        Motivation

        -

        Since the initial launch of Polkadot parachains, there have been many incidents causing parachains -to stop producing new blocks (therefore, being bricked) and many occurrences that require -Polkadot governance to update the parachain head state/wasm. This can be due to many reasons ranging -from incorrectly registering the initial head state, inability to use sudo key, bad runtime -migration, bad weight configuration, and bugs in the development of the Polkadot SDK.

        -

        Currently, when the para is not unlocked in the paras registrar1, the Root origin is required to -perform such actions, involving the governance process to invoke this origin, which can be very -resource expensive for the teams. The long voting and enactment times also could result in significant -damage to the parachain and users.

        -

        Finally, other instances of governance that might enact a call using the Root origin (like the -Polkadot Fellowship), due to the nature of their mission, are not fit to carry these kind of tasks.

        -

        In consequence, the idea of an Unbrick Collective that can provide assistance to para teams when -they brick and further protection against future halts is reasonable enough.

        -

        Stakeholders

        -
          -
        • Parachain teams
        • -
        • Parachain users
        • -
        • OpenGov users
        • -
        • Polkadot Fellowship
        • -
        -

        Explanation

        -

        The Collective

        -

        The Unbrick Collective is defined as an unranked collective of members, not paid by the Polkadot -Treasury. Its main goal is to serve as a point of contact and assistance for enacting the actions -needed to unbrick a para. Such actions are:

        -
          -
        • Updating the Parachain Verification Function (a.k.a. a new WASM) of a para.
        • -
        • Updating the head state of a para.
        • -
        • A combination of the above.
        • -
        -

        In order to ensure these changes are safe enough for the network, actions enacted by the Unbrick -Collective must be whitelisted via similar mechanisms followed by collectives like the Polkadot -Fellowship. This will prevent unintended, not overseen changes on other paras to occur.

        -

        Also, teams might opt-in to delegate handling their para in the registry to the Collective. This -allows to perform similar actions using the paras registrar, allowing for a shorter path to unbrick a -para.

        -

        The Unbrick Process

        -
        flowchart TD
        -    A[Start] 
        -    
        -    A -- Bricked --> C[Request Unbrick «via governance»]
        -    C --> D[unbrick call proposal on WhitelistedUnbrickCaller]
        -    C --> E[whitelist call proposal on the Unbrick governance]
        -    E -- call whitelisted --> F[unbrick call enacted]
        -    D -- unbrick called --> F
        -    F --> Y
        -
        -    A -- Not bricked --> O[Opt-in to the Collective]
        -    O -- Bricked --> P[Collective calls registrar]
        -    P --> Y
        -    
        -    Y[update PVF / head state] -- Unbricked --> Z[End]
        -
        -

        Initially, a para team has two paths to handle a potential unbrick of their para in the case it -stops producing blocks.

        -
          -
        1. Opt-in to the Unbrick Collective: This is done by delegating the handling of the para -in the paras registrar to the Collective. This doesn't require unlocking the para. This way, -the collective is enabled to perform changes in the paras registrar without the need for -whitelisting.
        2. -
        3. Request an Unbrick Process: In case the para hasn't delegated its handling in the paras -registrar, it'll still be possible for the para team to submit a proposal to unbrick the para, -assisted by the Collective. This process is expected to be more expedient (and less expensive) -for a team to perform than submitting a proposal on the Root governance track.
        4. -
        -

        Belonging to the Collective

        -

        The collective will be initially created without members (no seeding). There will be additional -governance proposals to set up the seed members.

        -

        The origins able to modify the members of the collective are:

        -
          -
        • The Fellows track in the Polkadot Fellowship.
        • -
        • Root track in the Relay.
        • -
        • More than two thirds of the existing Unbrick Collective.
        • -
        -

        The members are responsible for verifying the technical details of the unbrick requests (i.e. the hash -of the new PVF being set). Therefore, they must have the technical capacity to perform such tasks.

        -

        Suggested requirements to become a member are the following:

        -
          -
        • Rank 3 or above in the Polkadot Fellowship.
        • -
        • Being a CTO or Technical Lead in a para team that has opted-in to delegate the Unbrick Collective -to manage the PVF/head state of the para.
        • -
        -

        Drawbacks

        -

        The ability to modify the Head State and/or the PVF of a para means a possibility to perform -arbitrary modifications of it (i.e. take control of the native parachain token or any bridged assets -in the para).

        -

        This could introduce a new attack vector, and therefore, such great power needs to be handled -carefully.

        -

        Testing, Security, and Privacy

        -

        The implementation of this RFC will be tested on testnets (Rococo and Westend) first.

        -

        An audit will be required to ensure the implementation doesn't introduce unwanted side effects.

        -

        There are no privacy related concerns.

        -

        Performance, Ergonomics, and Compatibility

        -

        Performance

        -

        This RFC should not introduce any performance impact.

        -

        Ergonomics

        -

        This RFC should improve the experience for new and existing parachain teams, lowering the barrier -to unbrick a stalled para.

        -

        Compatibility

        -

        This RFC is fully compatible with existing interfaces.

        -

        Prior Art and References

        - -

        Unresolved Questions

        -
          -
        • What are the parameters for the WhitelistedUnbrickCaller track?
        • -
        • Any other methods that shall be updated to accept Unbrick origin?
        • -
        • Any other requirements to become a member?
        • -
        • We would like to keep this simple, so no funding support from the Polkadot treasury. But do we -want to compensate the members somehow? i.e. Allow parachain teams to donate to the collective
        • -
        • Do we want to have this collective offer additional technical support to help bricked parachains? -i.e. help debug the code, create the rescue plan, create postmortem report, provide resources on -how to avoid getting bricked
        • -
        - -
        1 -

        The paras registrar refers to a pallet in the Relay, responsible for gathering registration info -of the paras, the locked/unlocked state, and the manager info.

        -
        -

        (source)

        Table of Contents

          diff --git a/proposed/0000-rewards.html b/proposed/0000-rewards.html index 6d62218..e50b871 100644 --- a/proposed/0000-rewards.html +++ b/proposed/0000-rewards.html @@ -90,7 +90,7 @@ diff --git a/proposed/0102-offchain-parachain-runtime-upgrades.html b/proposed/0102-offchain-parachain-runtime-upgrades.html index 2ca296a..7c270d0 100644 --- a/proposed/0102-offchain-parachain-runtime-upgrades.html +++ b/proposed/0102-offchain-parachain-runtime-upgrades.html @@ -90,7 +90,7 @@ diff --git a/proposed/0106-xcm-remove-fees-mode.html b/proposed/0106-xcm-remove-fees-mode.html index 1a47afb..ed15ef2 100644 --- a/proposed/0106-xcm-remove-fees-mode.html +++ b/proposed/0106-xcm-remove-fees-mode.html @@ -90,7 +90,7 @@ diff --git a/proposed/0111-pure-proxy-replication.html b/proposed/0111-pure-proxy-replication.html index 7759fef..6188564 100644 --- a/proposed/0111-pure-proxy-replication.html +++ b/proposed/0111-pure-proxy-replication.html @@ -90,7 +90,7 @@ diff --git a/proposed/0112-compress-state-response-message-in-state-sync.html b/proposed/0112-compress-state-response-message-in-state-sync.html index 49867e7..3249953 100644 --- a/proposed/0112-compress-state-response-message-in-state-sync.html +++ b/proposed/0112-compress-state-response-message-in-state-sync.html @@ -90,7 +90,7 @@ @@ -253,7 +253,7 @@ for compression. - @@ -267,7 +267,7 @@ for compression. - diff --git a/stale/0117-unbrick-collective.html b/proposed/0117-unbrick-collective.html similarity index 65% rename from stale/0117-unbrick-collective.html rename to proposed/0117-unbrick-collective.html index 9db91a2..41ce34c 100644 --- a/stale/0117-unbrick-collective.html +++ b/proposed/0117-unbrick-collective.html @@ -90,7 +90,7 @@ @@ -252,34 +252,40 @@ Fellowship. This will prevent unintended, not overseen changes on other paras to

          Also, teams might opt-in to delegate handling their para in the registry to the Collective. This allows to perform similar actions using the paras registrar, allowing for a shorter path to unbrick a para.

          +

          Initially, the unbrick collective has powers similar to a parachain's own sudo, but permits more +decentralized control. In the future, Polkadot shall provide functionality like SPREE or JAM that +exceeds sudo permissions, so the unbrick collective cannot modify those state roots or code.

          The Unbrick Process

          flowchart TD
               A[Start] 
          +
          +    A -- Bricked --> C[Request para unlock via Root]
          +    C -- Approved --> Y
          +    C -- Rejected --> A
               
          -    A -- Bricked --> C[Request Unbrick «via governance»]
          -    C --> D[unbrick call proposal on WhitelistedUnbrickCaller]
          -    C --> E[whitelist call proposal on the Unbrick governance]
          +    D[unbrick call proposal on WhitelistedUnbrickCaller]
          +    E[whitelist call proposal on the Unbrick governance]
               E -- call whitelisted --> F[unbrick call enacted]
               D -- unbrick called --> F
               F --> Y
           
               A -- Not bricked --> O[Opt-in to the Collective]
          -    O -- Bricked --> P[Collective calls registrar]
          -    P --> Y
          -    
          +    O -- Bricked --> D
          +    O -- Bricked --> E
          +
               Y[update PVF / head state] -- Unbricked --> Z[End]
           

          Initially, a para team has two paths to handle a potential unbrick of their para in the case it stops producing blocks.

          1. Opt-in to the Unbrick Collective: This is done by delegating the handling of the para -in the paras registrar to the Collective. This doesn't require unlocking the para. This way, -the collective is enabled to perform changes in the paras registrar without the need for -whitelisting.
          2. -
          3. Request an Unbrick Process: In case the para hasn't delegated its handling in the paras -registrar, it'll be still possible for the para team to submit a proposal to unbrick the para, -assisted by the Collective. This process is expected to be more expedite (and less expensive) -for a team to perform than submitting a proposal on the Root governance track.
          4. +in the paras registrar to an origin related to the Collective. This doesn't require unlocking +the para. This way, the collective is enabled to perform changes in the paras module, after +the Unbrick Process proceeds. +
          5. Request a Para Unlock: In case the para hasn't delegated its handling in the paras +registrar, it'll be still possible for the para team to submit a proposal to unlock the para, +which can be assisted by the Collective. However, this involves submitting a proposal to the Root +governance origin.

          Belonging to the Collective

          The collective will be initially created without members (no seeding). There will be additional @@ -328,7 +334,12 @@ to unbrick a stalled para.

        • Any other methods that shall be updated to accept Unbrick origin?
        • Any other requirements to become a member?
        • We would like to keep this simple, so no funding support from the Polkadot treasury. But do we -want to compensate the members somehow? i.e. Allow parachain teams to donate to the collective
        • +want to compensate the members somehow? i.e. Allow parachain teams to donate to the collective. +
        • We hope SPREE/JAM would be carefully audited for misuse risks before being
          +provided to parachain teams, but could the unbrick collective hold an election
          +that would warrant trust beyond sudo powers?
        • +
        • An auditing framework/collective makes sense for parachain code upgrades, but
          +could also strengthen the unbrick collective.
        • Do we want to have this collective offer additional technical support to help bricked parachains? i.e. help debug the code, create the rescue plan, create postmortem report, provide resources on how to avoid getting bricked
        • @@ -344,11 +355,11 @@ of the paras, the locked/unlocked state, and the manager info.