mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-04-30 00:17:56 +00:00
Introduce Pallet paged-list (#14120)
* Prototype StoragePagedList Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add drain Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Remove stale docs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add fuzzer tests Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Update Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Review Co-authored-by: Koute <koute@users.noreply.github.com> Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * fmt Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Docs and clippy Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Sum docs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Cleanup Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Undo WIP Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add pallet-paged-list Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Move code to pallet Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Move fuzzer Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Cleanup Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * fmt Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * docs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Rename Appendix -> Appender Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Rename clear -> delete Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Feature gate testing stuff Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Docs review Co-authored-by: Koute <koute@users.noreply.github.com> Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Cleanup Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * doc review Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Review renames Co-authored-by: Koute <koute@users.noreply.github.com> 
Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Add docs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix fuzzer Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Docs + examples Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Remove hasher Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Remove empty Event and Call Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Remove MaxPages Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix docs Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Test eager page removal Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Cleanup Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Update frame/paged-list/src/paged_list.rs Co-authored-by: Koute <koute@users.noreply.github.com> * Fix docs Co-authored-by: Koute <koute@users.noreply.github.com> Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Remove as_*_vec Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Update versions Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Rename ValuesPerPage -> ValuesPerNewPage Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Update lockfile Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> * Fix mock Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> --------- Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io> Co-authored-by: Koute <koute@users.noreply.github.com> Co-authored-by: parity-processbot <>
This commit is contained in:
committed by
GitHub
parent
817c97d65d
commit
daa9ecf77d
Generated
+55
-2
@@ -2145,13 +2145,39 @@ version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
|
||||
|
||||
[[package]]
|
||||
name = "docify"
|
||||
version = "0.1.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "af1b04e6ef3d21119d3eb7b032bca17f99fe041e9c072f30f32cc0e1a2b1f3c4"
|
||||
dependencies = [
|
||||
"docify_macros 0.1.16",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "docify"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f6491709f76fb7ceb951244daf624d480198b427556084391d6e3c33d3ae74b9"
|
||||
dependencies = [
|
||||
"docify_macros",
|
||||
"docify_macros 0.2.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "docify_macros"
|
||||
version = "0.1.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b5610df7f2acf89a1bb5d1a66ae56b1c7fcdcfe3948856fb3ace3f644d70eb7"
|
||||
dependencies = [
|
||||
"common-path",
|
||||
"derive-syn-parse",
|
||||
"lazy_static",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"regex",
|
||||
"syn 2.0.18",
|
||||
"termcolor",
|
||||
"walkdir",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -6654,7 +6680,7 @@ dependencies = [
|
||||
name = "pallet-fast-unstake"
|
||||
version = "4.0.0-dev"
|
||||
dependencies = [
|
||||
"docify",
|
||||
"docify 0.2.0",
|
||||
"frame-benchmarking",
|
||||
"frame-election-provider-support",
|
||||
"frame-support",
|
||||
@@ -7114,6 +7140,33 @@ dependencies = [
|
||||
"sp-std",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pallet-paged-list"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"docify 0.1.16",
|
||||
"frame-benchmarking",
|
||||
"frame-support",
|
||||
"frame-system",
|
||||
"parity-scale-codec",
|
||||
"scale-info",
|
||||
"sp-core",
|
||||
"sp-io",
|
||||
"sp-runtime",
|
||||
"sp-std",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pallet-paged-list-fuzzer"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"arbitrary",
|
||||
"frame-support",
|
||||
"honggfuzz",
|
||||
"pallet-paged-list",
|
||||
"sp-io",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pallet-preimage"
|
||||
version = "4.0.0-dev"
|
||||
|
||||
@@ -177,6 +177,8 @@ members = [
|
||||
"frame/nomination-pools/benchmarking",
|
||||
"frame/nomination-pools/test-staking",
|
||||
"frame/nomination-pools/runtime-api",
|
||||
"frame/paged-list",
|
||||
"frame/paged-list/fuzzer",
|
||||
"frame/insecure-randomness-collective-flip",
|
||||
"frame/ranked-collective",
|
||||
"frame/recovery",
|
||||
|
||||
@@ -0,0 +1,35 @@
|
||||
[package]
|
||||
name = "pallet-paged-list"
|
||||
version = "0.1.0"
|
||||
description = "FRAME pallet that provides a paged list data structure."
|
||||
authors = ["Parity Technologies <admin@parity.io>"]
|
||||
homepage = "https://substrate.io"
|
||||
edition = "2021"
|
||||
license = "Apache-2.0"
|
||||
repository = "https://github.com/paritytech/substrate"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
||||
[dependencies]
|
||||
codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive"] }
|
||||
docify = "0.1.13"
|
||||
scale-info = { version = "2.0.0", default-features = false, features = ["derive"] }
|
||||
|
||||
frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" }
|
||||
frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" }
|
||||
frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" }
|
||||
|
||||
sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" }
|
||||
sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" }
|
||||
sp-core = { version = "21.0.0", path = "../../primitives/core", default-features = false }
|
||||
sp-io = { version = "23.0.0", path = "../../primitives/io", default-features = false }
|
||||
|
||||
[features]
|
||||
default = ["std"]
|
||||
|
||||
std = ["codec/std", "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "scale-info/std", "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std"]
|
||||
|
||||
runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks"]
|
||||
|
||||
try-runtime = ["frame-support/try-runtime"]
|
||||
@@ -0,0 +1,22 @@
|
||||
[package]
|
||||
name = "pallet-paged-list-fuzzer"
|
||||
version = "0.1.0"
|
||||
authors = ["Parity Technologies <admin@parity.io>"]
|
||||
edition = "2021"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://substrate.io"
|
||||
repository = "https://github.com/paritytech/substrate/"
|
||||
description = "Fuzz storage types of pallet-paged-list"
|
||||
publish = false
|
||||
|
||||
[[bin]]
|
||||
name = "pallet-paged-list"
|
||||
path = "src/paged_list.rs"
|
||||
|
||||
[dependencies]
|
||||
arbitrary = "1.3.0"
|
||||
honggfuzz = "0.5.49"
|
||||
|
||||
frame-support = { version = "4.0.0-dev", default-features = false, features = [ "std" ], path = "../../support" }
|
||||
sp-io = { path = "../../../primitives/io", default-features = false, features = [ "std" ] }
|
||||
pallet-paged-list = { path = "../", default-features = false, features = [ "std" ] }
|
||||
@@ -0,0 +1,103 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! # Running
|
||||
//! Running this fuzzer can be done with `cargo hfuzz run pallet-paged-list`. `honggfuzz` CLI
|
||||
//! options can be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads.
|
||||
//!
|
||||
//! # Debugging a panic
|
||||
//! Once a panic is found, it can be debugged with
|
||||
//! `cargo hfuzz run-debug pallet-paged-list hfuzz_workspace/pallet-paged-list/*.fuzz`.
|
||||
//!
|
||||
//! # More information
|
||||
//! More information about `honggfuzz` can be found
|
||||
//! [here](https://docs.rs/honggfuzz/).
|
||||
|
||||
use arbitrary::Arbitrary;
|
||||
use honggfuzz::fuzz;
|
||||
|
||||
use frame_support::{storage::StorageList, StorageNoopGuard};
|
||||
use pallet_paged_list::mock::{PagedList as List, *};
|
||||
use sp_io::TestExternalities;
|
||||
type Meta = MetaOf<Test, ()>;
|
||||
|
||||
fn main() {
|
||||
loop {
|
||||
fuzz!(|data: (Vec<Op>, u8)| {
|
||||
drain_append_work(data.0, data.1);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Appends and drains random number of elements in random order and checks storage invariants.
|
||||
///
|
||||
/// It also changes the maximal number of elements per page dynamically, hence the `page_size`.
|
||||
fn drain_append_work(ops: Vec<Op>, page_size: u8) {
|
||||
if page_size == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
TestExternalities::default().execute_with(|| {
|
||||
ValuesPerNewPage::set(&page_size.into());
|
||||
let _g = StorageNoopGuard::default();
|
||||
let mut total: i64 = 0;
|
||||
|
||||
for op in ops.into_iter() {
|
||||
total += op.exec();
|
||||
|
||||
assert!(total >= 0);
|
||||
assert_eq!(List::iter().count(), total as usize);
|
||||
|
||||
// We have the assumption that the queue removes the metadata when empty.
|
||||
if total == 0 {
|
||||
assert_eq!(List::drain().count(), 0);
|
||||
assert_eq!(Meta::from_storage().unwrap_or_default(), Default::default());
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(List::drain().count(), total as usize);
|
||||
// `StorageNoopGuard` checks that there is no storage leaked.
|
||||
});
|
||||
}
|
||||
|
||||
enum Op {
|
||||
Append(Vec<u32>),
|
||||
Drain(u8),
|
||||
}
|
||||
|
||||
impl Arbitrary<'_> for Op {
|
||||
fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
|
||||
if u.arbitrary::<bool>()? {
|
||||
Ok(Op::Append(Vec::<u32>::arbitrary(u)?))
|
||||
} else {
|
||||
Ok(Op::Drain(u.arbitrary::<u8>()?))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Op {
|
||||
pub fn exec(self) -> i64 {
|
||||
match self {
|
||||
Op::Append(v) => {
|
||||
let l = v.len();
|
||||
List::append_many(v);
|
||||
l as i64
|
||||
},
|
||||
Op::Drain(v) => -(List::drain().take(v as usize).count() as i64),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,136 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! > Made with *Substrate*, for *DotSama*.
|
||||
//!
|
||||
//! [![github]](https://github.com/paritytech/substrate/frame/fast-unstake) -
|
||||
//! [![polkadot]](https://polkadot.network)
|
||||
//!
|
||||
//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white
|
||||
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
|
||||
//!
|
||||
//! # Paged List Pallet
|
||||
//!
|
||||
//! A thin wrapper pallet around a [`paged_list::StoragePagedList`]. It provides an API for a single
|
||||
//! paginated list. It can be instantiated multiple times to provide multiple lists.
|
||||
//!
|
||||
//! ## Overview
|
||||
//!
|
||||
//! The pallet is quite unique since it does not expose any `Call`s, `Error`s or `Event`s. All
|
||||
//! interaction goes through the implemented [`StorageList`][frame_support::storage::StorageList]
|
||||
//! trait.
|
||||
//!
|
||||
//! A fuzzer for testing is provided in crate `pallet-paged-list-fuzzer`.
|
||||
//!
|
||||
//! ## Examples
|
||||
//!
|
||||
//! 1. **Appending** some data to the list can happen either by [`Pallet::append_one`]:
|
||||
#![doc = docify::embed!("frame/paged-list/src/tests.rs", append_one_works)]
|
||||
//! 2. or by [`Pallet::append_many`]. This should always be preferred to repeated calls to
|
||||
//! [`Pallet::append_one`]:
|
||||
#![doc = docify::embed!("frame/paged-list/src/tests.rs", append_many_works)]
|
||||
//! 3. If you want to append many values (ie. in a loop), then best use the [`Pallet::appender`]:
|
||||
#![doc = docify::embed!("frame/paged-list/src/tests.rs", appender_works)]
|
||||
//! 4. **Iterating** over the list can be done with [`Pallet::iter`]. It uses the standard
|
||||
//! `Iterator` trait:
|
||||
#![doc = docify::embed!("frame/paged-list/src/tests.rs", iter_works)]
|
||||
//! 5. **Draining** elements happens through the [`Pallet::drain`] iterator. Note that even
|
||||
//! *peeking* a value will already remove it.
|
||||
#![doc = docify::embed!("frame/paged-list/src/tests.rs", drain_works)]
|
||||
//!
|
||||
//! ## Pallet API
|
||||
//!
|
||||
//! None. Only things to consider is the [`Config`] traits.
|
||||
//!
|
||||
//! ## Low Level / Implementation Details
|
||||
//!
|
||||
//! Implementation details are documented in [`paged_list::StoragePagedList`].
|
||||
//! All storage entries are prefixed with a unique prefix that is generated by [`ListPrefix`].
|
||||
|
||||
#![cfg_attr(not(feature = "std"), no_std)]
|
||||
|
||||
pub use pallet::*;
|
||||
|
||||
pub mod mock;
|
||||
mod paged_list;
|
||||
mod tests;
|
||||
|
||||
use codec::FullCodec;
|
||||
use frame_support::{
|
||||
pallet_prelude::StorageList,
|
||||
traits::{PalletInfoAccess, StorageInstance},
|
||||
};
|
||||
pub use paged_list::StoragePagedList;
|
||||
|
||||
#[frame_support::pallet]
|
||||
pub mod pallet {
|
||||
use super::*;
|
||||
use frame_support::pallet_prelude::*;
|
||||
|
||||
#[pallet::pallet]
|
||||
pub struct Pallet<T, I = ()>(_);
|
||||
|
||||
#[pallet::config]
|
||||
pub trait Config<I: 'static = ()>: frame_system::Config {
|
||||
/// The value type that can be stored in the list.
|
||||
type Value: FullCodec;
|
||||
|
||||
/// The number of values that can be put into newly created pages.
|
||||
///
|
||||
/// Note that this does not retroactively affect already created pages. This value can be
|
||||
/// changed at any time without requiring a runtime migration.
|
||||
#[pallet::constant]
|
||||
type ValuesPerNewPage: Get<u32>;
|
||||
}
|
||||
|
||||
/// A storage paged list akin to what the FRAME macros would generate.
|
||||
// Note that FRAME does natively support paged lists in storage.
|
||||
pub type List<T, I> = StoragePagedList<
|
||||
ListPrefix<T, I>,
|
||||
<T as Config<I>>::Value,
|
||||
<T as Config<I>>::ValuesPerNewPage,
|
||||
>;
|
||||
}
|
||||
|
||||
// This exposes the list functionality to other pallets.
|
||||
impl<T: Config<I>, I: 'static> StorageList<T::Value> for Pallet<T, I> {
|
||||
type Iterator = <List<T, I> as StorageList<T::Value>>::Iterator;
|
||||
type Appender = <List<T, I> as StorageList<T::Value>>::Appender;
|
||||
|
||||
fn iter() -> Self::Iterator {
|
||||
List::<T, I>::iter()
|
||||
}
|
||||
|
||||
fn drain() -> Self::Iterator {
|
||||
List::<T, I>::drain()
|
||||
}
|
||||
|
||||
fn appender() -> Self::Appender {
|
||||
List::<T, I>::appender()
|
||||
}
|
||||
}
|
||||
|
||||
/// Generates a unique storage prefix for each instance of the pallet.
|
||||
pub struct ListPrefix<T, I>(core::marker::PhantomData<(T, I)>);
|
||||
|
||||
impl<T: Config<I>, I: 'static> StorageInstance for ListPrefix<T, I> {
|
||||
fn pallet_prefix() -> &'static str {
|
||||
crate::Pallet::<T, I>::name()
|
||||
}
|
||||
|
||||
const STORAGE_PREFIX: &'static str = "paged_list";
|
||||
}
|
||||
@@ -0,0 +1,94 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Helpers for tests.
|
||||
|
||||
#![cfg(feature = "std")]
|
||||
|
||||
use crate::{paged_list::StoragePagedListMeta, Config, ListPrefix};
|
||||
use frame_support::traits::{ConstU16, ConstU64};
|
||||
use sp_core::H256;
|
||||
use sp_runtime::{
|
||||
traits::{BlakeTwo256, IdentityLookup},
|
||||
BuildStorage,
|
||||
};
|
||||
|
||||
type Block = frame_system::mocking::MockBlock<Test>;
|
||||
|
||||
// Configure a mock runtime to test the pallet.
|
||||
frame_support::construct_runtime!(
|
||||
pub enum Test {
|
||||
System: frame_system,
|
||||
PagedList: crate,
|
||||
PagedList2: crate::<Instance2>,
|
||||
}
|
||||
);
|
||||
|
||||
impl frame_system::Config for Test {
|
||||
type BaseCallFilter = frame_support::traits::Everything;
|
||||
type BlockWeights = ();
|
||||
type BlockLength = ();
|
||||
type DbWeight = ();
|
||||
type RuntimeOrigin = RuntimeOrigin;
|
||||
type RuntimeCall = RuntimeCall;
|
||||
type Nonce = u64;
|
||||
type Hash = H256;
|
||||
type Hashing = BlakeTwo256;
|
||||
type AccountId = u64;
|
||||
type Lookup = IdentityLookup<Self::AccountId>;
|
||||
type Block = Block;
|
||||
type RuntimeEvent = RuntimeEvent;
|
||||
type BlockHashCount = ConstU64<250>;
|
||||
type Version = ();
|
||||
type PalletInfo = PalletInfo;
|
||||
type AccountData = ();
|
||||
type OnNewAccount = ();
|
||||
type OnKilledAccount = ();
|
||||
type SystemWeightInfo = ();
|
||||
type SS58Prefix = ConstU16<42>;
|
||||
type OnSetCode = ();
|
||||
type MaxConsumers = frame_support::traits::ConstU32<16>;
|
||||
}
|
||||
|
||||
frame_support::parameter_types! {
|
||||
pub storage ValuesPerNewPage: u32 = 5;
|
||||
pub const MaxPages: Option<u32> = Some(20);
|
||||
}
|
||||
|
||||
impl crate::Config for Test {
|
||||
type Value = u32;
|
||||
type ValuesPerNewPage = ValuesPerNewPage;
|
||||
}
|
||||
|
||||
impl crate::Config<crate::Instance2> for Test {
|
||||
type Value = u32;
|
||||
type ValuesPerNewPage = ValuesPerNewPage;
|
||||
}
|
||||
|
||||
pub type MetaOf<T, I> =
|
||||
StoragePagedListMeta<ListPrefix<T, I>, <T as Config>::Value, <T as Config>::ValuesPerNewPage>;
|
||||
|
||||
/// Build genesis storage according to the mock runtime.
|
||||
pub fn new_test_ext() -> sp_io::TestExternalities {
|
||||
frame_system::GenesisConfig::<Test>::default().build_storage().unwrap().into()
|
||||
}
|
||||
|
||||
/// Run this closure in test externalities.
|
||||
pub fn test_closure<R>(f: impl FnOnce() -> R) -> R {
|
||||
let mut ext = new_test_ext();
|
||||
ext.execute_with(f)
|
||||
}
|
||||
@@ -0,0 +1,581 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Paged storage list.
|
||||
|
||||
// links are better than no links - even when they refer to private stuff.
|
||||
#![allow(rustdoc::private_intra_doc_links)]
|
||||
#![deny(rustdoc::broken_intra_doc_links)]
|
||||
#![deny(missing_docs)]
|
||||
#![deny(unsafe_code)]
|
||||
|
||||
use codec::{Decode, Encode, EncodeLike, FullCodec};
|
||||
use core::marker::PhantomData;
|
||||
use frame_support::{
|
||||
defensive,
|
||||
storage::StoragePrefixedContainer,
|
||||
traits::{Get, StorageInstance},
|
||||
CloneNoBound, DebugNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound,
|
||||
};
|
||||
use sp_runtime::traits::Saturating;
|
||||
use sp_std::prelude::*;
|
||||
|
||||
pub type PageIndex = u32;
|
||||
pub type ValueIndex = u32;
|
||||
|
||||
/// A paginated storage list.
|
||||
///
|
||||
/// # Motivation
|
||||
///
|
||||
/// This type replaces `StorageValue<Vec<V>>` in situations where only iteration and appending is
|
||||
/// needed. There are a few places where this is the case. A paginated structure reduces the memory
|
||||
/// usage when a storage transactions needs to be rolled back. The main motivation is therefore a
|
||||
/// reduction of runtime memory on storage transaction rollback. Should be configured such that the
|
||||
/// size of a page is about 64KiB. This can only be ensured when `V` implements `MaxEncodedLen`.
|
||||
///
|
||||
/// # Implementation
|
||||
///
|
||||
/// The metadata of this struct is stored in [`StoragePagedListMeta`]. The data is stored in
|
||||
/// [`Page`]s.
|
||||
///
|
||||
/// Each [`Page`] holds at most `ValuesPerNewPage` values in its `values` vector. The last page is
|
||||
/// the only one that could have less than `ValuesPerNewPage` values.
|
||||
/// **Iteration** happens by starting
|
||||
/// at [`first_page`][StoragePagedListMeta::first_page]/
|
||||
/// [`first_value_offset`][StoragePagedListMeta::first_value_offset] and incrementing these indices
|
||||
/// as long as there are elements in the page and there are pages in storage. All elements of a page
|
||||
/// are loaded once a page is read from storage. Iteration then happens on the cached elements. This
|
||||
/// reduces the number of storage `read` calls on the overlay. **Appending** to the list happens by
|
||||
/// appending to the last page by utilizing [`sp_io::storage::append`]. It allows to directly extend
|
||||
/// the elements of `values` vector of the page without loading the whole vector from storage. A new
|
||||
/// page is instantiated once [`Page::next`] overflows `ValuesPerNewPage`. Its vector will also be
|
||||
/// created through [`sp_io::storage::append`]. **Draining** advances the internal indices identical
|
||||
/// to Iteration. It additionally persists the increments to storage and thereby 'drains' elements.
|
||||
/// Completely drained pages are deleted from storage.
|
||||
///
|
||||
/// # Further Observations
|
||||
///
|
||||
/// - The encoded layout of a page is exactly its [`Page::values`]. The [`Page::next`] offset is
|
||||
/// stored in the [`StoragePagedListMeta`] instead. There is no particular reason for this,
|
||||
/// besides having all management state handy in one location.
|
||||
/// - The PoV complexity of iterating compared to a `StorageValue<Vec<V>>` is improved for
|
||||
/// "shortish" iterations and worse for total iteration. The append complexity is identical in the
|
||||
/// asymptotic case when using an `Appender`, and worse in all. For example when appending just
|
||||
/// one value.
|
||||
/// - It does incur a read overhead on the host side as compared to a `StorageValue<Vec<V>>`.
|
||||
pub struct StoragePagedList<Prefix, Value, ValuesPerNewPage> {
|
||||
_phantom: PhantomData<(Prefix, Value, ValuesPerNewPage)>,
|
||||
}
|
||||
|
||||
/// The state of a [`StoragePagedList`].
|
||||
///
|
||||
/// This struct doubles as [`frame_support::storage::StorageList::Appender`].
|
||||
#[derive(
|
||||
Encode, Decode, CloneNoBound, PartialEqNoBound, EqNoBound, DebugNoBound, DefaultNoBound,
|
||||
)]
|
||||
// todo ignore scale bounds
|
||||
pub struct StoragePagedListMeta<Prefix, Value, ValuesPerNewPage> {
|
||||
/// The first page that could contain a value.
|
||||
///
|
||||
/// Can be >0 when pages were deleted.
|
||||
pub first_page: PageIndex,
|
||||
/// The first index inside `first_page` that could contain a value.
|
||||
///
|
||||
/// Can be >0 when values were deleted.
|
||||
pub first_value_offset: ValueIndex,
|
||||
|
||||
/// The last page that could contain data.
|
||||
///
|
||||
/// Appending starts at this page index.
|
||||
pub last_page: PageIndex,
|
||||
/// The last value inside `last_page` that could contain a value.
|
||||
///
|
||||
/// Appending starts at this index. If the page does not hold a value at this index, then the
|
||||
/// whole list is empty. The only case where this can happen is when both are `0`.
|
||||
pub last_page_len: ValueIndex,
|
||||
|
||||
_phantom: PhantomData<(Prefix, Value, ValuesPerNewPage)>,
|
||||
}
|
||||
|
||||
impl<Prefix, Value, ValuesPerNewPage> frame_support::storage::StorageAppender<Value>
|
||||
for StoragePagedListMeta<Prefix, Value, ValuesPerNewPage>
|
||||
where
|
||||
Prefix: StorageInstance,
|
||||
Value: FullCodec,
|
||||
ValuesPerNewPage: Get<u32>,
|
||||
{
|
||||
fn append<EncodeLikeValue>(&mut self, item: EncodeLikeValue)
|
||||
where
|
||||
EncodeLikeValue: EncodeLike<Value>,
|
||||
{
|
||||
self.append_one(item);
|
||||
}
|
||||
}
|
||||
|
||||
impl<Prefix, Value, ValuesPerNewPage> StoragePagedListMeta<Prefix, Value, ValuesPerNewPage>
|
||||
where
|
||||
Prefix: StorageInstance,
|
||||
Value: FullCodec,
|
||||
ValuesPerNewPage: Get<u32>,
|
||||
{
|
||||
pub fn from_storage() -> Option<Self> {
|
||||
let key = Self::key();
|
||||
|
||||
sp_io::storage::get(&key).and_then(|raw| Self::decode(&mut &raw[..]).ok())
|
||||
}
|
||||
|
||||
pub fn key() -> Vec<u8> {
|
||||
meta_key::<Prefix>()
|
||||
}
|
||||
|
||||
pub fn append_one<EncodeLikeValue>(&mut self, item: EncodeLikeValue)
|
||||
where
|
||||
EncodeLikeValue: EncodeLike<Value>,
|
||||
{
|
||||
// Note: we use >= here in case someone decreased it in a runtime upgrade.
|
||||
if self.last_page_len >= ValuesPerNewPage::get() {
|
||||
self.last_page.saturating_inc();
|
||||
self.last_page_len = 0;
|
||||
}
|
||||
let key = page_key::<Prefix>(self.last_page);
|
||||
self.last_page_len.saturating_inc();
|
||||
sp_io::storage::append(&key, item.encode());
|
||||
self.store();
|
||||
}
|
||||
|
||||
pub fn store(&self) {
|
||||
let key = Self::key();
|
||||
self.using_encoded(|enc| sp_io::storage::set(&key, enc));
|
||||
}
|
||||
|
||||
pub fn reset(&mut self) {
|
||||
*self = Default::default();
|
||||
Self::delete();
|
||||
}
|
||||
|
||||
pub fn delete() {
|
||||
sp_io::storage::clear(&Self::key());
|
||||
}
|
||||
}
|
||||
|
||||
/// A page that was decoded from storage and caches its values.
|
||||
pub struct Page<V> {
|
||||
/// The index of the page.
|
||||
index: PageIndex,
|
||||
/// The remaining values of the page, to be drained by [`Page::next`].
|
||||
values: sp_std::iter::Skip<sp_std::vec::IntoIter<V>>,
|
||||
}
|
||||
|
||||
impl<V: FullCodec> Page<V> {
|
||||
/// Read the page with `index` from storage and assume the first value at `value_index`.
|
||||
pub fn from_storage<Prefix: StorageInstance>(
|
||||
index: PageIndex,
|
||||
value_index: ValueIndex,
|
||||
) -> Option<Self> {
|
||||
let key = page_key::<Prefix>(index);
|
||||
let values = sp_io::storage::get(&key)
|
||||
.and_then(|raw| sp_std::vec::Vec::<V>::decode(&mut &raw[..]).ok())?;
|
||||
if values.is_empty() {
|
||||
// Dont create empty pages.
|
||||
return None
|
||||
}
|
||||
let values = values.into_iter().skip(value_index as usize);
|
||||
|
||||
Some(Self { index, values })
|
||||
}
|
||||
|
||||
/// Whether no more values can be read from this page.
|
||||
pub fn is_eof(&self) -> bool {
|
||||
self.values.len() == 0
|
||||
}
|
||||
|
||||
/// Delete this page from storage.
|
||||
pub fn delete<Prefix: StorageInstance>(&self) {
|
||||
delete_page::<Prefix>(self.index);
|
||||
}
|
||||
}
|
||||
|
||||
/// Delete a page with `index` from storage.
|
||||
// Does not live under `Page` since it does not require the `Value` generic.
|
||||
pub(crate) fn delete_page<Prefix: StorageInstance>(index: PageIndex) {
|
||||
let key = page_key::<Prefix>(index);
|
||||
sp_io::storage::clear(&key);
|
||||
}
|
||||
|
||||
/// Storage key of a page with `index`.
|
||||
// Does not live under `Page` since it does not require the `Value` generic.
|
||||
pub(crate) fn page_key<Prefix: StorageInstance>(index: PageIndex) -> Vec<u8> {
|
||||
(StoragePagedListPrefix::<Prefix>::final_prefix(), b"page", index).encode()
|
||||
}
|
||||
|
||||
pub(crate) fn meta_key<Prefix: StorageInstance>() -> Vec<u8> {
|
||||
(StoragePagedListPrefix::<Prefix>::final_prefix(), b"meta").encode()
|
||||
}
|
||||
|
||||
impl<V> Iterator for Page<V> {
|
||||
type Item = V;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.values.next()
|
||||
}
|
||||
}
|
||||
|
||||
/// Iterates over values of a [`StoragePagedList`].
///
/// Can optionally drain the iterated values.
pub struct StoragePagedListIterator<Prefix, Value, ValuesPerNewPage> {
	// Design: we put the Page into the iterator to have fewer storage look-ups. Yes, these
	// look-ups would be cached anyway, but bothering the overlay on each `.next` call still
	// seems like a poorer trade-off than caching the page in the iterator directly. Iterating
	// and modifying is not allowed at the same time anyway, just like with maps. Note: if
	// `page` is `None` then the iterator did not find any data upon setup or ran out of pages.
	page: Option<Page<Value>>,
	// Whether `next` removes the values it yields from storage.
	drain: bool,
	// Cached cursor metadata; written back to storage while draining.
	meta: StoragePagedListMeta<Prefix, Value, ValuesPerNewPage>,
}
|
||||
|
||||
impl<Prefix, Value, ValuesPerNewPage> StoragePagedListIterator<Prefix, Value, ValuesPerNewPage>
|
||||
where
|
||||
Prefix: StorageInstance,
|
||||
Value: FullCodec,
|
||||
ValuesPerNewPage: Get<u32>,
|
||||
{
|
||||
/// Read self from the storage.
|
||||
pub fn from_meta(
|
||||
meta: StoragePagedListMeta<Prefix, Value, ValuesPerNewPage>,
|
||||
drain: bool,
|
||||
) -> Self {
|
||||
let page = Page::<Value>::from_storage::<Prefix>(meta.first_page, meta.first_value_offset);
|
||||
Self { page, drain, meta }
|
||||
}
|
||||
}
|
||||
|
||||
impl<Prefix, Value, ValuesPerNewPage> Iterator
	for StoragePagedListIterator<Prefix, Value, ValuesPerNewPage>
where
	Prefix: StorageInstance,
	Value: FullCodec,
	ValuesPerNewPage: Get<u32>,
{
	type Item = Value;

	fn next(&mut self) -> Option<Self::Item> {
		// A `None` page means the list was empty on setup or is already exhausted.
		let page = self.page.as_mut()?;
		let value = match page.next() {
			Some(value) => value,
			None => {
				// A cached page always holds at least one value, because empty pages
				// are never written to storage. Recover from the broken invariant by
				// wiping the cursor so further iteration stops cleanly.
				defensive!("There are no empty pages in storage; nuking the list");
				self.meta.reset();
				self.page = None;
				return None
			},
		};

		if page.is_eof() {
			// Current page is fully consumed: advance the cursor to the next page.
			if self.drain {
				// Eagerly remove the drained page and point the cursor past it.
				page.delete::<Prefix>();
				self.meta.first_value_offset = 0;
				self.meta.first_page.saturating_inc();
			}

			debug_assert!(!self.drain || self.meta.first_page == page.index + 1);
			self.page = Page::from_storage::<Prefix>(page.index.saturating_add(1), 0);
			if self.drain {
				if self.page.is_none() {
					// Ran out of pages: the list is empty again, reset the meta.
					self.meta.reset();
				} else {
					// Persist the advanced cursor.
					self.meta.store();
				}
			}
		} else {
			if self.drain {
				// Still inside the same page: only bump the value offset.
				self.meta.first_value_offset.saturating_inc();
				self.meta.store();
			}
		}
		Some(value)
	}
}
|
||||
|
||||
impl<Prefix, Value, ValuesPerNewPage> frame_support::storage::StorageList<Value>
|
||||
for StoragePagedList<Prefix, Value, ValuesPerNewPage>
|
||||
where
|
||||
Prefix: StorageInstance,
|
||||
Value: FullCodec,
|
||||
ValuesPerNewPage: Get<u32>,
|
||||
{
|
||||
type Iterator = StoragePagedListIterator<Prefix, Value, ValuesPerNewPage>;
|
||||
type Appender = StoragePagedListMeta<Prefix, Value, ValuesPerNewPage>;
|
||||
|
||||
fn iter() -> Self::Iterator {
|
||||
StoragePagedListIterator::from_meta(Self::read_meta(), false)
|
||||
}
|
||||
|
||||
fn drain() -> Self::Iterator {
|
||||
StoragePagedListIterator::from_meta(Self::read_meta(), true)
|
||||
}
|
||||
|
||||
fn appender() -> Self::Appender {
|
||||
Self::appender()
|
||||
}
|
||||
}
|
||||
|
||||
impl<Prefix, Value, ValuesPerNewPage> StoragePagedList<Prefix, Value, ValuesPerNewPage>
|
||||
where
|
||||
Prefix: StorageInstance,
|
||||
Value: FullCodec,
|
||||
ValuesPerNewPage: Get<u32>,
|
||||
{
|
||||
fn read_meta() -> StoragePagedListMeta<Prefix, Value, ValuesPerNewPage> {
|
||||
// Use default here to not require a setup migration.
|
||||
StoragePagedListMeta::from_storage().unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Provides a fast append iterator.
|
||||
///
|
||||
/// The list should not be modified while appending. Also don't call it recursively.
|
||||
fn appender() -> StoragePagedListMeta<Prefix, Value, ValuesPerNewPage> {
|
||||
Self::read_meta()
|
||||
}
|
||||
|
||||
/// Return the elements of the list.
|
||||
#[cfg(test)]
|
||||
fn as_vec() -> Vec<Value> {
|
||||
<Self as frame_support::storage::StorageList<_>>::iter().collect()
|
||||
}
|
||||
|
||||
/// Return and remove the elements of the list.
|
||||
#[cfg(test)]
|
||||
fn as_drained_vec() -> Vec<Value> {
|
||||
<Self as frame_support::storage::StorageList<_>>::drain().collect()
|
||||
}
|
||||
}
|
||||
|
||||
/// Provides the final prefix for a [`StoragePagedList`].
///
/// It solely exists so that when re-using it from the iterator and meta struct, none of the unused
/// generics bleed through. Otherwise when only having the `StoragePrefixedContainer` implementation
/// on the list directly, the iterator and metadata would need to carry *all* generics, even the
/// ones that are completely useless for prefix calculation.
struct StoragePagedListPrefix<Prefix>(PhantomData<Prefix>);
|
||||
|
||||
impl<Prefix> frame_support::storage::StoragePrefixedContainer for StoragePagedListPrefix<Prefix>
where
	Prefix: StorageInstance,
{
	/// Module prefix, taken from the pallet name of the `Prefix` instance.
	fn module_prefix() -> &'static [u8] {
		Prefix::pallet_prefix().as_bytes()
	}

	/// Storage prefix, taken from the storage item name of the `Prefix` instance.
	fn storage_prefix() -> &'static [u8] {
		Prefix::STORAGE_PREFIX.as_bytes()
	}
}
|
||||
|
||||
impl<Prefix, Value, ValuesPerNewPage> frame_support::storage::StoragePrefixedContainer
	for StoragePagedList<Prefix, Value, ValuesPerNewPage>
where
	Prefix: StorageInstance,
	Value: FullCodec,
	ValuesPerNewPage: Get<u32>,
{
	// Delegate to [`StoragePagedListPrefix`] so prefix derivation lives in one place.
	fn module_prefix() -> &'static [u8] {
		StoragePagedListPrefix::<Prefix>::module_prefix()
	}

	fn storage_prefix() -> &'static [u8] {
		StoragePagedListPrefix::<Prefix>::storage_prefix()
	}
}
|
||||
|
||||
/// Prelude for (doc)tests.
#[cfg(feature = "std")]
#[allow(dead_code)]
pub(crate) mod mock {
	pub use super::*;
	pub use frame_support::{
		metadata_ir::{StorageEntryModifierIR, StorageEntryTypeIR, StorageHasherIR},
		parameter_types,
		storage::{types::ValueQuery, StorageList as _},
		StorageNoopGuard,
	};
	pub use sp_io::{hashing::twox_128, TestExternalities};

	parameter_types! {
		// Small page size so the tests exercise multiple pages quickly.
		pub const ValuesPerNewPage: u32 = 5;
		pub const MaxPages: Option<u32> = Some(20);
	}

	/// A storage instance pinning the `test`/`foo` prefix pair.
	pub struct Prefix;
	impl StorageInstance for Prefix {
		fn pallet_prefix() -> &'static str {
			"test"
		}
		const STORAGE_PREFIX: &'static str = "foo";
	}

	/// The list under test.
	pub type List = StoragePagedList<Prefix, u32, ValuesPerNewPage>;
}
|
||||
|
||||
#[cfg(test)]
mod tests {
	use super::mock::*;

	#[test]
	fn append_works() {
		TestExternalities::default().execute_with(|| {
			List::append_many(0..1000);
			assert_eq!(List::as_vec(), (0..1000).collect::<Vec<_>>());
		});
	}

	/// Draining all works.
	#[test]
	fn simple_drain_works() {
		TestExternalities::default().execute_with(|| {
			let _g = StorageNoopGuard::default(); // All in all a No-Op
			List::append_many(0..1000);

			assert_eq!(List::as_drained_vec(), (0..1000).collect::<Vec<_>>());

			assert_eq!(List::read_meta(), Default::default());

			// all gone
			assert_eq!(List::as_vec(), Vec::<u32>::new());
			// Need to delete the metadata manually.
			StoragePagedListMeta::<Prefix, u32, ValuesPerNewPage>::delete();
		});
	}

	/// Drain half of the elements and iterate the rest.
	#[test]
	fn partial_drain_works() {
		TestExternalities::default().execute_with(|| {
			List::append_many(0..100);

			let vals = List::drain().take(50).collect::<Vec<_>>();
			assert_eq!(vals, (0..50).collect::<Vec<_>>());

			let meta = List::read_meta();
			// After draining 50 values (ten full pages of 5 each) the cursor sits at
			// the start of page 10.
			assert_eq!((meta.first_page, meta.first_value_offset), (10, 0));

			// 50 gone, 50 to go
			assert_eq!(List::as_vec(), (50..100).collect::<Vec<_>>());
		});
	}

	/// Draining, appending and iterating work together.
	#[test]
	fn drain_append_iter_works() {
		TestExternalities::default().execute_with(|| {
			for r in 1..=100 {
				List::append_many(0..12);
				List::append_many(0..12);

				let dropped = List::drain().take(12).collect::<Vec<_>>();
				assert_eq!(dropped, (0..12).collect::<Vec<_>>());

				assert_eq!(List::as_vec(), (0..12).cycle().take(r * 12).collect::<Vec<_>>());
			}
		});
	}

	/// Pages are removed ASAP.
	#[test]
	fn drain_eager_page_removal() {
		TestExternalities::default().execute_with(|| {
			List::append_many(0..9);

			assert!(sp_io::storage::exists(&page_key::<Prefix>(0)));
			assert!(sp_io::storage::exists(&page_key::<Prefix>(1)));

			assert_eq!(List::drain().take(5).count(), 5);
			// Page 0 is eagerly removed.
			assert!(!sp_io::storage::exists(&page_key::<Prefix>(0)));
			assert!(sp_io::storage::exists(&page_key::<Prefix>(1)));
		});
	}

	/// Appending encodes pages as `Vec`.
	#[test]
	fn append_storage_layout() {
		TestExternalities::default().execute_with(|| {
			List::append_many(0..9);

			let key = page_key::<Prefix>(0);
			let raw = sp_io::storage::get(&key).expect("Page should be present");
			let as_vec = Vec::<u32>::decode(&mut &raw[..]).unwrap();
			assert_eq!(as_vec.len(), 5, "First page contains 5");

			let key = page_key::<Prefix>(1);
			let raw = sp_io::storage::get(&key).expect("Page should be present");
			let as_vec = Vec::<u32>::decode(&mut &raw[..]).unwrap();
			assert_eq!(as_vec.len(), 4, "Second page contains 4");

			let meta = sp_io::storage::get(&meta_key::<Prefix>()).expect("Meta should be present");
			let meta: StoragePagedListMeta<Prefix, u32, ValuesPerNewPage> =
				Decode::decode(&mut &meta[..]).unwrap();
			assert_eq!(meta.first_page, 0);
			assert_eq!(meta.first_value_offset, 0);
			assert_eq!(meta.last_page, 1);
			assert_eq!(meta.last_page_len, 4);

			let page = Page::<u32>::from_storage::<Prefix>(0, 0).unwrap();
			assert_eq!(page.index, 0);
			assert_eq!(page.values.count(), 5);

			let page = Page::<u32>::from_storage::<Prefix>(1, 0).unwrap();
			assert_eq!(page.index, 1);
			assert_eq!(page.values.count(), 4);
		});
	}

	#[test]
	fn page_key_correct() {
		let got = page_key::<Prefix>(0);
		let pallet_prefix = StoragePagedListPrefix::<Prefix>::final_prefix();
		let want = (pallet_prefix, b"page", 0).encode();

		// 32 byte prefix + 4 bytes "page" + 4 byte index.
		assert_eq!(want.len(), 32 + 4 + 4);
		assert!(want.starts_with(&pallet_prefix[..]));
		assert_eq!(got, want);
	}

	#[test]
	fn meta_key_correct() {
		let got = meta_key::<Prefix>();
		let pallet_prefix = StoragePagedListPrefix::<Prefix>::final_prefix();
		let want = (pallet_prefix, b"meta").encode();

		// 32 byte prefix + 4 bytes "meta".
		assert_eq!(want.len(), 32 + 4);
		assert!(want.starts_with(&pallet_prefix[..]));
		assert_eq!(got, want);
	}

	#[test]
	fn peekable_drain_also_deletes() {
		TestExternalities::default().execute_with(|| {
			List::append_many(0..10);

			let mut iter = List::drain().peekable();
			assert_eq!(iter.peek(), Some(&0));
			// `peek` does remove one element...
			assert_eq!(List::iter().count(), 9);
		});
	}
}
|
||||
@@ -0,0 +1,108 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Mostly pallet doc-tests. Real tests are in [`super::paged_list`] and crate
|
||||
//! `pallet-paged-list-fuzzer`.
|
||||
|
||||
#![cfg(test)]
|
||||
|
||||
use crate::{mock::*, *};
|
||||
use frame_support::storage::{StorageList, StoragePrefixedContainer};
|
||||
|
||||
/// Appending a single value makes it show up in iteration.
#[docify::export]
#[test]
fn append_one_works() {
	test_closure(|| {
		PagedList::append_one(1);

		assert_eq!(PagedList::iter().collect::<Vec<_>>(), vec![1]);
	});
}
|
||||
|
||||
/// Appending a range of values preserves their order.
#[docify::export]
#[test]
fn append_many_works() {
	test_closure(|| {
		PagedList::append_many(0..3);

		assert_eq!(PagedList::iter().collect::<Vec<_>>(), vec![0, 1, 2]);
	});
}
|
||||
|
||||
/// A manually obtained appender can push values one by one or in bulk.
#[docify::export]
#[test]
fn appender_works() {
	use frame_support::storage::StorageAppender;
	test_closure(|| {
		let mut appender = PagedList::appender();

		appender.append(0);
		appender.append(1); // Repeated calls are fine here.
		appender.append_many(2..4);

		assert_eq!(PagedList::iter().collect::<Vec<_>>(), vec![0, 1, 2, 3]);
	});
}
|
||||
|
||||
/// Iteration yields values lazily and in insertion order.
#[docify::export]
#[test]
fn iter_works() {
	test_closure(|| {
		PagedList::append_many(0..10);
		let mut iter = PagedList::iter();

		assert_eq!(iter.next(), Some(0));
		assert_eq!(iter.next(), Some(1));
		assert_eq!(iter.collect::<Vec<_>>(), (2..10).collect::<Vec<_>>());
	});
}
|
||||
|
||||
/// Draining removes a value as soon as it is inspected — even by `peek`.
#[docify::export]
#[test]
fn drain_works() {
	test_closure(|| {
		PagedList::append_many(0..3);
		PagedList::drain().next();
		assert_eq!(PagedList::iter().collect::<Vec<_>>(), vec![1, 2], "0 is drained");
		PagedList::drain().peekable().peek();
		assert_eq!(PagedList::iter().collect::<Vec<_>>(), vec![2], "Peeking removed 1");
	});
}
|
||||
|
||||
/// Operations on one list instance do not affect another instance.
#[test]
fn iter_independent_works() {
	test_closure(|| {
		PagedList::append_many(0..1000);
		PagedList2::append_many(0..1000);

		assert_eq!(PagedList::iter().collect::<Vec<_>>(), (0..1000).collect::<Vec<_>>());
		// Fix: this previously re-checked `PagedList` a second time, so the second
		// list's content was never verified after appending.
		assert_eq!(PagedList2::iter().collect::<Vec<_>>(), (0..1000).collect::<Vec<_>>());

		// drain: emptying the first list must leave the second untouched.
		assert_eq!(PagedList::drain().collect::<Vec<_>>(), (0..1000).collect::<Vec<_>>());
		assert_eq!(PagedList2::iter().collect::<Vec<_>>(), (0..1000).collect::<Vec<_>>());

		assert_eq!(PagedList::iter().count(), 0);
	});
}
|
||||
|
||||
/// Lists of different pallet instances use distinct storage prefixes.
#[test]
fn prefix_distinct() {
	let p1 = List::<Test, ()>::final_prefix();
	let p2 = List::<Test, crate::Instance2>::final_prefix();
	assert_ne!(p1, p2);
}
|
||||
@@ -1553,6 +1553,7 @@ pub mod pallet_prelude {
|
||||
CountedStorageMap, Key as NMapKey, OptionQuery, ResultQuery, StorageDoubleMap,
|
||||
StorageMap, StorageNMap, StorageValue, ValueQuery,
|
||||
},
|
||||
StorageList,
|
||||
},
|
||||
traits::{
|
||||
BuildGenesisConfig, ConstU32, EnsureOrigin, Get, GetDefault, GetStorageVersion, Hooks,
|
||||
|
||||
@@ -160,6 +160,75 @@ pub trait StorageValue<T: FullCodec> {
|
||||
}
|
||||
}
|
||||
|
||||
/// A non-continuous container type.
pub trait StorageList<V: FullCodec> {
	/// Iterator for normal and draining iteration.
	type Iterator: Iterator<Item = V>;

	/// Append iterator for fast append operations.
	type Appender: StorageAppender<V>;

	/// List the elements in append order.
	fn iter() -> Self::Iterator;

	/// Drain the elements in append order.
	///
	/// Note that this drains a value as soon as it is being inspected. For example `take_while(|_|
	/// false)` still drains the first element. This also applies to `peek()`.
	fn drain() -> Self::Iterator;

	/// A fast append iterator.
	fn appender() -> Self::Appender;

	/// Append a single element.
	///
	/// Should not be called repeatedly; use `append_many` instead.
	/// Worst case linear `O(len)` with `len` being the number of elements in the list.
	fn append_one<EncodeLikeValue>(item: EncodeLikeValue)
	where
		EncodeLikeValue: EncodeLike<V>,
	{
		Self::append_many(core::iter::once(item));
	}

	/// Append many elements.
	///
	/// Should not be called repeatedly; use `appender` instead.
	/// Worst case linear `O(len + items.count())` with `len` being the number of elements in the
	/// list.
	fn append_many<EncodeLikeValue, I>(items: I)
	where
		EncodeLikeValue: EncodeLike<V>,
		I: IntoIterator<Item = EncodeLikeValue>,
	{
		// Delegate to a single appender so the per-call setup cost is paid once.
		let mut ap = Self::appender();
		ap.append_many(items);
	}
}
|
||||
|
||||
/// Append iterator to append values to a storage struct.
///
/// Can be used in situations where appending does not have constant time complexity.
pub trait StorageAppender<V: FullCodec> {
	/// Append a single item in constant time `O(1)`.
	fn append<EncodeLikeValue>(&mut self, item: EncodeLikeValue)
	where
		EncodeLikeValue: EncodeLike<V>;

	/// Append many items in linear time `O(items.count())`.
	// Note: a default impl is provided since `Self` is already assumed to be optimal for single
	// append operations.
	fn append_many<EncodeLikeValue, I>(&mut self, items: I)
	where
		EncodeLikeValue: EncodeLike<V>,
		I: IntoIterator<Item = EncodeLikeValue>,
	{
		for item in items.into_iter() {
			self.append(item);
		}
	}
}
|
||||
|
||||
/// A strongly-typed map in storage.
|
||||
///
|
||||
/// Details on implementation can be found at [`generator::StorageMap`].
|
||||
@@ -1193,6 +1262,20 @@ impl<T> Iterator for ChildTriePrefixIterator<T> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for storage types that store all their values after a unique prefix.
pub trait StoragePrefixedContainer {
	/// Module prefix. Used for generating final key.
	fn module_prefix() -> &'static [u8];

	/// Storage prefix. Used for generating final key.
	fn storage_prefix() -> &'static [u8];

	/// Final full prefix that prefixes all keys.
	fn final_prefix() -> [u8; 32] {
		// Derived from the module and storage prefix; shared by every key of the container.
		crate::storage::storage_prefix(Self::module_prefix(), Self::storage_prefix())
	}
}
|
||||
|
||||
/// Trait for maps that store all its value after a unique prefix.
|
||||
///
|
||||
/// By default the final prefix is:
|
||||
@@ -1201,7 +1284,7 @@ impl<T> Iterator for ChildTriePrefixIterator<T> {
|
||||
/// ```
|
||||
pub trait StoragePrefixedMap<Value: FullCodec> {
|
||||
/// Module prefix. Used for generating final key.
|
||||
fn module_prefix() -> &'static [u8];
|
||||
fn module_prefix() -> &'static [u8]; // TODO move to StoragePrefixedContainer
|
||||
|
||||
/// Storage prefix. Used for generating final key.
|
||||
fn storage_prefix() -> &'static [u8];
|
||||
|
||||
@@ -134,7 +134,7 @@ impl<T> ScaleContainerStreamIter<T> {
|
||||
///
|
||||
/// - `key`: Storage key of the container in the state.
|
||||
///
|
||||
/// Same as [`Self::try_new`], but logs a potential error and sets the length to `0`.
|
||||
/// Same as [`Self::new_try`], but logs a potential error and sets the length to `0`.
|
||||
pub fn new(key: Vec<u8>) -> Self {
|
||||
let mut input = StorageInput::new(key);
|
||||
let length = if input.exists() {
|
||||
|
||||
Reference in New Issue
Block a user