archive: Implement archive_unstable_storage (#1846)

This PR implements the `archive_unstable_storage` method that offers
support for:
- fetching values
- fetching hashes
- iterating over keys and values
- iterating over keys and hashes
- fetching merkle values from the trie-db

A common component dedicated to RPC-V2 storage queries is created to
bridge the gap between `chainHead/storage` and `archive/storage`.
Query pagination is supported by `paginationStartKey`, similar to the
old APIs.
Similarly to `chainHead/storage`, the `archive/storage` method enforces a
maximum number of queried items per call.

The design builds upon:
https://github.com/paritytech/json-rpc-interface-spec/pull/94.
Closes https://github.com/paritytech/polkadot-sdk/issues/1512.

cc @paritytech/subxt-team

---------

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>
Co-authored-by: Niklas Adolfsson <niklasadolfsson1@gmail.com>
This commit is contained in:
Alexandru Vasile
2024-01-15 16:03:32 +02:00
committed by GitHub
parent 46090ff114
commit 53bcbb15f1
15 changed files with 1278 additions and 363 deletions
@@ -18,7 +18,10 @@
//! API trait of the archive methods.
use crate::MethodResult;
use crate::{
common::events::{ArchiveStorageResult, PaginatedStorageQuery},
MethodResult,
};
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
#[rpc(client, server)]
@@ -88,4 +91,17 @@ pub trait ArchiveApi<Hash> {
function: String,
call_parameters: String,
) -> RpcResult<MethodResult>;
/// Returns storage entries at a specific block's state.
///
/// Takes the hash of the block whose state is queried, a list of storage
/// queries — each optionally carrying a `paginationStartKey` to resume a
/// descendants iteration — and an optional hex-encoded child trie key to
/// query a default child trie instead of the main trie.
///
/// # Unstable
///
/// This method is unstable and subject to change in the future.
#[method(name = "archive_unstable_storage", blocking)]
fn archive_unstable_storage(
    &self,
    hash: Hash,
    items: Vec<PaginatedStorageQuery<String>>,
    child_trie: Option<String>,
) -> RpcResult<ArchiveStorageResult>;
}
@@ -20,14 +20,15 @@
use crate::{
archive::{error::Error as ArchiveError, ArchiveApiServer},
chain_head::hex_string,
MethodResult,
common::events::{ArchiveStorageResult, PaginatedStorageQuery},
hex_string, MethodResult,
};
use codec::Encode;
use jsonrpsee::core::{async_trait, RpcResult};
use sc_client_api::{
Backend, BlockBackend, BlockchainEvents, CallExecutor, ExecutorProvider, StorageProvider,
Backend, BlockBackend, BlockchainEvents, CallExecutor, ChildInfo, ExecutorProvider, StorageKey,
StorageProvider,
};
use sp_api::{CallApiAt, CallContext};
use sp_blockchain::{
@@ -40,6 +41,8 @@ use sp_runtime::{
};
use std::{collections::HashSet, marker::PhantomData, sync::Arc};
use super::archive_storage::ArchiveStorage;
/// An API for archive RPC calls.
pub struct Archive<BE: Backend<Block>, Block: BlockT, Client> {
/// Substrate client.
@@ -48,8 +51,12 @@ pub struct Archive<BE: Backend<Block>, Block: BlockT, Client> {
backend: Arc<BE>,
/// The hexadecimal encoded hash of the genesis block.
genesis_hash: String,
/// The maximum number of reported items by the `archive_storage` at a time.
storage_max_reported_items: usize,
/// The maximum number of queried items allowed for the `archive_storage` at a time.
storage_max_queried_items: usize,
/// Phantom member to pin the block type.
_phantom: PhantomData<(Block, BE)>,
_phantom: PhantomData<Block>,
}
impl<BE: Backend<Block>, Block: BlockT, Client> Archive<BE, Block, Client> {
@@ -58,9 +65,18 @@ impl<BE: Backend<Block>, Block: BlockT, Client> Archive<BE, Block, Client> {
client: Arc<Client>,
backend: Arc<BE>,
genesis_hash: GenesisHash,
storage_max_reported_items: usize,
storage_max_queried_items: usize,
) -> Self {
let genesis_hash = hex_string(&genesis_hash.as_ref());
Self { client, backend, genesis_hash, _phantom: PhantomData }
Self {
client,
backend,
genesis_hash,
storage_max_reported_items,
storage_max_queried_items,
_phantom: PhantomData,
}
}
}
@@ -185,4 +201,48 @@ where
Err(error) => MethodResult::err(error.to_string()),
})
}
fn archive_unstable_storage(
    &self,
    hash: Block::Hash,
    items: Vec<PaginatedStorageQuery<String>>,
    child_trie: Option<String>,
) -> RpcResult<ArchiveStorageResult> {
    // Decode the hex-encoded key (and optional pagination start key) of every
    // query up front, failing the whole call on the first invalid parameter.
    let items = items
        .into_iter()
        .map(|query| {
            let key = StorageKey(parse_hex_param(query.key)?);
            let pagination_start_key = query
                .pagination_start_key
                .map(|key| parse_hex_param(key).map(|key| StorageKey(key)))
                .transpose()?;

            // Pagination start keys are only supported for descendants queries
            // (`descendantsValues` / `descendantsHashes`); point queries have
            // nothing to paginate.
            if pagination_start_key.is_some() && !query.query_type.is_descendant_query() {
                return Err(ArchiveError::InvalidParam(
                    "Pagination start key is only supported for descendants queries"
                        .to_string(),
                ))
            }

            Ok(PaginatedStorageQuery {
                key,
                query_type: query.query_type,
                pagination_start_key,
            })
        })
        .collect::<Result<Vec<_>, ArchiveError>>()?;

    // A hex-encoded child trie key selects a default child trie to query.
    let child_trie = child_trie
        .map(|child_trie| parse_hex_param(child_trie))
        .transpose()?
        .map(ChildInfo::new_default_from_vec);

    // Delegate the storage access and the reported/queried item limits to the
    // shared storage helper.
    let storage_client = ArchiveStorage::new(
        self.client.clone(),
        self.storage_max_reported_items,
        self.storage_max_queried_items,
    );

    Ok(storage_client.handle_query(hash, items, child_trie))
}
}
@@ -0,0 +1,125 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Implementation of the `archive_storage` method.
use std::sync::Arc;
use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider};
use sp_runtime::traits::Block as BlockT;
use crate::common::{
events::{ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType},
storage::{IterQueryType, QueryIter, Storage},
};
/// Generates the response of the `archive_storage` method.
pub struct ArchiveStorage<Client, Block, BE> {
    /// Storage client.
    client: Storage<Client, Block, BE>,
    /// The maximum number of reported items by the `archive_storage` at a time.
    storage_max_reported_items: usize,
    /// The maximum number of queried items allowed for the `archive_storage` at a time.
    storage_max_queried_items: usize,
}
impl<Client, Block, BE> ArchiveStorage<Client, Block, BE> {
    /// Constructs a new [`ArchiveStorage`] backed by the given client and
    /// configured with the provided report/query limits.
    pub fn new(
        client: Arc<Client>,
        storage_max_reported_items: usize,
        storage_max_queried_items: usize,
    ) -> Self {
        let client = Storage::new(client);
        Self { client, storage_max_reported_items, storage_max_queried_items }
    }
}
impl<Client, Block, BE> ArchiveStorage<Client, Block, BE>
where
    Block: BlockT + 'static,
    BE: Backend<Block> + 'static,
    Client: StorageProvider<Block, BE> + 'static,
{
    /// Generate the response of the `archive_storage` method.
    ///
    /// Queries beyond `storage_max_queried_items` are dropped and counted as
    /// `discarded_items` in the response; descendants iterations report at
    /// most `storage_max_reported_items` entries per query.
    pub fn handle_query(
        &self,
        hash: Block::Hash,
        mut items: Vec<PaginatedStorageQuery<StorageKey>>,
        child_key: Option<ChildInfo>,
    ) -> ArchiveStorageResult {
        // Enforce the maximum number of queried items; the surplus is dropped
        // and reported back to the caller.
        let discarded_items = items.len().saturating_sub(self.storage_max_queried_items);
        items.truncate(self.storage_max_queried_items);

        let mut storage_results = Vec::with_capacity(items.len());
        for item in items {
            match item.query_type {
                StorageQueryType::Value => {
                    match self.client.query_value(hash, &item.key, child_key.as_ref()) {
                        Ok(Some(value)) => storage_results.push(value),
                        // Missing keys are silently skipped, not reported as errors.
                        Ok(None) => continue,
                        // The first error aborts the whole query.
                        Err(error) => return ArchiveStorageResult::err(error),
                    }
                },
                StorageQueryType::Hash =>
                    match self.client.query_hash(hash, &item.key, child_key.as_ref()) {
                        Ok(Some(value)) => storage_results.push(value),
                        Ok(None) => continue,
                        Err(error) => return ArchiveStorageResult::err(error),
                    },
                StorageQueryType::ClosestDescendantMerkleValue =>
                    match self.client.query_merkle_value(hash, &item.key, child_key.as_ref()) {
                        Ok(Some(value)) => storage_results.push(value),
                        Ok(None) => continue,
                        Err(error) => return ArchiveStorageResult::err(error),
                    },
                StorageQueryType::DescendantsValues => {
                    match self.client.query_iter_pagination(
                        QueryIter {
                            query_key: item.key,
                            ty: IterQueryType::Value,
                            pagination_start_key: item.pagination_start_key,
                        },
                        hash,
                        child_key.as_ref(),
                        self.storage_max_reported_items,
                    ) {
                        // The second tuple element (presumably the next
                        // pagination key) is unused here: clients resume via
                        // `paginationStartKey` — TODO confirm against `Storage`.
                        Ok((results, _)) => storage_results.extend(results),
                        Err(error) => return ArchiveStorageResult::err(error),
                    }
                },
                StorageQueryType::DescendantsHashes => {
                    match self.client.query_iter_pagination(
                        QueryIter {
                            query_key: item.key,
                            ty: IterQueryType::Hash,
                            pagination_start_key: item.pagination_start_key,
                        },
                        hash,
                        child_key.as_ref(),
                        self.storage_max_reported_items,
                    ) {
                        Ok((results, _)) => storage_results.extend(results),
                        Err(error) => return ArchiveStorageResult::err(error),
                    }
                },
            };
        }

        ArchiveStorageResult::ok(storage_results, discarded_items)
    }
}
@@ -25,6 +25,8 @@
#[cfg(test)]
mod tests;
mod archive_storage;
pub mod api;
pub mod archive;
pub mod error;
+536 -11
View File
@@ -16,7 +16,13 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::{chain_head::hex_string, MethodResult};
use crate::{
common::events::{
ArchiveStorageMethodOk, ArchiveStorageResult, PaginatedStorageQuery, StorageQueryType,
StorageResultType,
},
hex_string, MethodResult,
};
use super::{archive::Archive, *};
@@ -24,17 +30,20 @@ use assert_matches::assert_matches;
use codec::{Decode, Encode};
use jsonrpsee::{
core::error::Error,
rpc_params,
types::{error::CallError, EmptyServerParams as EmptyParams},
RpcModule,
};
use sc_block_builder::BlockBuilderBuilder;
use sc_client_api::ChildInfo;
use sp_blockchain::HeaderBackend;
use sp_consensus::BlockOrigin;
use sp_core::{Blake2Hasher, Hasher};
use sp_runtime::{
traits::{Block as BlockT, Header as HeaderT},
SaturatedConversion,
};
use std::sync::Arc;
use std::{collections::HashMap, sync::Arc};
use substrate_test_runtime::Transfer;
use substrate_test_runtime_client::{
prelude::*, runtime, Backend, BlockBuilderExt, Client, ClientBlockImportExt,
@@ -42,23 +51,39 @@ use substrate_test_runtime_client::{
const CHAIN_GENESIS: [u8; 32] = [0; 32];
const INVALID_HASH: [u8; 32] = [1; 32];
const MAX_PAGINATION_LIMIT: usize = 5;
const MAX_QUERIED_LIMIT: usize = 5;
const KEY: &[u8] = b":mock";
const VALUE: &[u8] = b"hello world";
const CHILD_STORAGE_KEY: &[u8] = b"child";
const CHILD_VALUE: &[u8] = b"child value";
type Header = substrate_test_runtime_client::runtime::Header;
type Block = substrate_test_runtime_client::runtime::Block;
fn setup_api() -> (Arc<Client<Backend>>, RpcModule<Archive<Backend, Block, Client<Backend>>>) {
let builder = TestClientBuilder::new();
fn setup_api(
max_returned_items: usize,
max_queried_items: usize,
) -> (Arc<Client<Backend>>, RpcModule<Archive<Backend, Block, Client<Backend>>>) {
let child_info = ChildInfo::new_default(CHILD_STORAGE_KEY);
let builder = TestClientBuilder::new().add_extra_child_storage(
&child_info,
KEY.to_vec(),
CHILD_VALUE.to_vec(),
);
let backend = builder.backend();
let client = Arc::new(builder.build());
let api = Archive::new(client.clone(), backend, CHAIN_GENESIS).into_rpc();
let api =
Archive::new(client.clone(), backend, CHAIN_GENESIS, max_returned_items, max_queried_items)
.into_rpc();
(client, api)
}
#[tokio::test]
async fn archive_genesis() {
let (_client, api) = setup_api();
let (_client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
let genesis: String =
api.call("archive_unstable_genesisHash", EmptyParams::new()).await.unwrap();
@@ -67,7 +92,7 @@ async fn archive_genesis() {
#[tokio::test]
async fn archive_body() {
let (mut client, api) = setup_api();
let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
// Invalid block hash.
let invalid_hash = hex_string(&INVALID_HASH);
@@ -101,7 +126,7 @@ async fn archive_body() {
#[tokio::test]
async fn archive_header() {
let (mut client, api) = setup_api();
let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
// Invalid block hash.
let invalid_hash = hex_string(&INVALID_HASH);
@@ -135,7 +160,7 @@ async fn archive_header() {
#[tokio::test]
async fn archive_finalized_height() {
let (client, api) = setup_api();
let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
let client_height: u32 = client.info().finalized_number.saturated_into();
@@ -147,7 +172,7 @@ async fn archive_finalized_height() {
#[tokio::test]
async fn archive_hash_by_height() {
let (mut client, api) = setup_api();
let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
// Genesis height.
let hashes: Vec<String> = api.call("archive_unstable_hashByHeight", [0]).await.unwrap();
@@ -253,7 +278,7 @@ async fn archive_hash_by_height() {
#[tokio::test]
async fn archive_call() {
let (mut client, api) = setup_api();
let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);
let invalid_hash = hex_string(&INVALID_HASH);
// Invalid parameter (non-hex).
@@ -309,3 +334,503 @@ async fn archive_call() {
let expected = MethodResult::ok("0x0000000000000000");
assert_eq!(result, expected);
}
#[tokio::test]
async fn archive_storage_hashes_values() {
    let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);

    // Import an empty block on top of genesis: the queried key does not exist yet.
    let block = BlockBuilderBuilder::new(&*client)
        .on_parent_block(client.chain_info().genesis_hash)
        .with_parent_block_number(0)
        .build()
        .unwrap()
        .build()
        .unwrap()
        .block;
    client.import(BlockOrigin::Own, block.clone()).await.unwrap();
    let block_hash = format!("{:?}", block.header.hash());
    let key = hex_string(&KEY);

    // One query of each supported type, all for the same key.
    let items: Vec<PaginatedStorageQuery<String>> = vec![
        PaginatedStorageQuery {
            key: key.clone(),
            query_type: StorageQueryType::DescendantsHashes,
            pagination_start_key: None,
        },
        PaginatedStorageQuery {
            key: key.clone(),
            query_type: StorageQueryType::DescendantsValues,
            pagination_start_key: None,
        },
        PaginatedStorageQuery {
            key: key.clone(),
            query_type: StorageQueryType::Hash,
            pagination_start_key: None,
        },
        PaginatedStorageQuery {
            key: key.clone(),
            query_type: StorageQueryType::Value,
            pagination_start_key: None,
        },
    ];

    let result: ArchiveStorageResult = api
        .call("archive_unstable_storage", rpc_params![&block_hash, items.clone()])
        .await
        .unwrap();
    match result {
        ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
            // Key has not been imported yet.
            assert_eq!(result.len(), 0);
            assert_eq!(discarded_items, 0);
        },
        _ => panic!("Unexpected result"),
    };

    // Import a block with the given key value pair.
    let mut builder = BlockBuilderBuilder::new(&*client)
        .on_parent_block(block.hash())
        .with_parent_block_number(1)
        .build()
        .unwrap();
    builder.push_storage_change(KEY.to_vec(), Some(VALUE.to_vec())).unwrap();
    let block = builder.build().unwrap().block;
    client.import(BlockOrigin::Own, block.clone()).await.unwrap();
    let block_hash = format!("{:?}", block.header.hash());
    let expected_hash = format!("{:?}", Blake2Hasher::hash(&VALUE));
    let expected_value = hex_string(&VALUE);

    let result: ArchiveStorageResult = api
        .call("archive_unstable_storage", rpc_params![&block_hash, items])
        .await
        .unwrap();
    match result {
        ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
            // Results follow query order: descendants queries on an exact key
            // yield the key itself, then the point hash/value queries.
            assert_eq!(result.len(), 4);
            assert_eq!(discarded_items, 0);

            assert_eq!(result[0].key, key);
            assert_eq!(result[0].result, StorageResultType::Hash(expected_hash.clone()));
            assert_eq!(result[1].key, key);
            assert_eq!(result[1].result, StorageResultType::Value(expected_value.clone()));
            assert_eq!(result[2].key, key);
            assert_eq!(result[2].result, StorageResultType::Hash(expected_hash));
            assert_eq!(result[3].key, key);
            assert_eq!(result[3].result, StorageResultType::Value(expected_value));
        },
        _ => panic!("Unexpected result"),
    };
}
#[tokio::test]
async fn archive_storage_closest_merkle_value() {
    let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT);

    /// The core of this test.
    ///
    /// Checks keys that are an exact match, keys with descendants and keys
    /// that should not return values.
    ///
    /// Returns (key, merkle value) pairs.
    async fn expect_merkle_request(
        api: &RpcModule<Archive<Backend, Block, Client<Backend>>>,
        block_hash: String,
    ) -> HashMap<String, String> {
        let result: ArchiveStorageResult = api
            .call(
                "archive_unstable_storage",
                rpc_params![
                    &block_hash,
                    vec![
                        PaginatedStorageQuery {
                            key: hex_string(b":AAAA"),
                            query_type: StorageQueryType::ClosestDescendantMerkleValue,
                            pagination_start_key: None,
                        },
                        PaginatedStorageQuery {
                            key: hex_string(b":AAAB"),
                            query_type: StorageQueryType::ClosestDescendantMerkleValue,
                            pagination_start_key: None,
                        },
                        // Keys with descendants.
                        PaginatedStorageQuery {
                            key: hex_string(b":A"),
                            query_type: StorageQueryType::ClosestDescendantMerkleValue,
                            pagination_start_key: None,
                        },
                        PaginatedStorageQuery {
                            key: hex_string(b":AA"),
                            query_type: StorageQueryType::ClosestDescendantMerkleValue,
                            pagination_start_key: None,
                        },
                        // Keys below this comment do not produce a result.
                        // Keys that exceed the keyspace of the trie.
                        PaginatedStorageQuery {
                            key: hex_string(b":AAAAX"),
                            query_type: StorageQueryType::ClosestDescendantMerkleValue,
                            pagination_start_key: None,
                        },
                        PaginatedStorageQuery {
                            key: hex_string(b":AAABX"),
                            query_type: StorageQueryType::ClosestDescendantMerkleValue,
                            pagination_start_key: None,
                        },
                        // Keys that are not part of the trie.
                        PaginatedStorageQuery {
                            key: hex_string(b":AAX"),
                            query_type: StorageQueryType::ClosestDescendantMerkleValue,
                            pagination_start_key: None,
                        },
                        PaginatedStorageQuery {
                            key: hex_string(b":AAAX"),
                            query_type: StorageQueryType::ClosestDescendantMerkleValue,
                            pagination_start_key: None,
                        },
                    ]
                ],
            )
            .await
            .unwrap();

        let merkle_values: HashMap<_, _> = match result {
            ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, .. }) => result
                .into_iter()
                .map(|res| {
                    let value = match res.result {
                        StorageResultType::ClosestDescendantMerkleValue(value) => value,
                        _ => panic!("Unexpected StorageResultType"),
                    };
                    (res.key, value)
                })
                .collect(),
            _ => panic!("Unexpected result"),
        };

        // Response for AAAA, AAAB, A and AA.
        assert_eq!(merkle_values.len(), 4);

        // While checking for expected merkle values to align,
        // the following will check that the returned keys are
        // expected.

        // Values for AAAA and AAAB are different.
        assert_ne!(
            merkle_values.get(&hex_string(b":AAAA")).unwrap(),
            merkle_values.get(&hex_string(b":AAAB")).unwrap()
        );

        // Values for A and AA should be on the same branch node.
        assert_eq!(
            merkle_values.get(&hex_string(b":A")).unwrap(),
            merkle_values.get(&hex_string(b":AA")).unwrap()
        );
        // The branch node value must be different than the leaf of either
        // AAAA and AAAB.
        assert_ne!(
            merkle_values.get(&hex_string(b":A")).unwrap(),
            merkle_values.get(&hex_string(b":AAAA")).unwrap()
        );
        assert_ne!(
            merkle_values.get(&hex_string(b":A")).unwrap(),
            merkle_values.get(&hex_string(b":AAAB")).unwrap()
        );

        merkle_values
    }

    // Import a new block with storage changes.
    let mut builder = BlockBuilderBuilder::new(&*client)
        .on_parent_block(client.chain_info().genesis_hash)
        .with_parent_block_number(0)
        .build()
        .unwrap();
    builder.push_storage_change(b":AAAA".to_vec(), Some(vec![1; 64])).unwrap();
    builder.push_storage_change(b":AAAB".to_vec(), Some(vec![2; 64])).unwrap();
    let block = builder.build().unwrap().block;
    let block_hash = format!("{:?}", block.header.hash());
    client.import(BlockOrigin::Own, block.clone()).await.unwrap();

    let merkle_values_lhs = expect_merkle_request(&api, block_hash).await;

    // Import a new block and change the AAAB value.
    let mut builder = BlockBuilderBuilder::new(&*client)
        .on_parent_block(block.hash())
        .with_parent_block_number(1)
        .build()
        .unwrap();
    builder.push_storage_change(b":AAAA".to_vec(), Some(vec![1; 64])).unwrap();
    builder.push_storage_change(b":AAAB".to_vec(), Some(vec![3; 64])).unwrap();
    let block = builder.build().unwrap().block;
    let block_hash = format!("{:?}", block.header.hash());
    client.import(BlockOrigin::Own, block.clone()).await.unwrap();

    let merkle_values_rhs = expect_merkle_request(&api, block_hash).await;

    // Change propagated to the root.
    assert_ne!(
        merkle_values_lhs.get(&hex_string(b":A")).unwrap(),
        merkle_values_rhs.get(&hex_string(b":A")).unwrap()
    );
    assert_ne!(
        merkle_values_lhs.get(&hex_string(b":AAAB")).unwrap(),
        merkle_values_rhs.get(&hex_string(b":AAAB")).unwrap()
    );
    // However the AAAA branch leaf remains unchanged.
    assert_eq!(
        merkle_values_lhs.get(&hex_string(b":AAAA")).unwrap(),
        merkle_values_rhs.get(&hex_string(b":AAAA")).unwrap()
    );
}
#[tokio::test]
async fn archive_storage_paginate_iterations() {
    // 1 iteration allowed before pagination kicks in.
    let (mut client, api) = setup_api(1, MAX_QUERIED_LIMIT);

    // Import a new block with storage changes.
    // NOTE(review): keys are returned in lexicographic byte order, hence
    // `:moD` (0x44) sorts before `:moc` (0x63) in the pagination below.
    let mut builder = BlockBuilderBuilder::new(&*client)
        .on_parent_block(client.chain_info().genesis_hash)
        .with_parent_block_number(0)
        .build()
        .unwrap();
    builder.push_storage_change(b":m".to_vec(), Some(b"a".to_vec())).unwrap();
    builder.push_storage_change(b":mo".to_vec(), Some(b"ab".to_vec())).unwrap();
    builder.push_storage_change(b":moc".to_vec(), Some(b"abc".to_vec())).unwrap();
    builder.push_storage_change(b":moD".to_vec(), Some(b"abcmoD".to_vec())).unwrap();
    builder.push_storage_change(b":mock".to_vec(), Some(b"abcd".to_vec())).unwrap();
    let block = builder.build().unwrap().block;
    let block_hash = format!("{:?}", block.header.hash());
    client.import(BlockOrigin::Own, block.clone()).await.unwrap();

    // Calling with an invalid hash.
    let invalid_hash = hex_string(&INVALID_HASH);
    let result: ArchiveStorageResult = api
        .call(
            "archive_unstable_storage",
            rpc_params![
                &invalid_hash,
                vec![PaginatedStorageQuery {
                    key: hex_string(b":m"),
                    query_type: StorageQueryType::DescendantsValues,
                    pagination_start_key: None,
                }]
            ],
        )
        .await
        .unwrap();
    match result {
        ArchiveStorageResult::Err(_) => (),
        _ => panic!("Unexpected result"),
    };

    // Valid call with storage at the key.
    let result: ArchiveStorageResult = api
        .call(
            "archive_unstable_storage",
            rpc_params![
                &block_hash,
                vec![PaginatedStorageQuery {
                    key: hex_string(b":m"),
                    query_type: StorageQueryType::DescendantsValues,
                    pagination_start_key: None,
                }]
            ],
        )
        .await
        .unwrap();
    match result {
        ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
            // Only one item per call: the exact key itself comes first.
            assert_eq!(result.len(), 1);
            assert_eq!(discarded_items, 0);

            assert_eq!(result[0].key, hex_string(b":m"));
            assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a")));
        },
        _ => panic!("Unexpected result"),
    };

    // Continue with pagination.
    let result: ArchiveStorageResult = api
        .call(
            "archive_unstable_storage",
            rpc_params![
                &block_hash,
                vec![PaginatedStorageQuery {
                    key: hex_string(b":m"),
                    query_type: StorageQueryType::DescendantsValues,
                    pagination_start_key: Some(hex_string(b":m")),
                }]
            ],
        )
        .await
        .unwrap();
    match result {
        ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
            assert_eq!(result.len(), 1);
            assert_eq!(discarded_items, 0);

            assert_eq!(result[0].key, hex_string(b":mo"));
            assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"ab")));
        },
        _ => panic!("Unexpected result"),
    };

    // Continue with pagination.
    let result: ArchiveStorageResult = api
        .call(
            "archive_unstable_storage",
            rpc_params![
                &block_hash,
                vec![PaginatedStorageQuery {
                    key: hex_string(b":m"),
                    query_type: StorageQueryType::DescendantsValues,
                    pagination_start_key: Some(hex_string(b":mo")),
                }]
            ],
        )
        .await
        .unwrap();
    match result {
        ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
            assert_eq!(result.len(), 1);
            assert_eq!(discarded_items, 0);

            // `:moD` precedes `:moc` in byte order ('D' < 'c').
            assert_eq!(result[0].key, hex_string(b":moD"));
            assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcmoD")));
        },
        _ => panic!("Unexpected result"),
    };

    // Continue with pagination.
    let result: ArchiveStorageResult = api
        .call(
            "archive_unstable_storage",
            rpc_params![
                &block_hash,
                vec![PaginatedStorageQuery {
                    key: hex_string(b":m"),
                    query_type: StorageQueryType::DescendantsValues,
                    pagination_start_key: Some(hex_string(b":moD")),
                }]
            ],
        )
        .await
        .unwrap();
    match result {
        ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
            assert_eq!(result.len(), 1);
            assert_eq!(discarded_items, 0);

            assert_eq!(result[0].key, hex_string(b":moc"));
            assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abc")));
        },
        _ => panic!("Unexpected result"),
    };

    // Continue with pagination.
    let result: ArchiveStorageResult = api
        .call(
            "archive_unstable_storage",
            rpc_params![
                &block_hash,
                vec![PaginatedStorageQuery {
                    key: hex_string(b":m"),
                    query_type: StorageQueryType::DescendantsValues,
                    pagination_start_key: Some(hex_string(b":moc")),
                }]
            ],
        )
        .await
        .unwrap();
    match result {
        ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
            assert_eq!(result.len(), 1);
            assert_eq!(discarded_items, 0);

            assert_eq!(result[0].key, hex_string(b":mock"));
            assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"abcd")));
        },
        _ => panic!("Unexpected result"),
    };

    // Continue with pagination until no keys are returned.
    let result: ArchiveStorageResult = api
        .call(
            "archive_unstable_storage",
            rpc_params![
                &block_hash,
                vec![PaginatedStorageQuery {
                    key: hex_string(b":m"),
                    query_type: StorageQueryType::DescendantsValues,
                    pagination_start_key: Some(hex_string(b":mock")),
                }]
            ],
        )
        .await
        .unwrap();
    match result {
        ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
            // Iteration exhausted: no more descendants past `:mock`.
            assert_eq!(result.len(), 0);
            assert_eq!(discarded_items, 0);
        },
        _ => panic!("Unexpected result"),
    };
}
#[tokio::test]
async fn archive_storage_discarded_items() {
    // One query at a time.
    let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, 1);

    // Import a new block with storage changes.
    let mut builder = BlockBuilderBuilder::new(&*client)
        .on_parent_block(client.chain_info().genesis_hash)
        .with_parent_block_number(0)
        .build()
        .unwrap();
    builder.push_storage_change(b":m".to_vec(), Some(b"a".to_vec())).unwrap();
    let block = builder.build().unwrap().block;
    let block_hash = format!("{:?}", block.header.hash());
    client.import(BlockOrigin::Own, block.clone()).await.unwrap();

    // Valid call with storage at the key.
    let result: ArchiveStorageResult = api
        .call(
            "archive_unstable_storage",
            rpc_params![
                &block_hash,
                vec![
                    PaginatedStorageQuery {
                        key: hex_string(b":m"),
                        query_type: StorageQueryType::Value,
                        pagination_start_key: None,
                    },
                    PaginatedStorageQuery {
                        key: hex_string(b":m"),
                        query_type: StorageQueryType::Hash,
                        pagination_start_key: None,
                    },
                    PaginatedStorageQuery {
                        key: hex_string(b":m"),
                        query_type: StorageQueryType::Hash,
                        pagination_start_key: None,
                    }
                ]
            ],
        )
        .await
        .unwrap();
    match result {
        ArchiveStorageResult::Ok(ArchiveStorageMethodOk { result, discarded_items }) => {
            // Only the first query is answered; the other two exceed the
            // queried-items limit of 1 and are reported as discarded.
            assert_eq!(result.len(), 1);
            assert_eq!(discarded_items, 2);

            assert_eq!(result[0].key, hex_string(b":m"));
            assert_eq!(result[0].result, StorageResultType::Value(hex_string(b"a")));
        },
        _ => panic!("Unexpected result"),
    };
}