storage: Fix partial key storage iteration (#1298)

* storage/fix: Use partial key instead of root key for iter

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>

* storage: Allow partial key construction

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>

* storage: Allow fewer provided types than the number of hashers

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>

* storage: Error on more fields than types

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>

* testing: Check partial key iteration

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>

* storage: Rename variable wrt partial address bytes

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>

* storage: Identical storage key to root key if no keys are provided

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>

---------

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>
This commit is contained in:
Alexandru Vasile
2023-12-01 15:33:33 +02:00
committed by GitHub
parent ea8735f863
commit b03679fcd9
4 changed files with 89 additions and 6 deletions
+10 -3
View File
@@ -172,6 +172,12 @@ where
.resolve(*key_ty)
.ok_or(MetadataError::TypeNotFound(*key_ty))?;
// If the provided keys are empty, the storage address must be
// equal to the storage root address.
if self.storage_entry_keys.is_empty() {
return Ok(());
}
// If the key is a tuple, we encode each value to the corresponding tuple type.
// If the key is not a tuple, encode a single value to the key type.
let type_ids = match &ty.type_def {
@@ -181,7 +187,8 @@ where
_other => either::Either::Right(std::iter::once(*key_ty)),
};
if type_ids.len() != self.storage_entry_keys.len() {
if type_ids.len() < self.storage_entry_keys.len() {
// Provided more keys than fields.
return Err(StorageAddressError::WrongNumberOfKeys {
expected: type_ids.len(),
actual: self.storage_entry_keys.len(),
@@ -198,7 +205,7 @@ where
}
hash_bytes(&input, &hashers[0], bytes);
Ok(())
} else if hashers.len() == type_ids.len() {
} else if hashers.len() >= type_ids.len() {
let iter = self.storage_entry_keys.iter().zip(type_ids).zip(hashers);
// A hasher per field; encode and hash each field independently.
for ((key, type_id), hasher) in iter {
@@ -208,7 +215,7 @@ where
}
Ok(())
} else {
// Mismatch; wrong number of hashers/fields.
// Provided more fields than hashers.
Err(StorageAddressError::WrongNumberOfHashers {
hashers: hashers.len(),
fields: type_ids.len(),
+3 -3
View File
@@ -228,12 +228,12 @@ where
// in the iterator.
let return_type_id = return_type_from_storage_entry_type(entry.entry_type());
// The root pallet/entry bytes for this storage entry:
let address_root_bytes = super::utils::storage_address_root_bytes(&address);
// The address bytes of this entry:
let address_bytes = super::utils::storage_address_bytes(&address, &metadata)?;
let s = client
.backend()
.storage_fetch_descendant_values(address_root_bytes, block_ref.hash())
.storage_fetch_descendant_values(address_bytes, block_ref.hash())
.await?
.map(move |kv| {
let kv = match kv {
@@ -45,6 +45,9 @@ async fn storage_iter() {
let api = ctx.client();
let addr = node_runtime::storage().system().account_iter();
let addr_bytes = api.storage().address_bytes(&addr).unwrap();
assert_eq!(addr_bytes, addr.to_root_bytes());
let len = api
.storage()
.at_latest()
@@ -126,6 +126,79 @@ async fn storage_n_map_storage_lookup() -> Result<(), subxt::Error> {
Ok(())
}
#[tokio::test]
/// Verify that iterating a storage NMap with a *partial* key returns only
/// the entries under that partial key, and that the partial storage address
/// bytes extend the root address bytes.
async fn storage_partial_lookup() -> Result<(), subxt::Error> {
    let ctx = test_context().await;
    let api = ctx.client();

    // Boilerplate; we create a new asset class with ID 99, and then
    // we "approveTransfer" of some of this asset class. This gives us an
    // entry in the `Approvals` StorageNMap that we can try to look up.
    let signer = dev::alice();
    let alice: AccountId32 = dev::alice().public_key().into();
    let bob: AccountId32 = dev::bob().public_key().into();

    // Create two assets; one with ID 99 and one with ID 100.
    // Tuple layout: (asset_id, admin, delegate, approved_amount).
    let assets = [
        (99, alice.clone(), bob.clone(), 123),
        (100, bob.clone(), alice.clone(), 124),
    ];
    for (asset_id, admin, delegate, amount) in assets.clone() {
        let tx1 = node_runtime::tx()
            .assets()
            .create(asset_id, admin.into(), 1);
        let tx2 = node_runtime::tx()
            .assets()
            .approve_transfer(asset_id, delegate.into(), amount);
        api.tx()
            .sign_and_submit_then_watch_default(&tx1, &signer)
            .await?
            .wait_for_finalized_success()
            .await?;
        api.tx()
            .sign_and_submit_then_watch_default(&tx2, &signer)
            .await?
            .wait_for_finalized_success()
            .await?;
    }

    // Check all approvals: iterating from the root address must return
    // both entries, whose amounts match what we submitted above.
    let addr = node_runtime::storage().assets().approvals_iter();
    let addr_bytes = api.storage().address_bytes(&addr)?;
    let mut results = api.storage().at_latest().await?.iter(addr).await?;
    let mut approvals = Vec::new();
    while let Some(Ok((key, value))) = results.next().await {
        assert!(key.starts_with(&addr_bytes));
        approvals.push(value);
    }
    assert_eq!(approvals.len(), assets.len());
    let mut amounts = approvals.iter().map(|a| a.amount).collect::<Vec<_>>();
    amounts.sort();
    let mut expected = assets.iter().map(|a| a.3).collect::<Vec<_>>();
    expected.sort();
    assert_eq!(amounts, expected);

    // Check each asset's approvals via a partial-key lookup (first key only).
    for (asset_id, _, _, amount) in assets.clone() {
        let addr = node_runtime::storage().assets().approvals_iter1(asset_id);
        let second_addr_bytes = api.storage().address_bytes(&addr)?;
        // Keys must be different, since we are adding to the root key.
        assert_ne!(addr_bytes, second_addr_bytes);
        // The partial address must extend (be prefixed by) the root address.
        assert!(second_addr_bytes.starts_with(&addr_bytes));

        let mut results = api.storage().at_latest().await?.iter(addr).await?;
        let mut approvals = Vec::new();
        while let Some(Ok((key, value))) = results.next().await {
            // Tighter check than before: returned keys must start with the
            // *partial* address bytes, not merely the root address bytes —
            // this is exactly what the partial-key iteration fix guarantees.
            assert!(key.starts_with(&second_addr_bytes));
            approvals.push(value);
        }
        // Exactly one approval exists per asset ID.
        assert_eq!(approvals.len(), 1);
        assert_eq!(approvals[0].amount, amount);
    }
    Ok(())
}
#[tokio::test]
async fn storage_runtime_wasm_code() -> Result<(), subxt::Error> {
let ctx = test_context().await;