diff --git a/subxt/src/storage/storage_address.rs b/subxt/src/storage/storage_address.rs index df6fdbb874..893474b518 100644 --- a/subxt/src/storage/storage_address.rs +++ b/subxt/src/storage/storage_address.rs @@ -172,6 +172,12 @@ where .resolve(*key_ty) .ok_or(MetadataError::TypeNotFound(*key_ty))?; + // If the provided keys are empty, the storage address must be + // equal to the storage root address. + if self.storage_entry_keys.is_empty() { + return Ok(()); + } + // If the key is a tuple, we encode each value to the corresponding tuple type. // If the key is not a tuple, encode a single value to the key type. let type_ids = match &ty.type_def { @@ -181,7 +187,8 @@ where _other => either::Either::Right(std::iter::once(*key_ty)), }; - if type_ids.len() != self.storage_entry_keys.len() { + if type_ids.len() < self.storage_entry_keys.len() { + // Provided more keys than fields. return Err(StorageAddressError::WrongNumberOfKeys { expected: type_ids.len(), actual: self.storage_entry_keys.len(), @@ -198,7 +205,7 @@ where } hash_bytes(&input, &hashers[0], bytes); Ok(()) - } else if hashers.len() == type_ids.len() { + } else if hashers.len() >= type_ids.len() { let iter = self.storage_entry_keys.iter().zip(type_ids).zip(hashers); // A hasher per field; encode and hash each field independently. for ((key, type_id), hasher) in iter { @@ -208,7 +215,7 @@ where } Ok(()) } else { - // Mismatch; wrong number of hashers/fields. + // Provided more fields than hashers. Err(StorageAddressError::WrongNumberOfHashers { hashers: hashers.len(), fields: type_ids.len(), diff --git a/subxt/src/storage/storage_type.rs b/subxt/src/storage/storage_type.rs index 43cc6e3367..2669c1abce 100644 --- a/subxt/src/storage/storage_type.rs +++ b/subxt/src/storage/storage_type.rs @@ -228,12 +228,12 @@ where // in the iterator. 
let return_type_id = return_type_from_storage_entry_type(entry.entry_type()); - // The root pallet/entry bytes for this storage entry: - let address_root_bytes = super::utils::storage_address_root_bytes(&address); + // The address bytes of this entry: + let address_bytes = super::utils::storage_address_bytes(&address, &metadata)?; let s = client .backend() - .storage_fetch_descendant_values(address_root_bytes, block_ref.hash()) + .storage_fetch_descendant_values(address_bytes, block_ref.hash()) .await? .map(move |kv| { let kv = match kv { diff --git a/testing/integration-tests/src/full_client/client/mod.rs b/testing/integration-tests/src/full_client/client/mod.rs index dd4fc71dff..cd9208da38 100644 --- a/testing/integration-tests/src/full_client/client/mod.rs +++ b/testing/integration-tests/src/full_client/client/mod.rs @@ -45,6 +45,9 @@ async fn storage_iter() { let api = ctx.client(); let addr = node_runtime::storage().system().account_iter(); + let addr_bytes = api.storage().address_bytes(&addr).unwrap(); + assert_eq!(addr_bytes, addr.to_root_bytes()); + let len = api .storage() .at_latest() diff --git a/testing/integration-tests/src/full_client/storage/mod.rs b/testing/integration-tests/src/full_client/storage/mod.rs index dc697d6267..e1cec5f939 100644 --- a/testing/integration-tests/src/full_client/storage/mod.rs +++ b/testing/integration-tests/src/full_client/storage/mod.rs @@ -126,6 +126,79 @@ async fn storage_n_map_storage_lookup() -> Result<(), subxt::Error> { Ok(()) } +#[tokio::test] +async fn storage_partial_lookup() -> Result<(), subxt::Error> { + let ctx = test_context().await; + let api = ctx.client(); + + // Boilerplate; we create a new asset class with ID 99, and then + // we "approveTransfer" of some of this asset class. This gives us an + // entry in the `Approvals` StorageNMap that we can try to look up. 
+ let signer = dev::alice(); + let alice: AccountId32 = dev::alice().public_key().into(); + let bob: AccountId32 = dev::bob().public_key().into(); + + // Create two assets; one with ID 99 and one with ID 100. + let assets = [ + (99, alice.clone(), bob.clone(), 123), + (100, bob.clone(), alice.clone(), 124), + ]; + for (asset_id, admin, delegate, amount) in assets.clone() { + let tx1 = node_runtime::tx() + .assets() + .create(asset_id, admin.into(), 1); + let tx2 = node_runtime::tx() + .assets() + .approve_transfer(asset_id, delegate.into(), amount); + api.tx() + .sign_and_submit_then_watch_default(&tx1, &signer) + .await? + .wait_for_finalized_success() + .await?; + api.tx() + .sign_and_submit_then_watch_default(&tx2, &signer) + .await? + .wait_for_finalized_success() + .await?; + } + + // Check all approvals. + let addr = node_runtime::storage().assets().approvals_iter(); + let addr_bytes = api.storage().address_bytes(&addr)?; + let mut results = api.storage().at_latest().await?.iter(addr).await?; + let mut approvals = Vec::new(); + while let Some(Ok((key, value))) = results.next().await { + assert!(key.starts_with(&addr_bytes)); + approvals.push(value); + } + assert_eq!(approvals.len(), assets.len()); + let mut amounts = approvals.iter().map(|a| a.amount).collect::<Vec<_>>(); + amounts.sort(); + let mut expected = assets.iter().map(|a| a.3).collect::<Vec<_>>(); + expected.sort(); + assert_eq!(amounts, expected); + + // Check all assets starting with ID 99. + for (asset_id, _, _, amount) in assets.clone() { + let addr = node_runtime::storage().assets().approvals_iter1(asset_id); + let second_addr_bytes = api.storage().address_bytes(&addr)?; + // Keys must be different, since we are adding to the root key. 
+ assert_ne!(addr_bytes, second_addr_bytes); + + let mut results = api.storage().at_latest().await?.iter(addr).await?; + + let mut approvals = Vec::new(); + while let Some(Ok((key, value))) = results.next().await { + assert!(key.starts_with(&addr_bytes)); + approvals.push(value); + } + assert_eq!(approvals.len(), 1); + assert_eq!(approvals[0].amount, amount); + } + + Ok(()) +} + #[tokio::test] async fn storage_runtime_wasm_code() -> Result<(), subxt::Error> { let ctx = test_context().await;