diff --git a/codegen/src/api/storage.rs b/codegen/src/api/storage.rs index 88ad1cedfe..922aca7d7a 100644 --- a/codegen/src/api/storage.rs +++ b/codegen/src/api/storage.rs @@ -110,47 +110,49 @@ fn generate_storage_entry_fns( StorageEntryType::Map { key_ty, hashers, .. } => { - match &type_gen - .resolve_type(*key_ty) - .expect("key type should be present") - .type_def - { - // An N-map; return each of the keys separately. - TypeDef::Tuple(tuple) => { - let key_count = tuple.fields.len(); - let hasher_count = hashers.len(); - if hasher_count != 1 && hasher_count != key_count { - return Err(CodegenError::InvalidStorageHasherCount { - storage_entry_name: storage_entry.name().to_owned(), - key_count, - hasher_count, - }); - } - - let mut map_entry_keys: Vec = vec![]; - for (idx, field) in tuple.fields.iter().enumerate() { - // Note: these are in bounds because of the checks above, qed; - let hasher = if idx >= hasher_count { - hashers[0] - } else { - hashers[idx] - }; - map_entry_keys.push(map_entry_key(idx, field.id, hasher)); - } - map_entry_keys - } - // A map with a single key; return the single key. - _ => { - let Some(hasher) = hashers.first() else { + if hashers.len() == 1 { + // If there's exactly 1 hasher, then we have a plain StorageMap. We can't + // break the key down (even if it's a tuple) because the hasher applies to + // the whole key. + vec![map_entry_key(0, *key_ty, hashers[0])] + } else { + // If there are multiple hashers, then we have a StorageDoubleMap or StorageNMap. + // We expect the key type to be tuple, and we will return a MapEntryKey for each + // key in the tuple. 
+ let hasher_count = hashers.len(); + let tuple = match &type_gen + .resolve_type(*key_ty) + .expect("key type should be present") + .type_def + { + TypeDef::Tuple(tuple) => tuple, + _ => { return Err(CodegenError::InvalidStorageHasherCount { storage_entry_name: storage_entry.name().to_owned(), key_count: 1, - hasher_count: 0, + hasher_count, }); - }; + } + }; - vec![map_entry_key(0, *key_ty, *hasher)] + // We should have the same number of hashers and keys. + let key_count = tuple.fields.len(); + if hasher_count != key_count { + return Err(CodegenError::InvalidStorageHasherCount { + storage_entry_name: storage_entry.name().to_owned(), + key_count, + hasher_count, + }); } + + // Collect them together. + tuple + .fields + .iter() + .zip(hashers) + .enumerate() + .map(|(idx, (field, hasher))| map_entry_key(idx, field.id, *hasher)) + .collect() } } }; diff --git a/core/src/storage/storage_key.rs b/core/src/storage/storage_key.rs index 73084e748f..bac737c66f 100644 --- a/core/src/storage/storage_key.rs +++ b/core/src/storage/storage_key.rs @@ -32,34 +32,45 @@ impl StorageHashers { .resolve(*key_ty) .ok_or(MetadataError::TypeNotFound(*key_ty))?; - if let TypeDef::Tuple(tuple) = &ty.type_def { - if hashers.len() == 1 { - // use the same hasher for all fields, if only 1 hasher present: - let hasher = hashers[0]; - for f in tuple.fields.iter() { - hashers_and_ty_ids.push((hasher, f.id)); - } - } else if hashers.len() < tuple.fields.len() { - return Err(StorageAddressError::WrongNumberOfHashers { - hashers: hashers.len(), - fields: tuple.fields.len(), - } - .into()); - } else { - for (i, f) in tuple.fields.iter().enumerate() { - hashers_and_ty_ids.push((hashers[i], f.id)); - } - } + if hashers.len() == 1 { + // If there's exactly 1 hasher, then we have a plain StorageMap. We can't + // break the key down (even if it's a tuple) because the hasher applies to + // the whole key. 
+ hashers_and_ty_ids = vec![(hashers[0], *key_ty)]; } else { - if hashers.len() != 1 { + // If there are multiple hashers, then we have a StorageDoubleMap or StorageNMap. + // We expect the key type to be a tuple, and we will record a (hasher, type id) pair + // for each key in the tuple. + let hasher_count = hashers.len(); + let tuple = match &ty.type_def { + TypeDef::Tuple(tuple) => tuple, + _ => { + return Err(StorageAddressError::WrongNumberOfHashers { + hashers: hasher_count, + fields: 1, + } + .into()); + } + }; + + // We should have the same number of hashers and keys. + let key_count = tuple.fields.len(); + if hasher_count != key_count { return Err(StorageAddressError::WrongNumberOfHashers { - hashers: hashers.len(), - fields: 1, + hashers: hasher_count, + fields: key_count, } .into()); } - hashers_and_ty_ids.push((hashers[0], *key_ty)); - }; + + // Collect them together. + hashers_and_ty_ids = tuple + .fields + .iter() + .zip(hashers) + .map(|(field, hasher)| (*hasher, field.id)) + .collect(); + } } Ok(Self { hashers_and_ty_ids }) diff --git a/testing/integration-tests/src/full_client/storage.rs b/testing/integration-tests/src/full_client/storage.rs index 1368e4e195..ecb6fe3156 100644 --- a/testing/integration-tests/src/full_client/storage.rs +++ b/testing/integration-tests/src/full_client/storage.rs @@ -72,7 +72,7 @@ async fn storage_n_mapish_key_is_properly_created() -> Result<(), subxt::Error> // This is what the generated code hashes a `session().key_owner(..)` key into: let actual_key = node_runtime::storage() .session() - .key_owner(KeyTypeId([1, 2, 3, 4]), vec![5, 6, 7, 8]); + .key_owner((KeyTypeId([1, 2, 3, 4]), vec![5, 6, 7, 8])); let actual_key_bytes = api.storage().address_bytes(&actual_key)?; // Let's manually hash to what we assume it should be and compare: @@ -80,13 +80,12 @@ async fn storage_n_mapish_key_is_properly_created() -> Result<(), subxt::Error> // Hash the prefix to the storage entry: let mut bytes = 
sp_core::twox_128("Session".as_bytes()).to_vec(); bytes.extend(&sp_core::twox_128("KeyOwner".as_bytes())[..]); - // Both keys, use twox64_concat hashers: - let key1 = [1u8, 2, 3, 4].encode(); - let key2 = vec![5u8, 6, 7, 8].encode(); - bytes.extend(sp_core::twox_64(&key1)); - bytes.extend(&key1); - bytes.extend(sp_core::twox_64(&key2)); - bytes.extend(&key2); + // Key is a tuple of 2 args, so encode each arg and then hash the concatenation: + let mut key_bytes = vec![]; + [1u8, 2, 3, 4].encode_to(&mut key_bytes); + vec![5u8, 6, 7, 8].encode_to(&mut key_bytes); + bytes.extend(sp_core::twox_64(&key_bytes)); + bytes.extend(&key_bytes); bytes };