pallet-mmr: fix offchain db for sync from zero (#12498)

* pallet-mmr: cosmetic improvements

* pallet-mmr: fix offchain storage for initial sync

* address review comments

* pallet-mmr: change offchain fork-resistant key to `(prefix, pos, parent_hash)`

Do this so that both canon and fork-resistant keys have the same
`(prefix, pos).encode()` prefix. Might be useful in the future if we'd
be able to "get" offchain db entries using key prefixes as well.

Signed-off-by: acatangiu <adrian@parity.io>

Signed-off-by: acatangiu <adrian@parity.io>
This commit is contained in:
Adrian Catangiu
2022-10-17 12:09:47 +03:00
committed by GitHub
parent 37538f0429
commit 4ae0d9a8e3
4 changed files with 163 additions and 108 deletions
+4 -4
View File
@@ -100,8 +100,8 @@ fn should_contain_mmr_digest() {
#[test]
fn should_contain_valid_leaf_data() {
fn node_offchain_key(parent_hash: H256, pos: usize) -> Vec<u8> {
(<Test as pallet_mmr::Config>::INDEXING_PREFIX, parent_hash, pos as u64).encode()
fn node_offchain_key(pos: usize, parent_hash: H256) -> Vec<u8> {
(<Test as pallet_mmr::Config>::INDEXING_PREFIX, pos as u64, parent_hash).encode()
}
let mut ext = new_test_ext(vec![1, 2, 3, 4]);
@@ -110,7 +110,7 @@ fn should_contain_valid_leaf_data() {
<frame_system::Pallet<Test>>::parent_hash()
});
let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(parent_hash, 0));
let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(0, parent_hash));
assert_eq!(
mmr_leaf,
MmrLeaf {
@@ -135,7 +135,7 @@ fn should_contain_valid_leaf_data() {
<frame_system::Pallet<Test>>::parent_hash()
});
let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(parent_hash, 1));
let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(1, parent_hash));
assert_eq!(
mmr_leaf,
MmrLeaf {
@@ -57,9 +57,9 @@
#![cfg_attr(not(feature = "std"), no_std)]
use codec::Encode;
use frame_support::weights::Weight;
use frame_support::{log, traits::Get, weights::Weight};
use sp_runtime::{
traits::{self, CheckedSub, One, Saturating},
traits::{self, CheckedSub, One, Saturating, UniqueSaturatedInto},
SaturatedConversion,
};
@@ -103,6 +103,15 @@ pub trait WeightInfo {
fn on_initialize(peaks: NodeIndex) -> Weight;
}
/// A MMR specific to the pallet.
type ModuleMmr<StorageType, T, I> = mmr::Mmr<StorageType, T, I, LeafOf<T, I>>;
/// Leaf data.
type LeafOf<T, I> = <<T as Config<I>>::LeafData as primitives::LeafDataProvider>::LeafData;
/// Hashing used for the pallet.
pub(crate) type HashingOf<T, I> = <T as Config<I>>::Hashing;
#[frame_support::pallet]
pub mod pallet {
use super::*;
@@ -166,7 +175,7 @@ pub mod pallet {
/// Note that the leaf at each block MUST be unique. You may want to include a block hash or
/// block number as an easiest way to ensure that.
/// Also note that the leaf added by each block is expected to only reference data coming
/// from ancestor blocks (leaves are saved offchain using `(parent_hash, pos)` key to be
/// from ancestor blocks (leaves are saved offchain using `(pos, parent_hash)` key to be
/// fork-resistant, as such conflicts could only happen on 1-block deep forks, which means
/// two forks with identical line of ancestors compete to write the same offchain key, but
/// that's fine as long as leaves only contain data coming from ancestors - conflicting
@@ -212,12 +221,22 @@ pub mod pallet {
let leaves = Self::mmr_leaves();
let peaks_before = mmr::utils::NodesUtils::new(leaves).number_of_peaks();
let data = T::LeafData::leaf_data();
// append new leaf to MMR
let mut mmr: ModuleMmr<mmr::storage::RuntimeStorage, T, I> = mmr::Mmr::new(leaves);
mmr.push(data).expect("MMR push never fails.");
// update the size
let (leaves, root) = mmr.finalize().expect("MMR finalize never fails.");
// MMR push never fails, but better safe than sorry.
if mmr.push(data).is_none() {
log::error!(target: "runtime::mmr", "MMR push failed");
return T::WeightInfo::on_initialize(peaks_before)
}
// Update the size, `mmr.finalize()` should also never fail.
let (leaves, root) = match mmr.finalize() {
Ok((leaves, root)) => (leaves, root),
Err(e) => {
log::error!(target: "runtime::mmr", "MMR finalize failed: {:?}", e);
return T::WeightInfo::on_initialize(peaks_before)
},
};
<T::OnNewRoot as primitives::OnNewRoot<_>>::on_new_root(&root);
<NumberOfLeaves<T, I>>::put(leaves);
@@ -230,21 +249,35 @@ pub mod pallet {
fn offchain_worker(n: T::BlockNumber) {
use mmr::storage::{OffchainStorage, Storage};
// MMR pallet uses offchain storage to hold full MMR and leaves.
// The leaves are saved under fork-unique keys `(parent_hash, pos)`.
// MMR Runtime depends on `frame_system::block_hash(block_num)` mappings to find
// parent hashes for particular nodes or leaves.
// This MMR offchain worker function moves a rolling window of the same size
// as `frame_system::block_hash` map, where nodes/leaves added by blocks that are just
// The MMR nodes can be found in offchain db under either:
// - fork-unique keys `(prefix, pos, parent_hash)`, or,
// - "canonical" keys `(prefix, pos)`,
// depending on how many blocks in the past the node at position `pos` was
// added to the MMR.
//
// For the fork-unique keys, the MMR pallet depends on
// `frame_system::block_hash(parent_num)` mappings to find the relevant parent block
// hashes, so it is limited by `frame_system::BlockHashCount` in terms of how many
// historical forks it can track. Nodes added to MMR by block `N` can be found in
// offchain db at:
// - fork-unique keys `(prefix, pos, parent_hash)` when (`N` >= `latest_block` -
// `frame_system::BlockHashCount`);
// - "canonical" keys `(prefix, pos)` when (`N` < `latest_block` -
// `frame_system::BlockHashCount`);
//
// The offchain worker is responsible for maintaining the nodes' positions in
// offchain db as the chain progresses by moving a rolling window of the same size as
// `frame_system::block_hash` map, where nodes/leaves added by blocks that are just
// about to exit the window are "canonicalized" so that their offchain key no longer
// depends on `parent_hash` therefore on access to `frame_system::block_hash`.
// depends on `parent_hash`.
//
// This approach works to eliminate fork-induced leaf collisions in offchain db,
// under the assumption that no fork will be deeper than `frame_system::BlockHashCount`
// blocks (2400 blocks on Polkadot, Kusama, Rococo, etc):
// entries pertaining to block `N` where `N < current-2400` are moved to a key based
// solely on block number. The only way to have collisions is if two competing forks
// are deeper than 2400 blocks and they both "canonicalize" their view of block `N`.
// blocks:
// entries pertaining to block `N` where `N < current-BlockHashCount` are moved to a
// key based solely on block number. The only way to have collisions is if two
// competing forks are deeper than `frame_system::BlockHashCount` blocks and they
// both "canonicalize" their view of block `N`
// Once a block is canonicalized, all MMR entries pertaining to sibling blocks from
// other forks are pruned from offchain db.
Storage::<OffchainStorage, T, I, LeafOf<T, I>>::canonicalize_and_prune(n);
@@ -252,15 +285,6 @@ pub mod pallet {
}
}
/// A MMR specific to the pallet.
type ModuleMmr<StorageType, T, I> = mmr::Mmr<StorageType, T, I, LeafOf<T, I>>;
/// Leaf data.
type LeafOf<T, I> = <<T as Config<I>>::LeafData as primitives::LeafDataProvider>::LeafData;
/// Hashing used for the pallet.
pub(crate) type HashingOf<T, I> = <T as Config<I>>::Hashing;
/// Stateless MMR proof verification for batch of leaves.
///
/// This function can be used to verify received MMR [primitives::BatchProof] (`proof`)
@@ -290,19 +314,32 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
///
/// This combination makes the offchain (key,value) entry resilient to chain forks.
fn node_offchain_key(
parent_hash: <T as frame_system::Config>::Hash,
pos: NodeIndex,
parent_hash: <T as frame_system::Config>::Hash,
) -> sp_std::prelude::Vec<u8> {
(T::INDEXING_PREFIX, parent_hash, pos).encode()
(T::INDEXING_PREFIX, pos, parent_hash).encode()
}
/// Build canonical offchain key for node `pos` in MMR.
///
/// Used for nodes added by now finalized blocks.
/// Never read keys using `node_canon_offchain_key` unless you sure that
/// there's no `node_offchain_key` key in the storage.
fn node_canon_offchain_key(pos: NodeIndex) -> sp_std::prelude::Vec<u8> {
(T::INDEXING_PREFIX, pos).encode()
}
/// Return size of rolling window of leaves saved in offchain under fork-unique keys.
///
/// Leaves outside this window are canonicalized.
/// Window size is `frame_system::BlockHashCount - 1` to make sure fork-unique keys
/// can be built using `frame_system::block_hash` map.
fn offchain_canonicalization_window() -> LeafIndex {
let window_size: LeafIndex =
<T as frame_system::Config>::BlockHashCount::get().unique_saturated_into();
window_size.saturating_sub(1)
}
/// Provide the parent number for the block that added `leaf_index` to the MMR.
fn leaf_index_to_parent_block_num(
leaf_index: LeafIndex,
@@ -18,11 +18,10 @@
//! A MMR storage implementations.
use codec::Encode;
use frame_support::traits::Get;
use frame_support::log::{debug, error, trace};
use mmr_lib::helper;
use sp_core::offchain::StorageKind;
use sp_io::{offchain, offchain_index};
use sp_runtime::traits::UniqueSaturatedInto;
use sp_std::iter::Peekable;
#[cfg(not(feature = "std"))]
use sp_std::prelude::*;
@@ -133,15 +132,14 @@ where
// Effectively move a rolling window of fork-unique leaves. Once out of the window, leaves
// are "canonicalized" in offchain by moving them under `Pallet::node_canon_offchain_key`.
let leaves = NumberOfLeaves::<T, I>::get();
let window_size =
<T as frame_system::Config>::BlockHashCount::get().unique_saturated_into();
let window_size = Pallet::<T, I>::offchain_canonicalization_window();
if leaves >= window_size {
// Move the rolling window towards the end of `block_num->hash` mappings available
// in the runtime: we "canonicalize" the leaf at the end,
let to_canon_leaf = leaves.saturating_sub(window_size);
// and all the nodes added by that leaf.
let to_canon_nodes = NodesUtils::right_branch_ending_in_leaf(to_canon_leaf);
frame_support::log::debug!(
debug!(
target: "runtime::mmr::offchain", "Nodes to canon for leaf {}: {:?}",
to_canon_leaf, to_canon_nodes
);
@@ -149,7 +147,7 @@ where
let to_canon_block_num =
Pallet::<T, I>::leaf_index_to_parent_block_num(to_canon_leaf, leaves);
// Only entries under this hash (retrieved from state on current canon fork) are to be
// persisted. All other entries added by same block number will be cleared.
// persisted. All entries added by same block number on other forks will be cleared.
let to_canon_hash = <frame_system::Pallet<T>>::block_hash(to_canon_block_num);
Self::canonicalize_nodes_for_hash(&to_canon_nodes, to_canon_hash);
@@ -159,7 +157,7 @@ where
Self::prune_nodes_for_forks(&to_canon_nodes, forks);
})
.unwrap_or_else(|| {
frame_support::log::error!(
error!(
target: "runtime::mmr::offchain",
"Offchain: could not prune: no entry in pruning map for block {:?}",
to_canon_block_num
@@ -171,8 +169,8 @@ where
fn prune_nodes_for_forks(nodes: &[NodeIndex], forks: Vec<<T as frame_system::Config>::Hash>) {
for hash in forks {
for pos in nodes {
let key = Pallet::<T, I>::node_offchain_key(hash, *pos);
frame_support::log::debug!(
let key = Pallet::<T, I>::node_offchain_key(*pos, hash);
debug!(
target: "runtime::mmr::offchain",
"Clear elem at pos {} with key {:?}",
pos, key
@@ -187,19 +185,19 @@ where
to_canon_hash: <T as frame_system::Config>::Hash,
) {
for pos in to_canon_nodes {
let key = Pallet::<T, I>::node_offchain_key(to_canon_hash, *pos);
let key = Pallet::<T, I>::node_offchain_key(*pos, to_canon_hash);
// Retrieve the element from Off-chain DB under fork-aware key.
if let Some(elem) = offchain::local_storage_get(StorageKind::PERSISTENT, &key) {
let canon_key = Pallet::<T, I>::node_canon_offchain_key(*pos);
// Add under new canon key.
offchain::local_storage_set(StorageKind::PERSISTENT, &canon_key, &elem);
frame_support::log::debug!(
debug!(
target: "runtime::mmr::offchain",
"Moved elem at pos {} from key {:?} to canon key {:?}",
pos, key, canon_key
);
} else {
frame_support::log::error!(
error!(
target: "runtime::mmr::offchain",
"Could not canonicalize elem at pos {} using key {:?}",
pos, key
@@ -220,21 +218,18 @@ where
// Find out which leaf added node `pos` in the MMR.
let ancestor_leaf_idx = NodesUtils::leaf_index_that_added_node(pos);
let window_size =
<T as frame_system::Config>::BlockHashCount::get().unique_saturated_into();
let window_size = Pallet::<T, I>::offchain_canonicalization_window();
// Leaves older than this window should have been canonicalized.
if leaves.saturating_sub(ancestor_leaf_idx) > window_size {
let key = Pallet::<T, I>::node_canon_offchain_key(pos);
frame_support::log::debug!(
debug!(
target: "runtime::mmr::offchain", "offchain db get {}: leaf idx {:?}, key {:?}",
pos, ancestor_leaf_idx, key
);
// Just for safety, to easily handle runtime upgrades where any of the window params
// change and maybe we mess up storage migration,
// return _if and only if_ node is found (in normal conditions it's always found),
if let Some(elem) =
sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key)
{
if let Some(elem) = sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) {
return Ok(codec::Decode::decode(&mut &*elem).ok())
}
// BUT if we DID MESS UP, fall through to searching node using fork-specific key.
@@ -244,20 +239,20 @@ where
let ancestor_parent_block_num =
Pallet::<T, I>::leaf_index_to_parent_block_num(ancestor_leaf_idx, leaves);
let ancestor_parent_hash = <frame_system::Pallet<T>>::block_hash(ancestor_parent_block_num);
let key = Pallet::<T, I>::node_offchain_key(ancestor_parent_hash, pos);
frame_support::log::debug!(
let key = Pallet::<T, I>::node_offchain_key(pos, ancestor_parent_hash);
debug!(
target: "runtime::mmr::offchain", "offchain db get {}: leaf idx {:?}, hash {:?}, key {:?}",
pos, ancestor_leaf_idx, ancestor_parent_hash, key
);
// Retrieve the element from Off-chain DB.
Ok(sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key)
Ok(sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key)
.or_else(|| {
// Again, this is just us being extra paranoid.
// We get here only if we mess up a storage migration for a runtime upgrades where
// say the window is increased, and for a little while following the upgrade there's
// leaves inside new 'window' that had been already canonicalized before upgrade.
let key = Pallet::<T, I>::node_canon_offchain_key(pos);
sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key)
sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key)
})
.and_then(|v| codec::Decode::decode(&mut &*v).ok()))
}
@@ -282,9 +277,8 @@ where
return Ok(())
}
frame_support::log::trace!(
target: "runtime::mmr",
"elems: {:?}",
trace!(
target: "runtime::mmr", "elems: {:?}",
elems.iter().map(|elem| elem.hash()).collect::<Vec<_>>()
);
@@ -309,25 +303,12 @@ where
// in offchain DB to avoid DB collisions and overwrites in case of forks.
let parent_hash = <frame_system::Pallet<T>>::parent_hash();
for elem in elems {
// For now we store this leaf offchain keyed by `(parent_hash, node_index)`
// to make it fork-resistant.
// Offchain worker task will "canonicalize" it `frame_system::BlockHashCount` blocks
// later when we are not worried about forks anymore (highly unlikely to have a fork
// in the chain that deep).
// "Canonicalization" in this case means moving this leaf under a new key based
// only on the leaf's `node_index`.
let key = Pallet::<T, I>::node_offchain_key(parent_hash, node_index);
frame_support::log::debug!(
target: "runtime::mmr::offchain", "offchain db set: pos {} parent_hash {:?} key {:?}",
node_index, parent_hash, key
);
// Indexing API is used to store the full node content (both leaf and inner).
elem.using_encoded(|elem| offchain_index::set(&key, elem));
// On-chain we are going to only store new peaks.
if peaks_to_store.next_if_eq(&node_index).is_some() {
<Nodes<T, I>>::insert(node_index, elem.hash());
}
// We are storing full node off-chain (using indexing API).
Self::store_to_offchain(node_index, parent_hash, &elem);
// Increase the indices.
if let Node::Data(..) = elem {
@@ -348,6 +329,38 @@ where
}
}
impl<T, I, L> Storage<RuntimeStorage, T, I, L>
where
T: Config<I>,
I: 'static,
L: primitives::FullLeaf,
{
fn store_to_offchain(
pos: NodeIndex,
parent_hash: <T as frame_system::Config>::Hash,
node: &NodeOf<T, I, L>,
) {
let encoded_node = node.encode();
// We store this leaf offchain keyed by `(parent_hash, node_index)` to make it
// fork-resistant. Offchain worker task will "canonicalize" it
// `frame_system::BlockHashCount` blocks later, when we are not worried about forks anymore
// (multi-era-deep forks should not happen).
let key = Pallet::<T, I>::node_offchain_key(pos, parent_hash);
debug!(
target: "runtime::mmr::offchain", "offchain db set: pos {} parent_hash {:?} key {:?}",
pos, parent_hash, key
);
// Indexing API is used to store the full node content.
offchain_index::set(&key, &encoded_node);
// We also directly save the full node under the "canonical" key.
// This is superfluous for the normal case - this entry will possibly be overwritten
// by forks, and will also be overwritten by "offchain_worker canonicalization".
// But it is required for blocks imported during initial sync where none of the above apply
// (`offchain_worker` doesn't run for initial sync blocks).
offchain_index::set(&Pallet::<T, I>::node_canon_offchain_key(pos), &encoded_node);
}
}
fn peaks_to_prune_and_store(
old_size: NodeIndex,
new_size: NodeIndex,
@@ -356,8 +369,8 @@ fn peaks_to_prune_and_store(
// both collections may share a common prefix.
let peaks_before = if old_size == 0 { vec![] } else { helper::get_peaks(old_size) };
let peaks_after = helper::get_peaks(new_size);
frame_support::log::trace!(target: "runtime::mmr", "peaks_before: {:?}", peaks_before);
frame_support::log::trace!(target: "runtime::mmr", "peaks_after: {:?}", peaks_after);
trace!(target: "runtime::mmr", "peaks_before: {:?}", peaks_before);
trace!(target: "runtime::mmr", "peaks_after: {:?}", peaks_after);
let mut peaks_before = peaks_before.into_iter().peekable();
let mut peaks_after = peaks_after.into_iter().peekable();
@@ -169,25 +169,22 @@ fn should_append_to_mmr_when_on_initialize_is_called() {
ext.persist_offchain_overlay();
let offchain_db = ext.offchain_db();
assert_eq!(
offchain_db.get(&MMR::node_offchain_key(parent_b1, 0)).map(decode_node),
Some(mmr::Node::Data(((0, H256::repeat_byte(1)), LeafData::new(1),)))
);
assert_eq!(
offchain_db.get(&MMR::node_offchain_key(parent_b2, 1)).map(decode_node),
Some(mmr::Node::Data(((1, H256::repeat_byte(2)), LeafData::new(2),)))
);
assert_eq!(
offchain_db.get(&MMR::node_offchain_key(parent_b2, 2)).map(decode_node),
Some(mmr::Node::Hash(hex(
"672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"
)))
);
assert_eq!(offchain_db.get(&MMR::node_offchain_key(parent_b2, 3)), None);
assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(0)), None);
assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(1)), None);
assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(2)), None);
let expected = Some(mmr::Node::Data(((0, H256::repeat_byte(1)), LeafData::new(1))));
assert_eq!(offchain_db.get(&MMR::node_offchain_key(0, parent_b1)).map(decode_node), expected);
assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(0)).map(decode_node), expected);
let expected = Some(mmr::Node::Data(((1, H256::repeat_byte(2)), LeafData::new(2))));
assert_eq!(offchain_db.get(&MMR::node_offchain_key(1, parent_b2)).map(decode_node), expected);
assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(1)).map(decode_node), expected);
let expected = Some(mmr::Node::Hash(hex(
"672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854",
)));
assert_eq!(offchain_db.get(&MMR::node_offchain_key(2, parent_b2)).map(decode_node), expected);
assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(2)).map(decode_node), expected);
assert_eq!(offchain_db.get(&MMR::node_offchain_key(3, parent_b2)), None);
assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(3)), None);
}
@@ -815,16 +812,20 @@ fn should_canonicalize_offchain() {
let parent_num: BlockNumber = (block_num - 1).into();
let leaf_index = u64::from(block_num - 1);
let pos = helper::leaf_index_to_pos(leaf_index.into());
// not canon,
assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(pos)), None);
let parent_hash = <frame_system::Pallet<Test>>::block_hash(parent_num);
// but available in fork-proof storage.
// Available in offchain db under both fork-proof key and canon key.
// We'll later check it is pruned from fork-proof key.
let expected = Some(mmr::Node::Data((
(leaf_index, H256::repeat_byte(u8::try_from(block_num).unwrap())),
LeafData::new(block_num.into()),
)));
assert_eq!(
offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)).map(decode_node),
Some(mmr::Node::Data((
(leaf_index, H256::repeat_byte(u8::try_from(block_num).unwrap())),
LeafData::new(block_num.into()),
)))
offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node),
expected
);
assert_eq!(
offchain_db.get(&MMR::node_offchain_key(pos, parent_hash)).map(decode_node),
expected
);
}
@@ -835,12 +836,16 @@ fn should_canonicalize_offchain() {
let verify = |pos: NodeIndex, leaf_index: LeafIndex, expected: H256| {
let parent_num: BlockNumber = leaf_index.try_into().unwrap();
let parent_hash = <frame_system::Pallet<Test>>::block_hash(parent_num);
// not canon,
assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(pos)), None);
// but available in fork-proof storage.
// Available in offchain db under both fork-proof key and canon key.
// We'll later check it is pruned from fork-proof key.
let expected = Some(mmr::Node::Hash(expected));
assert_eq!(
offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)).map(decode_node),
Some(mmr::Node::Hash(expected))
offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node),
expected
);
assert_eq!(
offchain_db.get(&MMR::node_offchain_key(pos, parent_hash)).map(decode_node),
expected
);
};
verify(2, 1, hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"));
@@ -867,7 +872,7 @@ fn should_canonicalize_offchain() {
let parent_num: BlockNumber = (block_num - 1).into();
let parent_hash = <frame_system::Pallet<Test>>::block_hash(parent_num);
// no longer available in fork-proof storage (was pruned),
assert_eq!(offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)), None);
assert_eq!(offchain_db.get(&MMR::node_offchain_key(pos, parent_hash)), None);
// but available using canon key.
assert_eq!(
offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node),
@@ -886,7 +891,7 @@ fn should_canonicalize_offchain() {
let parent_num: BlockNumber = leaf_index.try_into().unwrap();
let parent_hash = <frame_system::Pallet<Test>>::block_hash(parent_num);
// no longer available in fork-proof storage (was pruned),
assert_eq!(offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)), None);
assert_eq!(offchain_db.get(&MMR::node_offchain_key(pos, parent_hash)), None);
// but available using canon key.
assert_eq!(
offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node),