feat: initialize Kurdistan SDK - independent fork of Polkadot SDK

This commit is contained in:
2025-12-13 15:44:15 +03:00
commit e4778b4576
6838 changed files with 1847450 additions and 0 deletions
+54
View File
@@ -0,0 +1,54 @@
# Crate manifest for `sc-client-api`: Substrate client interface abstractions.
[package]
name = "sc-client-api"
version = "28.0.0"
authors.workspace = true
edition.workspace = true
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
homepage.workspace = true
repository.workspace = true
description = "Substrate client interfaces."
documentation = "https://docs.rs/sc-client-api"
readme = "README.md"

# Lint settings are inherited from the workspace root.
[lints]
workspace = true

# docs.rs is only asked to build docs for the Linux target.
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

[dependencies]
codec = { features = ["derive"], workspace = true }
fnv = { workspace = true }
futures = { workspace = true }
log = { workspace = true, default-features = true }
parking_lot = { workspace = true, default-features = true }
prometheus-endpoint = { workspace = true, default-features = true }
sc-executor = { workspace = true, default-features = true }
sc-transaction-pool-api = { workspace = true, default-features = true }
sc-utils = { workspace = true, default-features = true }
sp-api = { workspace = true, default-features = true }
sp-blockchain = { workspace = true, default-features = true }
sp-consensus = { workspace = true, default-features = true }
sp-core = { workspace = true }
sp-database = { workspace = true, default-features = true }
sp-externalities = { workspace = true, default-features = true }
sp-runtime = { workspace = true }
sp-state-machine = { workspace = true, default-features = true }
sp-storage = { workspace = true, default-features = true }
sp-trie = { workspace = true, default-features = true }

[dev-dependencies]
substrate-test-runtime = { workspace = true }

[features]
# Propagates the `runtime-benchmarks` feature to all dependencies that support it.
runtime-benchmarks = [
	"sc-executor/runtime-benchmarks",
	"sc-transaction-pool-api/runtime-benchmarks",
	"sp-api/runtime-benchmarks",
	"sp-blockchain/runtime-benchmarks",
	"sp-consensus/runtime-benchmarks",
	"sp-runtime/runtime-benchmarks",
	"sp-state-machine/runtime-benchmarks",
	"sp-trie/runtime-benchmarks",
	"substrate-test-runtime/runtime-benchmarks",
]
+3
View File
@@ -0,0 +1,3 @@
Substrate client interfaces.
License: GPL-3.0-or-later WITH Classpath-exception-2.0
+684
View File
@@ -0,0 +1,684 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate Client data backend
use std::collections::HashSet;
use parking_lot::RwLock;
use sp_api::CallContext;
use sp_consensus::BlockOrigin;
use sp_core::offchain::OffchainStorage;
use sp_runtime::{
traits::{Block as BlockT, HashingFor, NumberFor},
Justification, Justifications, StateVersion, Storage,
};
use sp_state_machine::{
backend::AsTrieBackend, ChildStorageCollection, IndexOperation, IterArgs,
OffchainChangesCollection, StorageCollection, StorageIterator,
};
use sp_storage::{ChildInfo, StorageData, StorageKey};
pub use sp_trie::MerkleValue;
use crate::{blockchain::Backend as BlockchainBackend, UsageInfo};
pub use sp_state_machine::{Backend as StateBackend, BackendTransaction, KeyValueStates};
/// Extracts the state backend type for the given backend.
///
/// Shorthand for the `State` associated type of a [`Backend`] implementation.
pub type StateBackendFor<B, Block> = <B as Backend<Block>>::State;
/// Describes which block import notification stream should be notified.
///
/// Carried in [`ImportSummary`] to tell the notifier which of the two
/// import streams (recent-block vs. every-block) should fire.
#[derive(Debug, Clone, Copy)]
pub enum ImportNotificationAction {
	/// Notify only when the node has synced to the tip or there is a re-org.
	RecentBlock,
	/// Notify for every single block no matter what the sync state is.
	EveryBlock,
	/// Both block import notifications above should be fired.
	Both,
	/// No block import notification should be fired.
	None,
}
/// Import operation summary.
///
/// Contains information about the block that just got imported,
/// including storage changes, reorged blocks, etc.
pub struct ImportSummary<Block: BlockT> {
	/// Block hash of the imported block.
	pub hash: Block::Hash,
	/// Import origin.
	pub origin: BlockOrigin,
	/// Header of the imported block.
	pub header: Block::Header,
	/// Is this block a new best block.
	pub is_new_best: bool,
	/// Optional storage changes (main and child storage collections).
	pub storage_changes: Option<(StorageCollection, ChildStorageCollection)>,
	/// Tree route from old best to new best.
	///
	/// If `None`, there was no re-org while importing.
	pub tree_route: Option<sp_blockchain::TreeRoute<Block>>,
	/// What notify action to take for this import.
	pub import_notification_action: ImportNotificationAction,
}
/// A stale block.
///
/// Reported in [`FinalizeSummary`] for blocks that became stale
/// during a finalization operation.
#[derive(Clone, Debug)]
pub struct StaleBlock<Block: BlockT> {
	/// The hash of this block.
	pub hash: Block::Hash,
	/// Is this a head?
	pub is_head: bool,
}
/// Finalization operation summary.
///
/// Contains information about the block that just got finalized,
/// including tree heads that became stale at the moment of finalization.
pub struct FinalizeSummary<Block: BlockT> {
	/// Last finalized block header.
	pub header: Block::Header,
	/// Blocks that were finalized.
	///
	/// The last entry is the one that has been explicitly finalized.
	pub finalized: Vec<Block::Hash>,
	/// Blocks that became stale during this finalization operation.
	pub stale_blocks: Vec<StaleBlock<Block>>,
}
/// Import operation wrapper.
///
/// Bundles the backend's database operation together with the
/// (optional) import/finalization summaries to notify about afterwards.
pub struct ClientImportOperation<Block: BlockT, B: Backend<Block>> {
	/// DB Operation.
	pub op: B::BlockImportOperation,
	/// Summary of imported block; `None` when no import notification should be sent.
	pub notify_imported: Option<ImportSummary<Block>>,
	/// Summary of finalized block; `None` when no finality notification should be sent.
	pub notify_finalized: Option<FinalizeSummary<Block>>,
}
/// Helper function to apply auxiliary data insertion into an operation.
///
/// Keys in `insert` are written with their values; keys in `delete` are
/// written with `None` (i.e. scheduled for removal). Both are forwarded to
/// the operation's `insert_aux` in a single pass, insertions first.
pub fn apply_aux<'a, 'b: 'a, 'c: 'a, B, Block, D, I>(
	operation: &mut ClientImportOperation<Block, B>,
	insert: I,
	delete: D,
) -> sp_blockchain::Result<()>
where
	Block: BlockT,
	B: Backend<Block>,
	I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
	D: IntoIterator<Item = &'a &'b [u8]>,
{
	// Turn both inputs into owned `(key, Option<value>)` pairs.
	let insertions = insert.into_iter().map(|(key, value)| (key.to_vec(), Some(value.to_vec())));
	let deletions = delete.into_iter().map(|key| (key.to_vec(), None));

	operation.op.insert_aux(insertions.chain(deletions))
}
/// State of a new block.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum NewBlockState {
	/// Normal block.
	Normal,
	/// New best block.
	Best,
	/// Newly finalized block (implicitly best).
	Final,
}

impl NewBlockState {
	/// Whether this block is the new best block.
	///
	/// `Final` implies best, so both `Best` and `Final` return `true`.
	pub fn is_best(self) -> bool {
		matches!(self, NewBlockState::Best | NewBlockState::Final)
	}

	/// Whether this block is considered final.
	pub fn is_final(self) -> bool {
		matches!(self, NewBlockState::Final)
	}
}
/// Block insertion operation.
///
/// Keeps hold of the inserted block state and data.
pub trait BlockImportOperation<Block: BlockT> {
	/// Associated state backend type.
	type State: StateBackend<HashingFor<Block>>;

	/// Returns pending state.
	///
	/// Returns None for backends with locally-unavailable state data.
	fn state(&self) -> sp_blockchain::Result<Option<&Self::State>>;

	/// Append block data to the transaction.
	fn set_block_data(
		&mut self,
		header: Block::Header,
		body: Option<Vec<Block::Extrinsic>>,
		indexed_body: Option<Vec<Vec<u8>>>,
		justifications: Option<Justifications>,
		state: NewBlockState,
	) -> sp_blockchain::Result<()>;

	/// Inject storage data into the database.
	fn update_db_storage(
		&mut self,
		update: BackendTransaction<HashingFor<Block>>,
	) -> sp_blockchain::Result<()>;

	/// Set genesis state. If `commit` is `false` the state is saved in memory, but is not written
	/// to the database.
	fn set_genesis_state(
		&mut self,
		storage: Storage,
		commit: bool,
		state_version: StateVersion,
	) -> sp_blockchain::Result<Block::Hash>;

	/// Inject storage data into the database replacing any existing data.
	fn reset_storage(
		&mut self,
		storage: Storage,
		state_version: StateVersion,
	) -> sp_blockchain::Result<Block::Hash>;

	/// Set storage changes.
	fn update_storage(
		&mut self,
		update: StorageCollection,
		child_update: ChildStorageCollection,
	) -> sp_blockchain::Result<()>;

	/// Write offchain storage changes to the database.
	///
	/// The default implementation is a no-op, for backends without offchain storage.
	fn update_offchain_storage(
		&mut self,
		_offchain_update: OffchainChangesCollection,
	) -> sp_blockchain::Result<()> {
		Ok(())
	}

	/// Insert auxiliary keys.
	///
	/// Values are `None` if should be deleted.
	fn insert_aux<I>(&mut self, ops: I) -> sp_blockchain::Result<()>
	where
		I: IntoIterator<Item = (Vec<u8>, Option<Vec<u8>>)>;

	/// Mark a block as finalized, if multiple blocks are finalized in the same operation then they
	/// must be marked in ascending order.
	fn mark_finalized(
		&mut self,
		hash: Block::Hash,
		justification: Option<Justification>,
	) -> sp_blockchain::Result<()>;

	/// Mark a block as new head. If both block import and set head are specified, set head
	/// overrides block import's best block rule.
	fn mark_head(&mut self, hash: Block::Hash) -> sp_blockchain::Result<()>;

	/// Add a transaction index operation.
	fn update_transaction_index(&mut self, index: Vec<IndexOperation>)
		-> sp_blockchain::Result<()>;

	/// Configure whether to create a block gap if newly imported block is missing parent.
	fn set_create_gap(&mut self, create_gap: bool);
}
/// Interface for performing operations on the backend.
pub trait LockImportRun<Block: BlockT, B: Backend<Block>> {
	/// Lock the import lock, and run operations inside.
	///
	/// The closure receives a fresh [`ClientImportOperation`]; its result is
	/// returned to the caller, with backend errors convertible via `From`.
	fn lock_import_and_run<R, Err, F>(&self, f: F) -> Result<R, Err>
	where
		F: FnOnce(&mut ClientImportOperation<Block, B>) -> Result<R, Err>,
		Err: From<sp_blockchain::Error>;
}
/// Finalize Facilities
pub trait Finalizer<Block: BlockT, B: Backend<Block>> {
	/// Mark all blocks up to given as finalized in operation.
	///
	/// If `justification` is provided it is stored with the given finalized
	/// block (any other finalized blocks are left unjustified).
	///
	/// If the block being finalized is on a different fork from the current
	/// best block the finalized block is set as best, this might be slightly
	/// inaccurate (i.e. outdated). Usages that require determining an accurate
	/// best block should use `SelectChain` instead of the client.
	fn apply_finality(
		&self,
		operation: &mut ClientImportOperation<Block, B>,
		block: Block::Hash,
		justification: Option<Justification>,
		notify: bool,
	) -> sp_blockchain::Result<()>;

	/// Finalize a block.
	///
	/// This will implicitly finalize all blocks up to it and
	/// fire finality notifications.
	///
	/// If the block being finalized is on a different fork from the current
	/// best block, the finalized block is set as best. This might be slightly
	/// inaccurate (i.e. outdated). Usages that require determining an accurate
	/// best block should use `SelectChain` instead of the client.
	///
	/// Pass a flag to indicate whether finality notifications should be propagated.
	/// This is usually tied to some synchronization state, where we don't send notifications
	/// while performing major synchronization work.
	fn finalize_block(
		&self,
		block: Block::Hash,
		justification: Option<Justification>,
		notify: bool,
	) -> sp_blockchain::Result<()>;
}
/// Provides access to an auxiliary database.
///
/// This is a simple global database not aware of forks. Can be used for storing auxiliary
/// information like total block weight/difficulty for fork resolution purposes as a common use
/// case.
pub trait AuxStore {
	/// Insert auxiliary data into key-value store.
	///
	/// Deletions occur after insertions, so a key present in both `insert`
	/// and `delete` ends up deleted.
	fn insert_aux<
		'a,
		'b: 'a,
		'c: 'a,
		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
		D: IntoIterator<Item = &'a &'b [u8]>,
	>(
		&self,
		insert: I,
		delete: D,
	) -> sp_blockchain::Result<()>;

	/// Query auxiliary data from key-value store.
	fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>>;
}
/// An `Iterator` that iterates keys in a given block under a prefix.
pub struct KeysIter<State, Block>
where
	State: StateBackend<HashingFor<Block>>,
	Block: BlockT,
{
	// Raw backend iterator driving the traversal.
	inner: <State as StateBackend<HashingFor<Block>>>::RawIter,
	// State the raw iterator is re-borrowed against on every `next` call.
	state: State,
}
impl<State, Block> KeysIter<State, Block>
where
	State: StateBackend<HashingFor<Block>>,
	Block: BlockT,
{
	/// Create a new iterator over storage keys.
	///
	/// `prefix` restricts the iteration to keys starting with it; `start_at`
	/// sets the (exclusive) starting key.
	pub fn new(
		state: State,
		prefix: Option<&StorageKey>,
		start_at: Option<&StorageKey>,
	) -> Result<Self, State::Error> {
		let mut args = IterArgs::default();
		// `Option<&StorageKey>` is `Copy`, so map it directly instead of the
		// redundant `.as_ref()` round-trip through `Option<&&StorageKey>`.
		args.prefix = prefix.map(|prefix| prefix.0.as_slice());
		args.start_at = start_at.map(|start_at| start_at.0.as_slice());
		args.start_at_exclusive = true;

		Ok(Self { inner: state.raw_iter(args)?, state })
	}

	/// Create a new iterator over a child storage's keys.
	///
	/// Same as [`Self::new`] but scoped to the child trie identified by `child_info`.
	pub fn new_child(
		state: State,
		child_info: ChildInfo,
		prefix: Option<&StorageKey>,
		start_at: Option<&StorageKey>,
	) -> Result<Self, State::Error> {
		let mut args = IterArgs::default();
		args.prefix = prefix.map(|prefix| prefix.0.as_slice());
		args.start_at = start_at.map(|start_at| start_at.0.as_slice());
		args.child_info = Some(child_info);
		args.start_at_exclusive = true;

		Ok(Self { inner: state.raw_iter(args)?, state })
	}
}
impl<State, Block> Iterator for KeysIter<State, Block>
where
Block: BlockT,
State: StateBackend<HashingFor<Block>>,
{
type Item = StorageKey;
fn next(&mut self) -> Option<Self::Item> {
self.inner.next_key(&self.state)?.ok().map(StorageKey)
}
}
/// An `Iterator` that iterates keys and values in a given block under a prefix.
pub struct PairsIter<State, Block>
where
	State: StateBackend<HashingFor<Block>>,
	Block: BlockT,
{
	// Raw backend iterator driving the traversal.
	inner: <State as StateBackend<HashingFor<Block>>>::RawIter,
	// State the raw iterator is re-borrowed against on every `next` call.
	state: State,
}
impl<State, Block> Iterator for PairsIter<State, Block>
where
	Block: BlockT,
	State: StateBackend<HashingFor<Block>>,
{
	type Item = (StorageKey, StorageData);

	/// Yields the next `(key, value)` pair; iteration stops both when the
	/// raw iterator is exhausted and when it reports an error.
	fn next(&mut self) -> Option<Self::Item> {
		match self.inner.next_pair(&self.state)? {
			Ok((key, value)) => Some((StorageKey(key), StorageData(value))),
			Err(_) => None,
		}
	}
}
impl<State, Block> PairsIter<State, Block>
where
	State: StateBackend<HashingFor<Block>>,
	Block: BlockT,
{
	/// Create a new iterator over storage key and value pairs.
	///
	/// `prefix` restricts the iteration to keys starting with it; `start_at`
	/// sets the (exclusive) starting key.
	pub fn new(
		state: State,
		prefix: Option<&StorageKey>,
		start_at: Option<&StorageKey>,
	) -> Result<Self, State::Error> {
		let mut args = IterArgs::default();
		// `Option<&StorageKey>` is `Copy`, so map it directly instead of the
		// redundant `.as_ref()` round-trip through `Option<&&StorageKey>`.
		args.prefix = prefix.map(|prefix| prefix.0.as_slice());
		args.start_at = start_at.map(|start_at| start_at.0.as_slice());
		args.start_at_exclusive = true;

		Ok(Self { inner: state.raw_iter(args)?, state })
	}
}
/// Provides access to storage primitives
pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
	/// Given a block's `Hash` and a key, return the value under the key in that block.
	fn storage(
		&self,
		hash: Block::Hash,
		key: &StorageKey,
	) -> sp_blockchain::Result<Option<StorageData>>;

	/// Given a block's `Hash` and a key, return the hash of the value under the key in that block.
	fn storage_hash(
		&self,
		hash: Block::Hash,
		key: &StorageKey,
	) -> sp_blockchain::Result<Option<Block::Hash>>;

	/// Given a block's `Hash` and a key prefix, returns a `KeysIter` that iterates matching
	/// storage keys in that block.
	fn storage_keys(
		&self,
		hash: Block::Hash,
		prefix: Option<&StorageKey>,
		start_key: Option<&StorageKey>,
	) -> sp_blockchain::Result<KeysIter<B::State, Block>>;

	/// Given a block's `Hash` and a key prefix, returns an iterator over the storage keys and
	/// values in that block.
	fn storage_pairs(
		&self,
		hash: <Block as BlockT>::Hash,
		prefix: Option<&StorageKey>,
		start_key: Option<&StorageKey>,
	) -> sp_blockchain::Result<PairsIter<B::State, Block>>;

	/// Given a block's `Hash`, a key and a child storage key, return the value under the key in
	/// that block.
	fn child_storage(
		&self,
		hash: Block::Hash,
		child_info: &ChildInfo,
		key: &StorageKey,
	) -> sp_blockchain::Result<Option<StorageData>>;

	/// Given a block's `Hash` and a key `prefix` and a child storage key,
	/// returns a `KeysIter` that iterates matching storage keys in that block.
	fn child_storage_keys(
		&self,
		hash: Block::Hash,
		child_info: ChildInfo,
		prefix: Option<&StorageKey>,
		start_key: Option<&StorageKey>,
	) -> sp_blockchain::Result<KeysIter<B::State, Block>>;

	/// Given a block's `Hash`, a key and a child storage key, return the hash under the key in that
	/// block.
	fn child_storage_hash(
		&self,
		hash: Block::Hash,
		child_info: &ChildInfo,
		key: &StorageKey,
	) -> sp_blockchain::Result<Option<Block::Hash>>;

	/// Given a block's `Hash` and a key, return the closest merkle value.
	fn closest_merkle_value(
		&self,
		hash: Block::Hash,
		key: &StorageKey,
	) -> sp_blockchain::Result<Option<MerkleValue<Block::Hash>>>;

	/// Given a block's `Hash`, a key and a child storage key, return the closest merkle value.
	fn child_closest_merkle_value(
		&self,
		hash: Block::Hash,
		child_info: &ChildInfo,
		key: &StorageKey,
	) -> sp_blockchain::Result<Option<MerkleValue<Block::Hash>>>;
}
/// Specify the desired trie cache context when calling [`Backend::state_at`].
///
/// This is used to determine the size of the local trie cache.
#[derive(Debug, Clone, Copy)]
pub enum TrieCacheContext {
	/// This is used when calling [`Backend::state_at`] in a trusted context.
	///
	/// A trusted context is for example the building or importing of a block.
	/// In this case the local trie cache can grow unlimited and all the cached data
	/// will be propagated back to the shared trie cache. It is safe to let the local
	/// cache grow to hold the entire data, because importing and building blocks is
	/// bounded by the block size limit.
	Trusted,
	/// This is used when calling [`Backend::state_at`] from an untrusted context.
	///
	/// The local trie cache will be bounded by its preconfigured size.
	Untrusted,
}
impl From<CallContext> for TrieCacheContext {
	/// Maps the call context onto the matching cache context:
	/// on-chain calls are trusted, off-chain calls are not.
	fn from(call_context: CallContext) -> Self {
		match call_context {
			CallContext::Onchain => Self::Trusted,
			CallContext::Offchain => Self::Untrusted,
		}
	}
}
/// Client backend.
///
/// Manages the data layer.
///
/// # State Pruning
///
/// While an object from `state_at` is alive, the state
/// should not be pruned. The backend should internally reference-count
/// its state objects.
///
/// The same applies for live `BlockImportOperation`s: while an import operation building on a
/// parent `P` is alive, the state for `P` should not be pruned.
///
/// # Block Pruning
///
/// Users can pin blocks in memory by calling `pin_block`. When
/// a block would be pruned, its value is kept in an in-memory cache
/// until it is unpinned via `unpin_block`.
///
/// While a block is pinned, its state is also preserved.
///
/// The backend should internally reference count the number of pin / unpin calls.
pub trait Backend<Block: BlockT>: AuxStore + Send + Sync {
	/// Associated block insertion operation type.
	type BlockImportOperation: BlockImportOperation<Block, State = Self::State>;
	/// Associated blockchain backend type.
	type Blockchain: BlockchainBackend<Block>;
	/// Associated state backend type.
	///
	/// The `AsTrieBackend` bound ensures the state can also be viewed as a trie
	/// backend over the same storage type.
	type State: StateBackend<HashingFor<Block>>
		+ Send
		+ AsTrieBackend<
			HashingFor<Block>,
			TrieBackendStorage = <Self::State as StateBackend<HashingFor<Block>>>::TrieBackendStorage,
		>;
	/// Offchain workers local storage.
	type OffchainStorage: OffchainStorage;

	/// Begin a new block insertion transaction with given parent block id.
	///
	/// When constructing the genesis, this is called with all-zero hash.
	fn begin_operation(&self) -> sp_blockchain::Result<Self::BlockImportOperation>;

	/// Note an operation to contain state transition.
	fn begin_state_operation(
		&self,
		operation: &mut Self::BlockImportOperation,
		block: Block::Hash,
	) -> sp_blockchain::Result<()>;

	/// Commit block insertion.
	fn commit_operation(
		&self,
		transaction: Self::BlockImportOperation,
	) -> sp_blockchain::Result<()>;

	/// Finalize block with given `hash`.
	///
	/// This should only be called if the parent of the given block has been finalized.
	fn finalize_block(
		&self,
		hash: Block::Hash,
		justification: Option<Justification>,
	) -> sp_blockchain::Result<()>;

	/// Append justification to the block with the given `hash`.
	///
	/// This should only be called for blocks that are already finalized.
	fn append_justification(
		&self,
		hash: Block::Hash,
		justification: Justification,
	) -> sp_blockchain::Result<()>;

	/// Returns reference to blockchain backend.
	fn blockchain(&self) -> &Self::Blockchain;

	/// Returns current usage statistics.
	fn usage_info(&self) -> Option<UsageInfo>;

	/// Returns a handle to offchain storage.
	fn offchain_storage(&self) -> Option<Self::OffchainStorage>;

	/// Pin the block to keep body, justification and state available after pruning.
	/// Number of pins are reference counted. Users need to make sure to perform
	/// one call to [`Self::unpin_block`] per call to [`Self::pin_block`].
	fn pin_block(&self, hash: Block::Hash) -> sp_blockchain::Result<()>;

	/// Unpin the block to allow pruning.
	fn unpin_block(&self, hash: Block::Hash);

	/// Returns true if state for given block is available.
	///
	/// The default implementation probes `state_at` (with an untrusted cache
	/// context) and treats any error as "state unavailable"; `_number` is
	/// unused by the default implementation.
	fn have_state_at(&self, hash: Block::Hash, _number: NumberFor<Block>) -> bool {
		self.state_at(hash, TrieCacheContext::Untrusted).is_ok()
	}

	/// Returns state backend with post-state of given block.
	fn state_at(
		&self,
		hash: Block::Hash,
		trie_cache_context: TrieCacheContext,
	) -> sp_blockchain::Result<Self::State>;

	/// Attempts to revert the chain by `n` blocks. If `revert_finalized` is set it will attempt to
	/// revert past any finalized block, this is unsafe and can potentially leave the node in an
	/// inconsistent state. All blocks higher than the best block are also reverted and not counting
	/// towards `n`.
	///
	/// Returns the number of blocks that were successfully reverted and the list of finalized
	/// blocks that has been reverted.
	fn revert(
		&self,
		n: NumberFor<Block>,
		revert_finalized: bool,
	) -> sp_blockchain::Result<(NumberFor<Block>, HashSet<Block::Hash>)>;

	/// Discard non-best, unfinalized leaf block.
	fn remove_leaf_block(&self, hash: Block::Hash) -> sp_blockchain::Result<()>;

	/// Insert auxiliary data into key-value store.
	///
	/// The default implementation forwards to the [`AuxStore`] supertrait.
	fn insert_aux<
		'a,
		'b: 'a,
		'c: 'a,
		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
		D: IntoIterator<Item = &'a &'b [u8]>,
	>(
		&self,
		insert: I,
		delete: D,
	) -> sp_blockchain::Result<()> {
		AuxStore::insert_aux(self, insert, delete)
	}

	/// Query auxiliary data from key-value store.
	///
	/// The default implementation forwards to the [`AuxStore`] supertrait.
	fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>> {
		AuxStore::get_aux(self, key)
	}

	/// Gain access to the import lock around this backend.
	///
	/// _Note_ Backend isn't expected to acquire the lock by itself ever. Rather
	/// the using components should acquire and hold the lock whenever they do
	/// something that the import of a block would interfere with, e.g. importing
	/// a new block or calculating the best head.
	fn get_import_lock(&self) -> &RwLock<()>;

	/// Tells whether the backend requires full-sync mode.
	fn requires_full_sync(&self) -> bool;
}
/// Marker for all [`Backend`] implementations that make use of state data stored locally.
pub trait LocalBackend<Block: BlockT>: Backend<Block> {}
+95
View File
@@ -0,0 +1,95 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! A method call executor interface.
use sc_executor::{RuntimeVersion, RuntimeVersionOf};
use sp_core::traits::CallContext;
use sp_externalities::Extensions;
use sp_runtime::traits::{Block as BlockT, HashingFor};
use sp_state_machine::{OverlayedChanges, StorageProof};
use std::cell::RefCell;
use crate::execution_extensions::ExecutionExtensions;
use sp_api::ProofRecorder;
/// Executor Provider
pub trait ExecutorProvider<Block: BlockT> {
	/// The executor instance type.
	type Executor: CallExecutor<Block>;

	/// Get call executor reference.
	fn executor(&self) -> &Self::Executor;

	/// Get a reference to the execution extensions.
	fn execution_extensions(&self) -> &ExecutionExtensions<Block>;
}
/// Method call executor.
pub trait CallExecutor<B: BlockT>: RuntimeVersionOf {
	/// Externalities error type.
	type Error: sp_state_machine::Error;

	/// The backend used by the node.
	type Backend: crate::backend::Backend<B>;

	/// Returns the [`ExecutionExtensions`].
	fn execution_extensions(&self) -> &ExecutionExtensions<B>;

	/// Execute a call to a contract on top of state in a block of given hash.
	///
	/// No changes are made.
	fn call(
		&self,
		at_hash: B::Hash,
		method: &str,
		call_data: &[u8],
		context: CallContext,
	) -> Result<Vec<u8>, sp_blockchain::Error>;

	/// Execute a contextual call on top of state in a block of a given hash.
	///
	/// No changes are made.
	/// Before executing the method, passed header is installed as the current header
	/// of the execution context.
	fn contextual_call(
		&self,
		at_hash: B::Hash,
		method: &str,
		call_data: &[u8],
		changes: &RefCell<OverlayedChanges<HashingFor<B>>>,
		proof_recorder: &Option<ProofRecorder<B>>,
		call_context: CallContext,
		extensions: &RefCell<Extensions>,
	) -> sp_blockchain::Result<Vec<u8>>;

	/// Extract RuntimeVersion of given block.
	///
	/// No changes are made.
	fn runtime_version(&self, at_hash: B::Hash) -> Result<RuntimeVersion, sp_blockchain::Error>;

	/// Prove the execution of the given `method`.
	///
	/// No changes are made.
	fn prove_execution(
		&self,
		at_hash: B::Hash,
		method: &str,
		call_data: &[u8],
	) -> Result<(Vec<u8>, StorageProof), sp_blockchain::Error>;
}
+475
View File
@@ -0,0 +1,475 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! A set of APIs supported by the client along with their primitives.
use sp_consensus::BlockOrigin;
use sp_core::storage::StorageKey;
use sp_runtime::{
generic::SignedBlock,
traits::{Block as BlockT, NumberFor},
Justifications,
};
use std::{
collections::HashSet,
fmt::{self, Debug},
sync::Arc,
};
use crate::{
blockchain::Info, notifications::StorageEventStream, FinalizeSummary, ImportSummary, StaleBlock,
};
use sc_transaction_pool_api::ChainEvent;
use sc_utils::mpsc::{TracingUnboundedReceiver, TracingUnboundedSender};
use sp_blockchain;
/// Type that implements `futures::Stream` of block import events.
pub type ImportNotifications<Block> = TracingUnboundedReceiver<BlockImportNotification<Block>>;

/// A stream of block finality notifications.
pub type FinalityNotifications<Block> = TracingUnboundedReceiver<FinalityNotification<Block>>;

/// Expected hashes of blocks at given heights.
///
/// This may be used as chain spec extension to set trusted checkpoints, i.e.
/// the client will refuse to import a block with a different hash at the given
/// height.
pub type ForkBlocks<Block> = Option<Vec<(NumberFor<Block>, <Block as BlockT>::Hash)>>;

/// Known bad block hashes.
///
/// This may be used as chain spec extension to filter out known, unwanted forks.
pub type BadBlocks<Block> = Option<HashSet<<Block as BlockT>::Hash>>;
/// Figure out the block type for a given type (for now, just a `Client`).
pub trait BlockOf {
	/// The type of the block.
	type Type: BlockT;
}
/// A source of blockchain events.
pub trait BlockchainEvents<Block: BlockT> {
	/// Get block import event stream.
	///
	/// Not guaranteed to be fired for every imported block. Use
	/// `every_import_notification_stream()` if you want a notification of every imported block
	/// regardless.
	///
	/// The events for this notification stream are emitted:
	/// - During initial sync process: if there is a re-org while importing blocks. See
	/// [here](https://github.com/paritytech/substrate/pull/7118#issuecomment-694091901) for the
	/// rationale behind this.
	/// - After initial sync process: on every imported block, regardless of whether it is
	/// the new best block or not, causes a re-org or not.
	fn import_notification_stream(&self) -> ImportNotifications<Block>;

	/// Get a stream of every imported block.
	fn every_import_notification_stream(&self) -> ImportNotifications<Block>;

	/// Get a stream of finality notifications. Not guaranteed to be fired for every
	/// finalized block.
	fn finality_notification_stream(&self) -> FinalityNotifications<Block>;

	/// Get storage changes event stream.
	///
	/// Passing `None` as `filter_keys` subscribes to all storage changes.
	fn storage_changes_notification_stream(
		&self,
		filter_keys: Option<&[StorageKey]>,
		child_filter_keys: Option<&[(StorageKey, Option<Vec<StorageKey>>)]>,
	) -> sp_blockchain::Result<StorageEventStream<Block::Hash>>;
}
/// List of operations to be performed on storage aux data.
/// First tuple element is the encoded data key.
/// Second tuple element is the encoded optional data to write.
/// If `None`, the key and the associated data are deleted from storage.
pub type AuxDataOperations = Vec<(Vec<u8>, Option<Vec<u8>>)>;

/// Callback invoked before committing the operations created during block import.
/// This gives the opportunity to perform auxiliary pre-commit actions and optionally
/// enqueue further storage write operations to be atomically performed on commit.
pub type OnImportAction<Block> =
	Box<dyn (Fn(&BlockImportNotification<Block>) -> AuxDataOperations) + Send>;

/// Callback invoked before committing the operations created during block finalization.
/// This gives the opportunity to perform auxiliary pre-commit actions and optionally
/// enqueue further storage write operations to be atomically performed on commit.
pub type OnFinalityAction<Block> =
	Box<dyn (Fn(&FinalityNotification<Block>) -> AuxDataOperations) + Send>;
/// Interface to perform auxiliary actions before committing a block import or
/// finality operation.
pub trait PreCommitActions<Block: BlockT> {
	/// Actions to be performed on block import.
	fn register_import_action(&self, op: OnImportAction<Block>);

	/// Actions to be performed on block finalization.
	fn register_finality_action(&self, op: OnFinalityAction<Block>);
}
/// Interface for fetching block data.
pub trait BlockBackend<Block: BlockT> {
	/// Get block body by hash. Returns `None` if the body is not stored.
	fn block_body(
		&self,
		hash: Block::Hash,
	) -> sp_blockchain::Result<Option<Vec<<Block as BlockT>::Extrinsic>>>;

	/// Get all indexed transactions for a block,
	/// including renewed transactions.
	///
	/// Note that this will only fetch transactions
	/// that are indexed by the runtime with `storage_index_transaction`.
	fn block_indexed_body(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Vec<Vec<u8>>>>;

	/// Get full block by hash.
	fn block(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<SignedBlock<Block>>>;

	/// Get block status by block hash.
	fn block_status(&self, hash: Block::Hash) -> sp_blockchain::Result<sp_consensus::BlockStatus>;

	/// Get block justifications for the block with the given hash.
	fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Justifications>>;

	/// Get block hash by number.
	fn block_hash(&self, number: NumberFor<Block>) -> sp_blockchain::Result<Option<Block::Hash>>;

	/// Get single indexed transaction by content hash.
	///
	/// Note that this will only fetch transactions
	/// that are indexed by the runtime with `storage_index_transaction`.
	fn indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Vec<u8>>>;

	/// Check if transaction index exists.
	///
	/// Default implementation simply checks whether `indexed_transaction` finds an entry.
	fn has_indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result<bool> {
		Ok(self.indexed_transaction(hash)?.is_some())
	}

	/// Tells whether the current client configuration requires full-sync mode.
	fn requires_full_sync(&self) -> bool;
}
/// Provide a list of potential uncle headers for a given block.
pub trait ProvideUncles<Block: BlockT> {
	/// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors.
	fn uncles(
		&self,
		target_hash: Block::Hash,
		max_generation: NumberFor<Block>,
	) -> sp_blockchain::Result<Vec<Block::Header>>;
}
/// Client info
#[derive(Debug, Clone)]
pub struct ClientInfo<Block: BlockT> {
	/// Chain info (best/finalized blocks etc.).
	pub chain: Info<Block>,
	/// Usage info, if backend supports this.
	pub usage: Option<UsageInfo>,
}
/// A wrapper to store the size of some memory.
#[derive(Default, Clone, Debug, Copy)]
pub struct MemorySize(usize);

impl MemorySize {
	/// Creates `Self` from the given `bytes` size.
	pub fn from_bytes(bytes: usize) -> Self {
		Self(bytes)
	}

	/// Returns the memory size as bytes.
	pub fn as_bytes(self) -> usize {
		self.0
	}
}

impl fmt::Display for MemorySize {
	/// Formats the size using the largest binary unit that keeps the value >= 1:
	/// exact byte count below 1 KiB, otherwise two decimal places.
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		const KIB: usize = 1024;
		const MIB: usize = 1024 * KIB;
		const GIB: usize = 1024 * MIB;

		let bytes = self.0;
		if bytes < KIB {
			write!(f, "{} bytes", bytes)
		} else if bytes < MIB {
			write!(f, "{:.2} KiB", bytes as f64 / KIB as f64)
		} else if bytes < GIB {
			write!(f, "{:.2} MiB", bytes as f64 / MIB as f64)
		} else {
			write!(f, "{:.2} GiB", bytes as f64 / GIB as f64)
		}
	}
}
/// Memory statistics for client instance.
#[derive(Default, Clone, Debug)]
pub struct MemoryInfo {
	/// Size of state cache.
	pub state_cache: MemorySize,
	/// Size of backend database cache.
	pub database_cache: MemorySize,
}
/// I/O statistics for client instance.
#[derive(Default, Clone, Debug)]
pub struct IoInfo {
	/// Number of transactions.
	pub transactions: u64,
	/// Total bytes read from disk.
	pub bytes_read: u64,
	/// Total bytes written to disk.
	pub bytes_written: u64,
	/// Total key writes to disk.
	pub writes: u64,
	/// Total key reads from disk.
	pub reads: u64,
	/// Average size of the transaction.
	pub average_transaction_size: u64,
	/// State reads (keys).
	pub state_reads: u64,
	/// State reads (keys) from cache.
	pub state_reads_cache: u64,
	/// State writes (keys).
	pub state_writes: u64,
	/// State write (keys) already cached.
	pub state_writes_cache: u64,
	/// State write (trie nodes) to backend db.
	pub state_writes_nodes: u64,
}
/// Usage statistics for running client instance.
///
/// Returning backend determines the scope of these stats,
/// but usually it is either from service start or from previous
/// gathering of the statistics.
#[derive(Default, Clone, Debug)]
pub struct UsageInfo {
	/// Memory statistics.
	pub memory: MemoryInfo,
	/// I/O statistics.
	pub io: IoInfo,
}
impl fmt::Display for UsageInfo {
	// Renders a compact, single-line summary. Note: only a subset of the
	// `IoInfo` fields is included here.
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		write!(
			f,
			"caches: ({} state, {} db overlay), \
			 i/o: ({} tx, {} write, {} read, {} avg tx, {}/{} key cache reads/total, {} trie nodes writes)",
			self.memory.state_cache,
			self.memory.database_cache,
			self.io.transactions,
			self.io.bytes_written,
			self.io.bytes_read,
			self.io.average_transaction_size,
			self.io.state_reads_cache,
			self.io.state_reads,
			self.io.state_writes_nodes,
		)
	}
}
/// Sends a message to the pinning-worker once dropped to unpin a block in the backend.
pub struct UnpinHandleInner<Block: BlockT> {
	/// Hash of the block pinned by this handle
	hash: Block::Hash,
	/// Channel used to notify the pinning-worker on drop.
	unpin_worker_sender: TracingUnboundedSender<UnpinWorkerMessage<Block>>,
}
impl<Block: BlockT> Debug for UnpinHandleInner<Block> {
	// Manual impl: only the pinned block hash is reported.
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		f.debug_struct("UnpinHandleInner").field("pinned_block", &self.hash).finish()
	}
}
impl<Block: BlockT> UnpinHandleInner<Block> {
	/// Create a new [`UnpinHandleInner`]
	pub fn new(
		hash: Block::Hash,
		unpin_worker_sender: TracingUnboundedSender<UnpinWorkerMessage<Block>>,
	) -> Self {
		Self { hash, unpin_worker_sender }
	}
}
impl<Block: BlockT> Drop for UnpinHandleInner<Block> {
	// Best-effort unpin: if the worker is gone the failure is only logged.
	fn drop(&mut self) {
		if let Err(err) =
			self.unpin_worker_sender.unbounded_send(UnpinWorkerMessage::Unpin(self.hash))
		{
			log::debug!(target: "db", "Unable to unpin block with hash: {}, error: {:?}", self.hash, err);
		};
	}
}
/// Message that signals notification-based pinning actions to the pinning-worker.
///
/// When the notification is dropped, an `Unpin` message should be sent to the worker.
#[derive(Debug)]
pub enum UnpinWorkerMessage<Block: BlockT> {
	/// Should be sent when an import or finality notification is created.
	AnnouncePin(Block::Hash),
	/// Should be sent when an import or finality notification is dropped.
	Unpin(Block::Hash),
}
/// Keeps a specific block pinned while the handle is alive.
/// Once the last handle instance for a given block is dropped, the
/// block is unpinned in the [`Backend`](crate::backend::Backend::unpin_block).
///
/// Cloning is cheap: clones share the same inner handle via `Arc`.
#[derive(Debug, Clone)]
pub struct UnpinHandle<Block: BlockT>(Arc<UnpinHandleInner<Block>>);
impl<Block: BlockT> UnpinHandle<Block> {
	/// Create a new [`UnpinHandle`]
	pub fn new(
		hash: Block::Hash,
		unpin_worker_sender: TracingUnboundedSender<UnpinWorkerMessage<Block>>,
	) -> UnpinHandle<Block> {
		// The inner handle is reference-counted so clones keep the block pinned.
		Self(Arc::new(UnpinHandleInner::new(hash, unpin_worker_sender)))
	}

	/// Hash of the block this handle is unpinning on drop
	pub fn hash(&self) -> Block::Hash {
		let Self(inner) = self;
		inner.hash
	}
}
/// Summary of an imported block
#[derive(Clone, Debug)]
pub struct BlockImportNotification<Block: BlockT> {
	/// Imported block header hash.
	pub hash: Block::Hash,
	/// Imported block origin.
	pub origin: BlockOrigin,
	/// Imported block header.
	pub header: Block::Header,
	/// Is this the new best block.
	pub is_new_best: bool,
	/// Tree route from old best to new best parent.
	///
	/// If `None`, there was no re-org while importing.
	pub tree_route: Option<Arc<sp_blockchain::TreeRoute<Block>>>,
	/// Handle to unpin the block this notification is for
	unpin_handle: UnpinHandle<Block>,
}
impl<Block: BlockT> BlockImportNotification<Block> {
	/// Create new notification
	///
	/// Creates the contained [`UnpinHandle`] from `unpin_worker_sender`, so the
	/// block stays pinned for as long as the notification (or an extracted
	/// handle) is alive.
	pub fn new(
		hash: Block::Hash,
		origin: BlockOrigin,
		header: Block::Header,
		is_new_best: bool,
		tree_route: Option<Arc<sp_blockchain::TreeRoute<Block>>>,
		unpin_worker_sender: TracingUnboundedSender<UnpinWorkerMessage<Block>>,
	) -> Self {
		Self {
			hash,
			origin,
			header,
			is_new_best,
			tree_route,
			unpin_handle: UnpinHandle::new(hash, unpin_worker_sender),
		}
	}
	/// Consume this notification and extract the unpin handle.
	///
	/// Note: Only use this if you want to keep the block pinned in the backend.
	pub fn into_unpin_handle(self) -> UnpinHandle<Block> {
		self.unpin_handle
	}
}
/// Summary of a finalized block.
#[derive(Clone, Debug)]
pub struct FinalityNotification<Block: BlockT> {
	/// Finalized block header hash.
	pub hash: Block::Hash,
	/// Finalized block header.
	pub header: Block::Header,
	/// Path from the old finalized to new finalized parent (implicitly finalized blocks).
	///
	/// This maps to the range `(old_finalized, new_finalized)`.
	pub tree_route: Arc<[Block::Hash]>,
	/// Stale blocks.
	pub stale_blocks: Arc<[Arc<StaleBlock<Block>>]>,
	/// Handle to unpin the block this notification is for
	unpin_handle: UnpinHandle<Block>,
}
impl<B: BlockT> TryFrom<BlockImportNotification<B>> for ChainEvent<B> {
	type Error = ();

	/// Only notifications for a new best block convert into a chain event.
	fn try_from(n: BlockImportNotification<B>) -> Result<Self, ()> {
		if !n.is_new_best {
			return Err(());
		}

		Ok(Self::NewBestBlock { hash: n.hash, tree_route: n.tree_route })
	}
}
impl<B: BlockT> From<FinalityNotification<B>> for ChainEvent<B> {
fn from(n: FinalityNotification<B>) -> Self {
Self::Finalized { hash: n.hash, tree_route: n.tree_route }
}
}
impl<Block: BlockT> FinalityNotification<Block> {
	/// Create finality notification from finality summary.
	///
	/// The last entry of `summary.finalized` becomes the notification hash;
	/// the remaining entries form the tree route. Falls back to the default
	/// hash if the summary contains no finalized blocks.
	pub fn from_summary(
		mut summary: FinalizeSummary<Block>,
		unpin_worker_sender: TracingUnboundedSender<UnpinWorkerMessage<Block>>,
	) -> FinalityNotification<Block> {
		let hash = summary.finalized.pop().unwrap_or_default();
		FinalityNotification {
			hash,
			header: summary.header,
			tree_route: Arc::from(summary.finalized),
			stale_blocks: Arc::from(
				summary.stale_blocks.into_iter().map(Arc::from).collect::<Vec<_>>(),
			),
			unpin_handle: UnpinHandle::new(hash, unpin_worker_sender),
		}
	}
	/// Consume this notification and extract the unpin handle.
	///
	/// Note: Only use this if you want to keep the block pinned in the backend.
	pub fn into_unpin_handle(self) -> UnpinHandle<Block> {
		self.unpin_handle
	}
}
impl<Block: BlockT> BlockImportNotification<Block> {
	/// Create block import notification from an import summary.
	pub fn from_summary(
		summary: ImportSummary<Block>,
		unpin_worker_sender: TracingUnboundedSender<UnpinWorkerMessage<Block>>,
	) -> BlockImportNotification<Block> {
		let hash = summary.hash;
		BlockImportNotification {
			hash,
			origin: summary.origin,
			header: summary.header,
			is_new_best: summary.is_new_best,
			tree_route: summary.tree_route.map(Arc::new),
			unpin_handle: UnpinHandle::new(hash, unpin_worker_sender),
		}
	}
}
@@ -0,0 +1,130 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Execution extensions for runtime calls.
//!
//! This module is responsible for defining the execution
//! strategy for the runtime calls and provide the right `Externalities`
//! extensions to support APIs for particular execution context & capabilities.
use parking_lot::RwLock;
use sp_core::traits::{ReadRuntimeVersion, ReadRuntimeVersionExt};
use sp_externalities::{Extension, Extensions};
use sp_runtime::traits::{Block as BlockT, NumberFor};
use std::{marker::PhantomData, sync::Arc};
/// Generate the starting set of [`Extensions`].
///
/// These [`Extensions`] are passed to the environment a runtime is executed in.
pub trait ExtensionsFactory<Block: BlockT>: Send + Sync {
	/// Create [`Extensions`] for the given input.
	///
	/// - `block_hash`: The hash of the block in the context that extensions will be used.
	/// - `block_number`: The number of the block in the context that extensions will be used.
	fn extensions_for(&self, block_hash: Block::Hash, block_number: NumberFor<Block>)
		-> Extensions;
}
impl<Block: BlockT> ExtensionsFactory<Block> for () {
	// No-op factory: always produces an empty set of extensions.
	fn extensions_for(&self, _: Block::Hash, _: NumberFor<Block>) -> Extensions {
		Extensions::new()
	}
}
impl<Block: BlockT, T: ExtensionsFactory<Block>> ExtensionsFactory<Block> for Vec<T> {
	// Combines the extensions produced by every contained factory.
	fn extensions_for(
		&self,
		block_hash: Block::Hash,
		block_number: NumberFor<Block>,
	) -> Extensions {
		let mut exts = Extensions::new();
		exts.extend(self.iter().map(|e| e.extensions_for(block_hash, block_number)));
		exts
	}
}
/// An [`ExtensionsFactory`] that registers an [`Extension`] before a certain block.
pub struct ExtensionBeforeBlock<Block: BlockT, Ext> {
	/// Blocks with number strictly below this get the extension registered.
	before: NumberFor<Block>,
	// `fn(Ext) -> Ext` marks the `Ext` type parameter without owning a value of it.
	_marker: PhantomData<fn(Ext) -> Ext>,
}
impl<Block: BlockT, Ext> ExtensionBeforeBlock<Block, Ext> {
	/// Create the extension factory.
	///
	/// - `before`: The block number until the extension should be registered.
	pub fn new(before: NumberFor<Block>) -> Self {
		Self { before, _marker: PhantomData }
	}
}
impl<Block: BlockT, Ext: Default + Extension> ExtensionsFactory<Block>
	for ExtensionBeforeBlock<Block, Ext>
{
	/// Registers `Ext` only for blocks strictly below the configured boundary.
	fn extensions_for(&self, _: Block::Hash, block_number: NumberFor<Block>) -> Extensions {
		let mut extensions = Extensions::new();
		if block_number < self.before {
			extensions.register(Ext::default());
		}
		extensions
	}
}
/// A producer of execution extensions for offchain calls.
///
/// This crate aggregates extensions available for the offchain calls
/// and is responsible for producing a correct `Extensions` object.
pub struct ExecutionExtensions<Block: BlockT> {
	/// Factory producing per-block extensions; replaceable at runtime.
	extensions_factory: RwLock<Box<dyn ExtensionsFactory<Block>>>,
	/// Runtime-version reader registered into every produced set of extensions.
	read_runtime_version: Arc<dyn ReadRuntimeVersion>,
}
impl<Block: BlockT> ExecutionExtensions<Block> {
	/// Create new `ExecutionExtensions` given an `extensions_factory`.
	///
	/// When no factory is provided, the no-op `()` factory (empty extensions) is used.
	pub fn new(
		extensions_factory: Option<Box<dyn ExtensionsFactory<Block>>>,
		read_runtime_version: Arc<dyn ReadRuntimeVersion>,
	) -> Self {
		let extensions_factory = extensions_factory.unwrap_or_else(|| Box::new(()));

		Self { extensions_factory: RwLock::new(extensions_factory), read_runtime_version }
	}

	/// Set the new extensions_factory
	pub fn set_extensions_factory(&self, maker: impl ExtensionsFactory<Block> + 'static) {
		let mut factory = self.extensions_factory.write();
		*factory = Box::new(maker);
	}

	/// Produces default extensions based on the input parameters.
	pub fn extensions(
		&self,
		block_hash: Block::Hash,
		block_number: NumberFor<Block>,
	) -> Extensions {
		let mut extensions =
			self.extensions_factory.read().extensions_for(block_hash, block_number);
		// The runtime-version reader is always made available on top of whatever
		// the factory produced.
		extensions.register(ReadRuntimeVersionExt::new(self.read_runtime_version.clone()));
		extensions
	}
}
+874
View File
@@ -0,0 +1,874 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! In memory client backend
use parking_lot::RwLock;
use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata};
use sp_core::{
offchain::storage::InMemOffchainStorage as OffchainStorage, storage::well_known_keys,
};
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, HashingFor, Header as HeaderT, NumberFor, Zero},
Justification, Justifications, StateVersion, Storage,
};
use sp_state_machine::{
Backend as StateBackend, BackendTransaction, ChildStorageCollection, InMemoryBackend,
IndexOperation, StorageCollection,
};
use std::{
collections::{HashMap, HashSet},
ptr,
sync::Arc,
};
use crate::{
backend::{self, NewBlockState},
blockchain::{self, BlockStatus, HeaderBackend},
leaves::LeafSet,
TrieCacheContext, UsageInfo,
};
/// A block queued in a [`BlockImportOperation`], together with the chain
/// position (best/final/normal) it should take once committed.
struct PendingBlock<B: BlockT> {
	block: StoredBlock<B>,
	state: NewBlockState,
}
/// A block kept in memory: either header-only or full, in both cases with
/// optional justifications.
#[derive(PartialEq, Eq, Clone)]
enum StoredBlock<B: BlockT> {
	/// Only the header is available.
	Header(B::Header, Option<Justifications>),
	/// The full block (header and body).
	Full(B, Option<Justifications>),
}
impl<B: BlockT> StoredBlock<B> {
	/// Build a stored block: full when a body is given, header-only otherwise.
	fn new(
		header: B::Header,
		body: Option<Vec<B::Extrinsic>>,
		just: Option<Justifications>,
	) -> Self {
		if let Some(body) = body {
			Self::Full(B::new(header, body), just)
		} else {
			Self::Header(header, just)
		}
	}

	/// The block header.
	fn header(&self) -> &B::Header {
		match self {
			Self::Header(header, _) => header,
			Self::Full(block, _) => block.header(),
		}
	}

	/// The stored justifications, if any.
	fn justifications(&self) -> Option<&Justifications> {
		match self {
			Self::Header(_, just) | Self::Full(_, just) => just.as_ref(),
		}
	}

	/// The block body, available only when the full block is stored.
	fn extrinsics(&self) -> Option<&[B::Extrinsic]> {
		match self {
			Self::Header(..) => None,
			Self::Full(block, _) => Some(block.extrinsics()),
		}
	}

	/// Decompose into header, optional body and optional justifications.
	fn into_inner(self) -> (B::Header, Option<Vec<B::Extrinsic>>, Option<Justifications>) {
		match self {
			Self::Header(header, just) => (header, None, just),
			Self::Full(block, just) => {
				let (header, body) = block.deconstruct();
				(header, Some(body), just)
			},
		}
	}
}
/// The mutable state behind [`Blockchain`], kept under a single lock.
#[derive(Clone)]
struct BlockchainStorage<Block: BlockT> {
	/// All known blocks, by hash.
	blocks: HashMap<Block::Hash, StoredBlock<Block>>,
	/// Canonical-chain mapping: block number -> block hash.
	hashes: HashMap<NumberFor<Block>, Block::Hash>,
	best_hash: Block::Hash,
	best_number: NumberFor<Block>,
	finalized_hash: Block::Hash,
	finalized_number: NumberFor<Block>,
	genesis_hash: Block::Hash,
	/// CHT roots, populated via `Blockchain::insert_cht_root`.
	header_cht_roots: HashMap<NumberFor<Block>, Block::Hash>,
	/// Chain leaf tracking (see [`LeafSet`]).
	leaves: LeafSet<Block::Hash, NumberFor<Block>>,
	/// Auxiliary key-value data.
	aux: HashMap<Vec<u8>, Vec<u8>>,
}
/// In-memory blockchain. Supports concurrent reads.
///
/// Clones share the same underlying storage via `Arc`.
#[derive(Clone)]
pub struct Blockchain<Block: BlockT> {
	storage: Arc<RwLock<BlockchainStorage<Block>>>,
}
impl<Block: BlockT> Default for Blockchain<Block> {
	// An empty in-memory blockchain, equivalent to `Blockchain::new()`.
	fn default() -> Self {
		Self::new()
	}
}
impl<Block: BlockT> Blockchain<Block> {
	/// Get header hash of given block.
	pub fn id(&self, id: BlockId<Block>) -> Option<Block::Hash> {
		match id {
			BlockId::Hash(h) => Some(h),
			BlockId::Number(n) => self.storage.read().hashes.get(&n).cloned(),
		}
	}
	/// Create new in-memory blockchain storage.
	pub fn new() -> Blockchain<Block> {
		let storage = Arc::new(RwLock::new(BlockchainStorage {
			blocks: HashMap::new(),
			hashes: HashMap::new(),
			best_hash: Default::default(),
			best_number: Zero::zero(),
			finalized_hash: Default::default(),
			finalized_number: Zero::zero(),
			genesis_hash: Default::default(),
			header_cht_roots: HashMap::new(),
			leaves: LeafSet::new(),
			aux: HashMap::new(),
		}));
		Blockchain { storage }
	}
	/// Insert a block header and associated data.
	///
	/// Updates the best chain first (if requested), then registers the block
	/// in the leaf set and block map, and updates the finalized/genesis
	/// bookkeeping as appropriate.
	pub fn insert(
		&self,
		hash: Block::Hash,
		header: <Block as BlockT>::Header,
		justifications: Option<Justifications>,
		body: Option<Vec<<Block as BlockT>::Extrinsic>>,
		new_state: NewBlockState,
	) -> sp_blockchain::Result<()> {
		let number = *header.number();
		if new_state.is_best() {
			// `apply_head` takes the write lock itself, so it must run before
			// the write-lock scope below.
			self.apply_head(&header)?;
		}
		{
			let mut storage = self.storage.write();
			storage.leaves.import(hash, number, *header.parent_hash());
			storage.blocks.insert(hash, StoredBlock::new(header, body, justifications));
			if let NewBlockState::Final = new_state {
				storage.finalized_hash = hash;
				storage.finalized_number = number;
			}
			// Block number zero is treated as genesis.
			if number == Zero::zero() {
				storage.genesis_hash = hash;
			}
		}
		Ok(())
	}
	/// Get total number of blocks.
	pub fn blocks_count(&self) -> usize {
		self.storage.read().blocks.len()
	}
	/// Compare this blockchain with another in-mem blockchain
	pub fn equals_to(&self, other: &Self) -> bool {
		// Check ptr equality first to avoid double read locks.
		if ptr::eq(self, other) {
			return true;
		}
		self.canon_equals_to(other) && self.storage.read().blocks == other.storage.read().blocks
	}
	/// Compare canonical chain to other canonical chain.
	pub fn canon_equals_to(&self, other: &Self) -> bool {
		// Check ptr equality first to avoid double read locks.
		if ptr::eq(self, other) {
			return true;
		}
		let this = self.storage.read();
		let other = other.storage.read();
		this.hashes == other.hashes &&
			this.best_hash == other.best_hash &&
			this.best_number == other.best_number &&
			this.genesis_hash == other.genesis_hash
	}
	/// Insert header CHT root.
	pub fn insert_cht_root(&self, block: NumberFor<Block>, cht_root: Block::Hash) {
		self.storage.write().header_cht_roots.insert(block, cht_root);
	}
	/// Set an existing block as head.
	pub fn set_head(&self, hash: Block::Hash) -> sp_blockchain::Result<()> {
		let header = self
			.header(hash)?
			.ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", hash)))?;
		self.apply_head(&header)
	}
	/// Make the block of the given header the new best block, updating the
	/// canonical number-to-hash mapping across a potential re-org.
	fn apply_head(&self, header: &<Block as BlockT>::Header) -> sp_blockchain::Result<()> {
		let hash = header.hash();
		let number = header.number();
		// Note: this may lock storage, so it must happen before obtaining storage
		// write lock.
		let best_tree_route = {
			let best_hash = self.storage.read().best_hash;
			if &best_hash == header.parent_hash() {
				// Direct child of the current best: no re-org needed.
				None
			} else {
				let route = sp_blockchain::tree_route(self, best_hash, *header.parent_hash())?;
				Some(route)
			}
		};
		let mut storage = self.storage.write();
		if let Some(tree_route) = best_tree_route {
			// apply retraction and enaction when reorganizing up to parent hash
			let enacted = tree_route.enacted();
			for entry in enacted {
				storage.hashes.insert(entry.number, entry.hash);
			}
			// Retracted numbers not overwritten by enacted entries are removed
			// from the canonical mapping.
			for entry in tree_route.retracted().iter().skip(enacted.len()) {
				storage.hashes.remove(&entry.number);
			}
		}
		storage.best_hash = hash;
		storage.best_number = *number;
		storage.hashes.insert(*number, hash);
		Ok(())
	}
	/// Mark `block` as finalized; when a justification is provided it
	/// replaces any previously stored justifications for that block.
	fn finalize_header(
		&self,
		block: Block::Hash,
		justification: Option<Justification>,
	) -> sp_blockchain::Result<()> {
		let mut storage = self.storage.write();
		storage.finalized_hash = block;
		if justification.is_some() {
			let block = storage
				.blocks
				.get_mut(&block)
				.expect("hash was fetched from a block in the db; qed");
			let block_justifications = match block {
				StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j,
			};
			*block_justifications = justification.map(Justifications::from);
		}
		Ok(())
	}
	/// Append a justification to the given block; errors if one with the
	/// same consensus engine ID is already stored.
	fn append_justification(
		&self,
		hash: Block::Hash,
		justification: Justification,
	) -> sp_blockchain::Result<()> {
		let mut storage = self.storage.write();
		let block = storage
			.blocks
			.get_mut(&hash)
			.expect("hash was fetched from a block in the db; qed");
		let block_justifications = match block {
			StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j,
		};
		if let Some(stored_justifications) = block_justifications {
			if !stored_justifications.append(justification) {
				return Err(sp_blockchain::Error::BadJustification(
					"Duplicate consensus engine ID".into(),
				));
			}
		} else {
			*block_justifications = Some(Justifications::from(justification));
		};
		Ok(())
	}
	/// Apply auxiliary data operations: `Some` writes the value, `None`
	/// deletes the key.
	fn write_aux(&self, ops: Vec<(Vec<u8>, Option<Vec<u8>>)>) {
		let mut storage = self.storage.write();
		for (k, v) in ops {
			match v {
				Some(v) => storage.aux.insert(k, v),
				None => storage.aux.remove(&k),
			};
		}
	}
}
impl<Block: BlockT> HeaderBackend<Block> for Blockchain<Block> {
	fn header(
		&self,
		hash: Block::Hash,
	) -> sp_blockchain::Result<Option<<Block as BlockT>::Header>> {
		Ok(self.storage.read().blocks.get(&hash).map(|b| b.header().clone()))
	}
	fn info(&self) -> blockchain::Info<Block> {
		let storage = self.storage.read();
		blockchain::Info {
			best_hash: storage.best_hash,
			best_number: storage.best_number,
			genesis_hash: storage.genesis_hash,
			finalized_hash: storage.finalized_hash,
			finalized_number: storage.finalized_number,
			// A default finalized hash means nothing was finalized yet.
			finalized_state: if storage.finalized_hash != Default::default() {
				Some((storage.finalized_hash, storage.finalized_number))
			} else {
				None
			},
			number_leaves: storage.leaves.count(),
			// The in-memory backend never tracks block gaps.
			block_gap: None,
		}
	}
	fn status(&self, hash: Block::Hash) -> sp_blockchain::Result<BlockStatus> {
		match self.storage.read().blocks.contains_key(&hash) {
			true => Ok(BlockStatus::InChain),
			false => Ok(BlockStatus::Unknown),
		}
	}
	fn number(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<NumberFor<Block>>> {
		Ok(self.storage.read().blocks.get(&hash).map(|b| *b.header().number()))
	}
	fn hash(
		&self,
		number: <<Block as BlockT>::Header as HeaderT>::Number,
	) -> sp_blockchain::Result<Option<Block::Hash>> {
		Ok(self.id(BlockId::Number(number)))
	}
}
impl<Block: BlockT> HeaderMetadata<Block> for Blockchain<Block> {
	type Error = sp_blockchain::Error;
	// Metadata is computed on the fly from the stored header; nothing is cached.
	fn header_metadata(
		&self,
		hash: Block::Hash,
	) -> Result<CachedHeaderMetadata<Block>, Self::Error> {
		self.header(hash)?
			.map(|header| CachedHeaderMetadata::from(&header))
			.ok_or_else(|| {
				sp_blockchain::Error::UnknownBlock(format!("header not found: {}", hash))
			})
	}
	fn insert_header_metadata(&self, _hash: Block::Hash, _metadata: CachedHeaderMetadata<Block>) {
		// No need to implement.
	}
	fn remove_header_metadata(&self, _hash: Block::Hash) {
		// No need to implement.
	}
}
impl<Block: BlockT> blockchain::Backend<Block> for Blockchain<Block> {
	fn body(
		&self,
		hash: Block::Hash,
	) -> sp_blockchain::Result<Option<Vec<<Block as BlockT>::Extrinsic>>> {
		Ok(self
			.storage
			.read()
			.blocks
			.get(&hash)
			.and_then(|b| b.extrinsics().map(|x| x.to_vec())))
	}
	fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<Justifications>> {
		Ok(self.storage.read().blocks.get(&hash).and_then(|b| b.justifications().cloned()))
	}
	fn last_finalized(&self) -> sp_blockchain::Result<Block::Hash> {
		Ok(self.storage.read().finalized_hash)
	}
	fn leaves(&self) -> sp_blockchain::Result<Vec<Block::Hash>> {
		Ok(self.storage.read().leaves.hashes())
	}
	// Child lookup is not supported by this backend and panics when called.
	fn children(&self, _parent_hash: Block::Hash) -> sp_blockchain::Result<Vec<Block::Hash>> {
		unimplemented!()
	}
	// Transaction indexing is not supported by this backend and panics when called.
	fn indexed_transaction(&self, _hash: Block::Hash) -> sp_blockchain::Result<Option<Vec<u8>>> {
		unimplemented!("Not supported by the in-mem backend.")
	}
	// Transaction indexing is not supported by this backend and panics when called.
	fn block_indexed_body(
		&self,
		_hash: Block::Hash,
	) -> sp_blockchain::Result<Option<Vec<Vec<u8>>>> {
		unimplemented!("Not supported by the in-mem backend.")
	}
}
impl<Block: BlockT> backend::AuxStore for Blockchain<Block> {
	// Inserts are applied before deletes; a key present in both ends up deleted.
	fn insert_aux<
		'a,
		'b: 'a,
		'c: 'a,
		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
		D: IntoIterator<Item = &'a &'b [u8]>,
	>(
		&self,
		insert: I,
		delete: D,
	) -> sp_blockchain::Result<()> {
		let mut storage = self.storage.write();
		for (k, v) in insert {
			storage.aux.insert(k.to_vec(), v.to_vec());
		}
		for k in delete {
			storage.aux.remove(*k);
		}
		Ok(())
	}
	fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>> {
		Ok(self.storage.read().aux.get(key).cloned())
	}
}
/// In-memory operation.
///
/// Accumulates the pending block, state transaction, aux writes, finality
/// markers and head change until the backend commits the operation.
pub struct BlockImportOperation<Block: BlockT> {
	/// At most one block per operation (enforced in `set_block_data`).
	pending_block: Option<PendingBlock<Block>>,
	old_state: InMemoryBackend<HashingFor<Block>>,
	new_state: Option<BackendTransaction<HashingFor<Block>>>,
	aux: Vec<(Vec<u8>, Option<Vec<u8>>)>,
	finalized_blocks: Vec<(Block::Hash, Option<Justification>)>,
	set_head: Option<Block::Hash>,
}
impl<Block: BlockT> BlockImportOperation<Block> {
	/// Compute the full storage root over `storage` on top of `old_state`,
	/// validating it as genesis storage first. The resulting transaction is
	/// retained as the operation's new state only when `commit` is true; the
	/// root is returned either way.
	fn apply_storage(
		&mut self,
		storage: Storage,
		commit: bool,
		state_version: StateVersion,
	) -> sp_blockchain::Result<Block::Hash> {
		check_genesis_storage(&storage)?;
		let child_delta = storage.children_default.values().map(|child_content| {
			(
				&child_content.child_info,
				child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))),
			)
		});
		let (root, transaction) = self.old_state.full_storage_root(
			storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))),
			child_delta,
			state_version,
		);
		if commit {
			self.new_state = Some(transaction);
		}
		Ok(root)
	}
}
impl<Block: BlockT> backend::BlockImportOperation<Block> for BlockImportOperation<Block> {
	type State = InMemoryBackend<HashingFor<Block>>;
	fn state(&self) -> sp_blockchain::Result<Option<&Self::State>> {
		Ok(Some(&self.old_state))
	}
	fn set_block_data(
		&mut self,
		header: <Block as BlockT>::Header,
		body: Option<Vec<<Block as BlockT>::Extrinsic>>,
		_indexed_body: Option<Vec<Vec<u8>>>,
		justifications: Option<Justifications>,
		state: NewBlockState,
	) -> sp_blockchain::Result<()> {
		assert!(self.pending_block.is_none(), "Only one block per operation is allowed");
		self.pending_block =
			Some(PendingBlock { block: StoredBlock::new(header, body, justifications), state });
		Ok(())
	}
	fn update_db_storage(
		&mut self,
		update: BackendTransaction<HashingFor<Block>>,
	) -> sp_blockchain::Result<()> {
		self.new_state = Some(update);
		Ok(())
	}
	fn set_genesis_state(
		&mut self,
		storage: Storage,
		commit: bool,
		state_version: StateVersion,
	) -> sp_blockchain::Result<Block::Hash> {
		self.apply_storage(storage, commit, state_version)
	}
	// Resetting always commits the new state.
	fn reset_storage(
		&mut self,
		storage: Storage,
		state_version: StateVersion,
	) -> sp_blockchain::Result<Block::Hash> {
		self.apply_storage(storage, true, state_version)
	}
	fn insert_aux<I>(&mut self, ops: I) -> sp_blockchain::Result<()>
	where
		I: IntoIterator<Item = (Vec<u8>, Option<Vec<u8>>)>,
	{
		self.aux.append(&mut ops.into_iter().collect());
		Ok(())
	}
	// No-op: per-key storage updates are not tracked by the in-mem operation.
	fn update_storage(
		&mut self,
		_update: StorageCollection,
		_child_update: ChildStorageCollection,
	) -> sp_blockchain::Result<()> {
		Ok(())
	}
	fn mark_finalized(
		&mut self,
		hash: Block::Hash,
		justification: Option<Justification>,
	) -> sp_blockchain::Result<()> {
		self.finalized_blocks.push((hash, justification));
		Ok(())
	}
	fn mark_head(&mut self, hash: Block::Hash) -> sp_blockchain::Result<()> {
		assert!(self.pending_block.is_none(), "Only one set block per operation is allowed");
		self.set_head = Some(hash);
		Ok(())
	}
	// No-op: transaction indexing is not supported by the in-mem backend.
	fn update_transaction_index(
		&mut self,
		_index: Vec<IndexOperation>,
	) -> sp_blockchain::Result<()> {
		Ok(())
	}
	// No-op: block gaps are not tracked by the in-mem backend.
	fn set_create_gap(&mut self, _create_gap: bool) {}
}
/// In-memory backend. Keeps all states and blocks in memory.
///
/// > **Warning**: Doesn't support all the features necessary for a proper database. Only use this
/// > struct for testing purposes. Do **NOT** use in production.
pub struct Backend<Block: BlockT> {
	/// Full state backend per imported block, by hash.
	states: RwLock<HashMap<Block::Hash, InMemoryBackend<HashingFor<Block>>>>,
	blockchain: Blockchain<Block>,
	import_lock: RwLock<()>,
	/// Pin reference counters per block; may go negative (see `unpin_block`).
	pinned_blocks: RwLock<HashMap<Block::Hash, i64>>,
}
impl<Block: BlockT> Backend<Block> {
	/// Create a new instance of in-mem backend.
	///
	/// # Warning
	///
	/// For testing purposes only!
	pub fn new() -> Self {
		Backend {
			states: RwLock::new(HashMap::new()),
			blockchain: Blockchain::new(),
			import_lock: Default::default(),
			pinned_blocks: Default::default(),
		}
	}

	/// Return the number of references active for a pinned block.
	///
	/// Returns `None` if the block was never pinned or unpinned.
	///
	/// # Warning
	///
	/// For testing purposes only!
	pub fn pin_refs(&self, hash: &<Block as BlockT>::Hash) -> Option<i64> {
		// `Option::copied` replaces the manual `map(|value| *value)` and the
		// needless intermediate guard binding.
		self.pinned_blocks.read().get(hash).copied()
	}
}
impl<Block: BlockT> backend::AuxStore for Backend<Block> {
	// Pure delegation to the inner blockchain's aux storage.
	fn insert_aux<
		'a,
		'b: 'a,
		'c: 'a,
		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
		D: IntoIterator<Item = &'a &'b [u8]>,
	>(
		&self,
		insert: I,
		delete: D,
	) -> sp_blockchain::Result<()> {
		self.blockchain.insert_aux(insert, delete)
	}
	fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>> {
		self.blockchain.get_aux(key)
	}
}
impl<Block: BlockT> backend::Backend<Block> for Backend<Block> {
	type BlockImportOperation = BlockImportOperation<Block>;
	type Blockchain = Blockchain<Block>;
	type State = InMemoryBackend<HashingFor<Block>>;
	type OffchainStorage = OffchainStorage;
	fn begin_operation(&self) -> sp_blockchain::Result<Self::BlockImportOperation> {
		// The default hash maps to the empty/default state (see `state_at`).
		let old_state = self.state_at(Default::default(), TrieCacheContext::Untrusted)?;
		Ok(BlockImportOperation {
			pending_block: None,
			old_state,
			new_state: None,
			aux: Default::default(),
			finalized_blocks: Default::default(),
			set_head: None,
		})
	}
	fn begin_state_operation(
		&self,
		operation: &mut Self::BlockImportOperation,
		block: Block::Hash,
	) -> sp_blockchain::Result<()> {
		operation.old_state = self.state_at(block, TrieCacheContext::Untrusted)?;
		Ok(())
	}
	fn commit_operation(&self, operation: Self::BlockImportOperation) -> sp_blockchain::Result<()> {
		// Finalizations are applied first, then the pending block (with its
		// state), then aux writes, then the head change.
		if !operation.finalized_blocks.is_empty() {
			for (block, justification) in operation.finalized_blocks {
				self.blockchain.finalize_header(block, justification)?;
			}
		}
		if let Some(pending_block) = operation.pending_block {
			let old_state = &operation.old_state;
			let (header, body, justification) = pending_block.block.into_inner();
			let hash = header.hash();
			// Without a state transaction the new block shares the old state.
			let new_state = match operation.new_state {
				Some(state) => old_state.update_backend(*header.state_root(), state),
				None => old_state.clone(),
			};
			self.states.write().insert(hash, new_state);
			self.blockchain.insert(hash, header, justification, body, pending_block.state)?;
		}
		if !operation.aux.is_empty() {
			self.blockchain.write_aux(operation.aux);
		}
		if let Some(set_head) = operation.set_head {
			self.blockchain.set_head(set_head)?;
		}
		Ok(())
	}
	fn finalize_block(
		&self,
		hash: Block::Hash,
		justification: Option<Justification>,
	) -> sp_blockchain::Result<()> {
		self.blockchain.finalize_header(hash, justification)
	}
	fn append_justification(
		&self,
		hash: Block::Hash,
		justification: Justification,
	) -> sp_blockchain::Result<()> {
		self.blockchain.append_justification(hash, justification)
	}
	fn blockchain(&self) -> &Self::Blockchain {
		&self.blockchain
	}
	// Usage statistics are not collected by the in-mem backend.
	fn usage_info(&self) -> Option<UsageInfo> {
		None
	}
	// Offchain storage is not provided by the in-mem backend.
	fn offchain_storage(&self) -> Option<Self::OffchainStorage> {
		None
	}
	fn state_at(
		&self,
		hash: Block::Hash,
		_trie_cache_context: TrieCacheContext,
	) -> sp_blockchain::Result<Self::State> {
		// The default hash denotes the empty (pre-genesis) state.
		if hash == Default::default() {
			return Ok(Self::State::default());
		}
		self.states
			.read()
			.get(&hash)
			.cloned()
			.ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", hash)))
	}
	// Revert is a no-op here: nothing is reverted and no blocks are reported.
	fn revert(
		&self,
		_n: NumberFor<Block>,
		_revert_finalized: bool,
	) -> sp_blockchain::Result<(NumberFor<Block>, HashSet<Block::Hash>)> {
		Ok((Zero::zero(), HashSet::new()))
	}
	fn remove_leaf_block(&self, _hash: Block::Hash) -> sp_blockchain::Result<()> {
		Ok(())
	}
	fn get_import_lock(&self) -> &RwLock<()> {
		&self.import_lock
	}
	fn requires_full_sync(&self) -> bool {
		false
	}
	fn pin_block(&self, hash: <Block as BlockT>::Hash) -> blockchain::Result<()> {
		let mut blocks = self.pinned_blocks.write();
		*blocks.entry(hash).or_default() += 1;
		Ok(())
	}
	// Unpinning a block that was never pinned records a count of -1.
	fn unpin_block(&self, hash: <Block as BlockT>::Hash) {
		let mut blocks = self.pinned_blocks.write();
		blocks.entry(hash).and_modify(|counter| *counter -= 1).or_insert(-1);
	}
}
/// Marker: the in-memory backend qualifies as a local (fully-capable) backend.
impl<Block: BlockT> backend::LocalBackend<Block> for Backend<Block> {}
/// Check that genesis storage is valid.
pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> {
if storage.top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) {
return Err(sp_blockchain::Error::InvalidState);
}
if storage
.children_default
.keys()
.any(|child_key| !well_known_keys::is_child_storage_key(child_key))
{
return Err(sp_blockchain::Error::InvalidState);
}
Ok(())
}
#[cfg(test)]
mod tests {
    use crate::{in_mem::Blockchain, NewBlockState};
    use sp_blockchain::Backend;
    use sp_runtime::{traits::Header as HeaderT, ConsensusEngineId, Justifications};
    use substrate_test_runtime::{Block, Header, H256};

    // Two distinct engine ids, used to exercise multiple justifications per block.
    pub const ID1: ConsensusEngineId = *b"TST1";
    pub const ID2: ConsensusEngineId = *b"TST2";

    /// Build a deterministic header at `number`, recursing to compute the
    /// parent hash (number 0 uses the default parent hash).
    fn header(number: u64) -> Header {
        let parent_hash = match number {
            0 => Default::default(),
            _ => header(number - 1).hash(),
        };
        Header::new(
            number,
            H256::from_low_u64_be(0),
            H256::from_low_u64_be(0),
            parent_hash,
            Default::default(),
        )
    }

    /// Blockchain with blocks 0..=3: 0, 1 and 3 are final (3 last finalized,
    /// with justification `[3]` under ID1), block 2 is best-only with none.
    fn test_blockchain() -> Blockchain<Block> {
        let blockchain = Blockchain::<Block>::new();
        let just0 = Some(Justifications::from((ID1, vec![0])));
        let just1 = Some(Justifications::from((ID1, vec![1])));
        let just2 = None;
        let just3 = Some(Justifications::from((ID1, vec![3])));
        blockchain
            .insert(header(0).hash(), header(0), just0, None, NewBlockState::Final)
            .unwrap();
        blockchain
            .insert(header(1).hash(), header(1), just1, None, NewBlockState::Final)
            .unwrap();
        blockchain
            .insert(header(2).hash(), header(2), just2, None, NewBlockState::Best)
            .unwrap();
        blockchain
            .insert(header(3).hash(), header(3), just3, None, NewBlockState::Final)
            .unwrap();
        blockchain
    }

    #[test]
    fn append_and_retrieve_justifications() {
        let blockchain = test_blockchain();
        let last_finalized = blockchain.last_finalized().unwrap();
        // Appending under a *different* engine id is allowed and both entries
        // must be retrievable afterwards.
        blockchain.append_justification(last_finalized, (ID2, vec![4])).unwrap();
        let justifications = {
            let mut just = Justifications::from((ID1, vec![3]));
            just.append((ID2, vec![4]));
            just
        };
        assert_eq!(blockchain.justifications(last_finalized).unwrap(), Some(justifications));
    }

    #[test]
    fn store_duplicate_justifications_is_forbidden() {
        let blockchain = test_blockchain();
        let last_finalized = blockchain.last_finalized().unwrap();
        blockchain.append_justification(last_finalized, (ID2, vec![0])).unwrap();
        // A second justification under the same engine id must be rejected.
        assert!(matches!(
            blockchain.append_justification(last_finalized, (ID2, vec![1])),
            Err(sp_blockchain::Error::BadJustification(_)),
        ));
    }
}
+436
View File
@@ -0,0 +1,436 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Helper for managing the set of available leaves in the chain for DB implementations.
use codec::{Decode, Encode};
use sp_blockchain::{Error, Result};
use sp_database::{Database, Transaction};
use sp_runtime::traits::AtLeast32Bit;
use std::{cmp::Reverse, collections::BTreeMap};
type DbHash = sp_core::H256;
/// A single leaf entry: a block hash together with its number.
///
/// The number is stored wrapped in `Reverse` so ordering matches the
/// descending-by-number layout of `LeafSet::storage`.
#[derive(Debug, Clone, PartialEq, Eq)]
struct LeafSetItem<H, N> {
    hash: H,
    number: Reverse<N>,
}
/// Inserted and removed leaves after an import action.
///
/// Kept opaque; feed it back to [`Undo::undo_import`] to roll the import back.
pub struct ImportOutcome<H, N> {
    inserted: LeafSetItem<H, N>,
    removed: Option<H>,
}
/// Inserted and removed leaves after a remove action.
///
/// Kept opaque; feed it back to [`Undo::undo_remove`] to roll the removal back.
pub struct RemoveOutcome<H, N> {
    inserted: Option<H>,
    removed: LeafSetItem<H, N>,
}
/// Removed leaves after a finalization action.
///
/// Wraps the iterator of `(number, hash)` pairs displaced by finalization.
pub struct FinalizationOutcome<I, H, N>
where
    I: Iterator<Item = (N, H)>,
{
    removed: I,
}
impl<I, H: Ord, N: Ord> FinalizationOutcome<I, H, N>
where
    I: Iterator<Item = (N, H)>,
{
    /// Constructor wrapping the iterator of leaves displaced by finalization.
    pub fn new(new_displaced: I) -> Self {
        FinalizationOutcome { removed: new_displaced }
    }
}
/// List of leaf hashes ordered by number (descending).
///
/// Stored in memory for fast access; this allows very fast checking and
/// modification of the set of active leaves. Keys are `Reverse(number)` so
/// iteration yields the highest block number first.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct LeafSet<H, N> {
    storage: BTreeMap<Reverse<N>, Vec<H>>,
}
impl<H, N> LeafSet<H, N>
where
    H: Clone + PartialEq + Decode + Encode,
    N: std::fmt::Debug + Copy + AtLeast32Bit + Decode + Encode,
{
    /// Construct a new, blank leaf set.
    pub fn new() -> Self {
        Self { storage: BTreeMap::new() }
    }

    /// Read the leaf list from the DB, using given prefix for keys.
    ///
    /// A missing entry yields an empty set; a present but undecodable entry is
    /// a backend error.
    pub fn read_from_db(db: &dyn Database<DbHash>, column: u32, prefix: &[u8]) -> Result<Self> {
        let mut storage = BTreeMap::new();
        match db.get(column, prefix) {
            Some(leaves) => {
                let vals: Vec<_> = match Decode::decode(&mut leaves.as_ref()) {
                    Ok(vals) => vals,
                    Err(_) => return Err(Error::Backend("Error decoding leaves".into())),
                };
                for (number, hashes) in vals.into_iter() {
                    storage.insert(Reverse(number), hashes);
                }
            },
            None => {},
        }
        Ok(Self { storage })
    }

    /// Update the leaf list on import.
    ///
    /// The parent (at `number - 1`) stops being a leaf if it was one; the new
    /// block always becomes a leaf. The returned outcome records exactly what
    /// changed so it can be undone.
    pub fn import(&mut self, hash: H, number: N, parent_hash: H) -> ImportOutcome<H, N> {
        let number = Reverse(number);
        let removed = if number.0 != N::zero() {
            let parent_number = Reverse(number.0 - N::one());
            // `then` only records the parent when it actually was a leaf.
            self.remove_leaf(&parent_number, &parent_hash).then(|| parent_hash)
        } else {
            None
        };
        self.insert_leaf(number, hash.clone());
        ImportOutcome { inserted: LeafSetItem { hash, number }, removed }
    }

    /// Update the leaf list on removal.
    ///
    /// Note that the leaves set structure doesn't have the information to decide if the
    /// leaf we're removing is the last children of the parent. Follows that this method requires
    /// the caller to check this condition and optionally pass the `parent_hash` if `hash` is
    /// its last child.
    ///
    /// Returns `None` if no modifications are applied.
    pub fn remove(
        &mut self,
        hash: H,
        number: N,
        parent_hash: Option<H>,
    ) -> Option<RemoveOutcome<H, N>> {
        let number = Reverse(number);
        if !self.remove_leaf(&number, &hash) {
            return None;
        }
        // If the caller told us this was the parent's last child, the parent
        // becomes a leaf again (never for block number zero).
        let inserted = parent_hash.and_then(|parent_hash| {
            if number.0 != N::zero() {
                let parent_number = Reverse(number.0 - N::one());
                self.insert_leaf(parent_number, parent_hash.clone());
                Some(parent_hash)
            } else {
                None
            }
        });
        Some(RemoveOutcome { inserted, removed: LeafSetItem { hash, number } })
    }

    /// Remove all leaves displaced by the last block finalization.
    pub fn remove_displaced_leaves<I>(&mut self, displaced_leaves: FinalizationOutcome<I, H, N>)
    where
        I: Iterator<Item = (N, H)>,
    {
        for (number, hash) in displaced_leaves.removed {
            self.remove_leaf(&Reverse(number), &hash);
        }
    }

    /// Undo all pending operations.
    ///
    /// This returns an `Undo` struct, where any
    /// `Displaced` objects that have returned by previous method calls
    /// should be passed to via the appropriate methods. Otherwise,
    /// the on-disk state may get out of sync with in-memory state.
    pub fn undo(&mut self) -> Undo<'_, H, N> {
        Undo { inner: self }
    }

    /// Revert to the given block height by dropping all leaves in the leaf set
    /// with a block number higher than the target.
    ///
    /// Returns the removed leaves.
    pub fn revert(&mut self, best_hash: H, best_number: N) -> impl Iterator<Item = (H, N)> {
        // Snapshot all (hash, number) pairs first, since we mutate `storage`
        // while walking the snapshot.
        let items = self
            .storage
            .iter()
            .flat_map(|(number, hashes)| hashes.iter().map(move |h| (h.clone(), number.0)))
            .collect::<Vec<_>>();
        for (hash, number) in &items {
            if *number > best_number {
                assert!(
                    self.remove_leaf(&Reverse(*number), &hash),
                    "item comes from an iterator over storage; qed",
                );
            }
        }
        let best_number_rev = Reverse(best_number);
        let leaves_contains_best = self
            .storage
            .get(&best_number_rev)
            .map_or(false, |hashes| hashes.contains(&best_hash));
        // We need to make sure that the best block exists in the leaf set as
        // this is an invariant of regular block import.
        if !leaves_contains_best {
            self.insert_leaf(best_number_rev, best_hash.clone());
        }
        items.into_iter().filter(move |(_, n)| *n > best_number)
    }

    /// Returns all hashes in the leaf set
    /// ordered by their block number descending.
    pub fn hashes(&self) -> Vec<H> {
        self.storage.iter().flat_map(|(_, hashes)| hashes.iter()).cloned().collect()
    }

    /// Number of known leaves.
    pub fn count(&self) -> usize {
        self.storage.values().map(|level| level.len()).sum()
    }

    /// Write the leaf list to the database transaction.
    pub fn prepare_transaction(
        &mut self,
        tx: &mut Transaction<DbHash>,
        column: u32,
        prefix: &[u8],
    ) {
        // Serialized as Vec<(number, hashes)> — the same shape read_from_db expects.
        let leaves: Vec<_> = self.storage.iter().map(|(n, h)| (n.0, h.clone())).collect();
        tx.set_from_vec(column, prefix, leaves.encode());
    }

    /// Check if given block is a leaf.
    pub fn contains(&self, number: N, hash: H) -> bool {
        self.storage
            .get(&Reverse(number))
            .map_or(false, |hashes| hashes.contains(&hash))
    }

    fn insert_leaf(&mut self, number: Reverse<N>, hash: H) {
        self.storage.entry(number).or_insert_with(Vec::new).push(hash);
    }

    // Returns true if this leaf was contained, false otherwise.
    // Also drops the per-number bucket when it becomes empty.
    fn remove_leaf(&mut self, number: &Reverse<N>, hash: &H) -> bool {
        let mut empty = false;
        let removed = self.storage.get_mut(number).map_or(false, |leaves| {
            let mut found = false;
            // retain keeps everything except the matching hash, flagging whether
            // a match existed at all.
            leaves.retain(|h| {
                if h == hash {
                    found = true;
                    false
                } else {
                    true
                }
            });
            if leaves.is_empty() {
                empty = true
            }
            found
        });
        if removed && empty {
            self.storage.remove(number);
        }
        removed
    }

    /// Returns the highest leaf and all hashes associated to it.
    ///
    /// Keys are `Reverse(number)`, so the map's first entry is the highest number.
    pub fn highest_leaf(&self) -> Option<(N, &[H])> {
        self.storage.iter().next().map(|(k, v)| (k.0, &v[..]))
    }
}
/// Helper for undoing operations.
///
/// Borrows the [`LeafSet`] mutably so undo calls apply directly to it.
pub struct Undo<'a, H: 'a, N: 'a> {
    inner: &'a mut LeafSet<H, N>,
}
impl<'a, H: 'a, N: 'a> Undo<'a, H, N>
where
    H: Clone + PartialEq + Decode + Encode,
    N: std::fmt::Debug + Copy + AtLeast32Bit + Decode + Encode,
{
    /// Undo an imported block by providing the import operation outcome.
    /// No additional operations should be performed between import and undo.
    pub fn undo_import(&mut self, outcome: ImportOutcome<H, N>) {
        // Restore the parent leaf (one below the imported number) if the import
        // displaced it, then drop the imported leaf itself.
        if let Some(removed_hash) = outcome.removed {
            let removed_number = Reverse(outcome.inserted.number.0 - N::one());
            self.inner.insert_leaf(removed_number, removed_hash);
        }
        self.inner.remove_leaf(&outcome.inserted.number, &outcome.inserted.hash);
    }

    /// Undo a removed block by providing the displaced leaf.
    /// No additional operations should be performed between remove and undo.
    pub fn undo_remove(&mut self, outcome: RemoveOutcome<H, N>) {
        // Drop the parent leaf that the removal re-inserted, then restore the
        // removed leaf.
        if let Some(inserted_hash) = outcome.inserted {
            let inserted_number = Reverse(outcome.removed.number.0 - N::one());
            self.inner.remove_leaf(&inserted_number, &inserted_hash);
        }
        self.inner.insert_leaf(outcome.removed.number, outcome.removed.hash);
    }

    /// Undo a finalization operation by providing the displaced leaves.
    /// No additional operations should be performed between finalization and undo.
    pub fn undo_finalization<I>(&mut self, outcome: FinalizationOutcome<I, H, N>)
    where
        I: Iterator<Item = (N, H)>,
    {
        for (number, hash) in outcome.removed {
            self.inner.storage.entry(Reverse(number)).or_default().push(hash);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;

    // NOTE: literals like `1_1`, `2_3` are plain integers (11, 23, …) — the
    // underscore is only a visual separator used here as "block <n>, fork <m>"
    // fake hashes.

    #[test]
    fn import_works() {
        let mut set = LeafSet::new();
        set.import(0u32, 0u32, 0u32);
        set.import(1_1, 1, 0);
        set.import(2_1, 2, 1_1);
        set.import(3_1, 3, 2_1);
        // A straight chain keeps exactly one leaf: the tip.
        assert_eq!(set.count(), 1);
        assert!(set.contains(3, 3_1));
        assert!(!set.contains(2, 2_1));
        assert!(!set.contains(1, 1_1));
        assert!(!set.contains(0, 0));
        // Forks add leaves without displacing existing ones.
        set.import(2_2, 2, 1_1);
        set.import(1_2, 1, 0);
        set.import(2_3, 2, 1_2);
        assert_eq!(set.count(), 3);
        assert!(set.contains(3, 3_1));
        assert!(set.contains(2, 2_2));
        assert!(set.contains(2, 2_3));
        // Finally test the undo feature
        let outcome = set.import(2_4, 2, 1_1);
        assert_eq!(outcome.inserted.hash, 2_4);
        assert_eq!(outcome.removed, None);
        assert_eq!(set.count(), 4);
        assert!(set.contains(2, 2_4));
        set.undo().undo_import(outcome);
        assert_eq!(set.count(), 3);
        assert!(set.contains(3, 3_1));
        assert!(set.contains(2, 2_2));
        assert!(set.contains(2, 2_3));
        // Undo of an import that displaced its parent restores the parent leaf.
        let outcome = set.import(3_2, 3, 2_3);
        assert_eq!(outcome.inserted.hash, 3_2);
        assert_eq!(outcome.removed, Some(2_3));
        assert_eq!(set.count(), 3);
        assert!(set.contains(3, 3_2));
        set.undo().undo_import(outcome);
        assert_eq!(set.count(), 3);
        assert!(set.contains(3, 3_1));
        assert!(set.contains(2, 2_2));
        assert!(set.contains(2, 2_3));
    }

    #[test]
    fn removal_works() {
        let mut set = LeafSet::new();
        set.import(10_1u32, 10u32, 0u32);
        set.import(11_1, 11, 10_1);
        set.import(11_2, 11, 10_1);
        set.import(12_1, 12, 11_1);
        // Removing the only child re-inserts the parent as a leaf.
        let outcome = set.remove(12_1, 12, Some(11_1)).unwrap();
        assert_eq!(outcome.removed.hash, 12_1);
        assert_eq!(outcome.inserted, Some(11_1));
        assert_eq!(set.count(), 2);
        assert!(set.contains(11, 11_1));
        assert!(set.contains(11, 11_2));
        // Removing with no parent hint only drops the leaf.
        let outcome = set.remove(11_1, 11, None).unwrap();
        assert_eq!(outcome.removed.hash, 11_1);
        assert_eq!(outcome.inserted, None);
        assert_eq!(set.count(), 1);
        assert!(set.contains(11, 11_2));
        let outcome = set.remove(11_2, 11, Some(10_1)).unwrap();
        assert_eq!(outcome.removed.hash, 11_2);
        assert_eq!(outcome.inserted, Some(10_1));
        assert_eq!(set.count(), 1);
        assert!(set.contains(10, 10_1));
        // Undo restores the removed leaf and drops the re-inserted parent.
        set.undo().undo_remove(outcome);
        assert_eq!(set.count(), 1);
        assert!(set.contains(11, 11_2));
    }

    #[test]
    fn flush_to_disk() {
        const PREFIX: &[u8] = b"abcdefg";
        let db = Arc::new(sp_database::MemDb::default());
        let mut set = LeafSet::new();
        set.import(0u32, 0u32, 0u32);
        set.import(1_1, 1, 0);
        set.import(2_1, 2, 1_1);
        set.import(3_1, 3, 2_1);
        // Round-trip through the database must reproduce the same set.
        let mut tx = Transaction::new();
        set.prepare_transaction(&mut tx, 0, PREFIX);
        db.commit(tx).unwrap();
        let set2 = LeafSet::read_from_db(&*db, 0, PREFIX).unwrap();
        assert_eq!(set, set2);
    }

    #[test]
    fn two_leaves_same_height_can_be_included() {
        let mut set = LeafSet::new();
        set.import(1_1u32, 10u32, 0u32);
        set.import(1_2, 10, 0);
        assert!(set.storage.contains_key(&Reverse(10)));
        assert!(set.contains(10, 1_1));
        assert!(set.contains(10, 1_2));
        assert!(!set.contains(10, 1_3));
    }
}
+90
View File
@@ -0,0 +1,90 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate client interfaces.
#![warn(missing_docs)]
pub mod backend;
pub mod call_executor;
pub mod client;
pub mod execution_extensions;
pub mod in_mem;
pub mod leaves;
pub mod notifications;
pub mod proof_provider;
pub use backend::*;
pub use call_executor::*;
pub use client::*;
pub use notifications::*;
pub use proof_provider::*;
pub use sp_blockchain as blockchain;
pub use sp_blockchain::HeaderBackend;
pub use sp_state_machine::{CompactProof, StorageProof};
pub use sp_storage::{ChildInfo, PrefixedStorageKey, StorageData, StorageKey};
/// Usage Information Provider interface.
pub trait UsageProvider<Block: sp_runtime::traits::Block> {
    /// Get usage info about current client.
    fn usage_info(&self) -> ClientInfo<Block>;
}
/// Utility methods for the client.
pub mod utils {
    use sp_blockchain::{Error, HeaderBackend, HeaderMetadata};
    use sp_runtime::traits::Block as BlockT;

    /// Returns a function for checking block ancestry, the returned function will
    /// return `true` if the given hash (second parameter) is a descendent of the
    /// base (first parameter). If the `current` parameter is defined, it should
    /// represent the current block `hash` and its `parent hash`, if given the
    /// function that's returned will assume that `hash` isn't part of the local DB
    /// yet, and all searches in the DB will instead reference the parent.
    pub fn is_descendent_of<Block: BlockT, T>(
        client: &T,
        current: Option<(Block::Hash, Block::Hash)>,
    ) -> impl Fn(&Block::Hash, &Block::Hash) -> Result<bool, Error> + '_
    where
        T: HeaderBackend<Block> + HeaderMetadata<Block, Error = Error>,
    {
        move |base, hash| {
            // A block is not its own descendent.
            if base == hash {
                return Ok(false);
            }
            let mut hash = hash;
            if let Some((current_hash, current_parent_hash)) = &current {
                if base == current_hash {
                    return Ok(false);
                }
                // `current_hash` is not in the DB: answer directly when it is
                // the queried block, otherwise continue from its parent.
                if hash == current_hash {
                    if base == current_parent_hash {
                        return Ok(true);
                    } else {
                        hash = current_parent_hash;
                    }
                }
            }
            // `base` is an ancestor of `hash` iff their lowest common ancestor
            // is `base` itself.
            let ancestor = sp_blockchain::lowest_common_ancestor(client, *hash, *base)?;
            Ok(ancestor.hash == *base)
        }
    }
}
+153
View File
@@ -0,0 +1,153 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Storage notifications
use std::{
collections::{HashMap, HashSet},
pin::Pin,
sync::Arc,
task::Poll,
};
use futures::Stream;
use prometheus_endpoint::Registry as PrometheusRegistry;
use sc_utils::pubsub::{Hub, Receiver};
use sp_core::storage::{StorageData, StorageKey};
use sp_runtime::traits::Block as BlockT;
mod registry;
use registry::Registry;
#[cfg(test)]
mod tests;
/// A type of a message delivered to the subscribers.
#[derive(Debug)]
pub struct StorageNotification<Hash> {
    /// The hash of the block
    pub block: Hash,
    /// The set of changes
    pub changes: StorageChangeSet,
}
/// Storage change set.
///
/// Holds the (shared, `Arc`-backed) top and child-trie changes of one block
/// together with the subscriber's key filters; [`StorageChangeSet::iter`]
/// applies the filters lazily.
#[derive(Debug)]
pub struct StorageChangeSet {
    changes: Arc<[(StorageKey, Option<StorageData>)]>,
    child_changes: Arc<[(StorageKey, Vec<(StorageKey, Option<StorageData>)>)]>,
    filter: Keys,
    child_filters: ChildKeys,
}
/// Manages storage listeners.
///
/// A thin wrapper over a pub-sub [`Hub`] whose subscriber bookkeeping lives in
/// [`Registry`].
#[derive(Debug)]
pub struct StorageNotifications<Block: BlockT>(Hub<StorageNotification<Block::Hash>, Registry>);
/// Type that implements `futures::Stream` of storage change events.
pub struct StorageEventStream<H>(Receiver<StorageNotification<H>, Registry>);
// `None` means "wildcard": the subscriber is interested in every key.
type Keys = Option<HashSet<StorageKey>>;
// Per-child-trie key filter; the inner `None` is again a wildcard for that trie.
type ChildKeys = Option<HashMap<StorageKey, Option<HashSet<StorageKey>>>>;
impl StorageChangeSet {
    /// Convert the change set into iterator over storage items.
    ///
    /// Yields `(child_trie_key, key, value)` triples — `None` as the first
    /// element for top-trie changes — with the subscriber's filters applied.
    pub fn iter(
        &self,
    ) -> impl Iterator<Item = (Option<&StorageKey>, &StorageKey, Option<&StorageData>)> + '_ {
        // Top-trie changes, restricted to the filter set (a `None` filter
        // passes everything).
        let top = self
            .changes
            .iter()
            .filter(move |&(key, _)| match self.filter {
                Some(ref filter) => filter.contains(key),
                None => true,
            })
            .map(move |(k, v)| (None, k, v.as_ref()));
        // Child-trie changes: a trie is visible only if it has an entry in
        // `child_filters`; within it, the per-trie filter applies.
        let children = self
            .child_changes
            .iter()
            .filter_map(move |(sk, changes)| {
                self.child_filters.as_ref().and_then(|cf| {
                    cf.get(sk).map(|filter| {
                        changes
                            .iter()
                            .filter(move |&(key, _)| match filter {
                                Some(ref filter) => filter.contains(key),
                                None => true,
                            })
                            .map(move |(k, v)| (Some(sk), k, v.as_ref()))
                    })
                })
            })
            .flatten();
        top.chain(children)
    }
}
/// Forward `Stream` polling to the inner pub-sub receiver.
impl<H> Stream for StorageEventStream<H> {
    type Item = StorageNotification<H>;
    fn poll_next(
        self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> Poll<Option<Self::Item>> {
        Stream::poll_next(Pin::new(&mut self.get_mut().0), cx)
    }
}
impl<Block: BlockT> StorageNotifications<Block> {
    /// Initialize a new StorageNotifications
    /// optionally pass a prometheus registry to send subscriber metrics to
    pub fn new(prometheus_registry: Option<PrometheusRegistry>) -> Self {
        let registry = Registry::new(prometheus_registry);
        let hub = Hub::new_with_registry("mpsc_storage_notification_items", registry);
        StorageNotifications(hub)
    }

    /// Trigger notification to all listeners.
    ///
    /// Note the changes are going to be filtered by listener's filter key.
    /// In fact no event might be sent if clients are not interested in the changes.
    pub fn trigger(
        &self,
        hash: &Block::Hash,
        changeset: impl Iterator<Item = (Vec<u8>, Option<Vec<u8>>)>,
        child_changeset: impl Iterator<
            Item = (Vec<u8>, impl Iterator<Item = (Vec<u8>, Option<Vec<u8>>)>),
        >,
    ) {
        // Dispatching happens in the hub via `Registry`'s `Dispatch` impl.
        self.0.send((hash, changeset, child_changeset))
    }

    /// Start listening for particular storage keys.
    ///
    /// `None` filters subscribe to all (wildcard) keys; see `Registry` for
    /// the filter bookkeeping. The queue is bounded at 100_000 items.
    pub fn listen(
        &self,
        filter_keys: Option<&[StorageKey]>,
        filter_child_keys: Option<&[(StorageKey, Option<Vec<StorageKey>>)]>,
    ) -> StorageEventStream<Block::Hash> {
        let receiver = self
            .0
            .subscribe(registry::SubscribeOp { filter_keys, filter_child_keys }, 100_000);
        StorageEventStream(receiver)
    }
}
@@ -0,0 +1,365 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use super::*;
use sp_core::hexdisplay::HexDisplay;
use fnv::{FnvHashMap, FnvHashSet};
use prometheus_endpoint::{register, CounterVec, Opts, U64};
use sc_utils::{
id_sequence::SeqID as SubscriberId,
pubsub::{Dispatch, Subscribe, Unsubscribe},
};
// Counter (labelled "added"/"removed") tracking subscriber churn.
type SubscribersGauge = CounterVec<U64>;
/// A command to subscribe with the specified filters.
///
/// Used by the implementation of [`Subscribe<Op>`] trait for [`Registry`].
pub(super) struct SubscribeOp<'a> {
    pub filter_keys: Option<&'a [StorageKey]>,
    pub filter_child_keys: Option<&'a [(StorageKey, Option<Vec<StorageKey>>)]>,
}
/// Subscriber bookkeeping for storage notifications.
///
/// Maps storage keys (and child-trie keys) to the subscriber ids interested in
/// them; `wildcard_listeners` receive everything. `sinks` owns per-subscriber
/// state used when dispatching.
#[derive(Debug, Default)]
pub(super) struct Registry {
    pub(super) metrics: Option<SubscribersGauge>,
    pub(super) wildcard_listeners: FnvHashSet<SubscriberId>,
    pub(super) listeners: HashMap<StorageKey, FnvHashSet<SubscriberId>>,
    // Per child-trie key: (key -> listeners map, wildcard listeners for that trie).
    pub(super) child_listeners: HashMap<
        StorageKey,
        (HashMap<StorageKey, FnvHashSet<SubscriberId>>, FnvHashSet<SubscriberId>),
    >,
    pub(super) sinks: FnvHashMap<SubscriberId, SubscriberSink>,
}
/// Per-subscriber state: its id, its key filters, and whether it ever
/// received a notification (used for the never-triggered log on drop).
#[derive(Debug)]
pub(super) struct SubscriberSink {
    subs_id: SubscriberId,
    keys: Keys,
    child_keys: ChildKeys,
    was_triggered: bool,
}
impl Drop for SubscriberSink {
    /// Log (at trace level) subscribers that were dropped without ever
    /// receiving a notification — a hint at useless or mis-filtered listeners.
    fn drop(&mut self) {
        if !self.was_triggered {
            log::trace!(
                target: "storage_notifications",
                "Listener was never triggered: id={}, keys={:?}, child_keys={:?}",
                self.subs_id,
                PrintKeys(&self.keys),
                PrintChildKeys(&self.child_keys),
            );
        }
    }
}
impl SubscriberSink {
    /// Create a sink for `subs_id` with the given filters; starts un-triggered.
    fn new(subs_id: SubscriberId, keys: Keys, child_keys: ChildKeys) -> Self {
        Self { subs_id, keys, child_keys, was_triggered: false }
    }
}
impl Registry {
    /// Create a new `Registry`, optionally registering a subscriber-count
    /// metric with Prometheus.
    ///
    /// Metric registration failures are swallowed (`ok()`): notifications must
    /// keep working even when the metric cannot be registered.
    pub(super) fn new(prometheus_registry: Option<PrometheusRegistry>) -> Self {
        let metrics = prometheus_registry.and_then(|r| {
            CounterVec::new(
                Opts::new(
                    "substrate_storage_notification_subscribers",
                    // Fixed typo in help text: "sytem" -> "system".
                    "Number of subscribers in storage notification system",
                ),
                &["action"], // added | removed
            )
            .and_then(|g| register(g, &r))
            .ok()
        });
        Registry { metrics, ..Default::default() }
    }
}
/// Hook for the pub-sub hub: dropping a subscription removes the subscriber
/// from all filter maps.
impl Unsubscribe for Registry {
    fn unsubscribe(&mut self, subs_id: SubscriberId) {
        self.remove_subscriber(subs_id);
    }
}
/// Hook for the pub-sub hub: register a new subscriber with its key filters.
impl<'a> Subscribe<SubscribeOp<'a>> for Registry {
    fn subscribe(&mut self, subs_op: SubscribeOp<'a>, subs_id: SubscriberId) {
        let SubscribeOp { filter_keys, filter_child_keys } = subs_op;
        // Register interest in top-trie keys (or the wildcard set).
        let keys = Self::listen_from(
            subs_id,
            filter_keys.as_ref(),
            &mut self.listeners,
            &mut self.wildcard_listeners,
        );
        // Register interest per child trie; each trie gets its own
        // (listeners, wildcards) pair created on demand.
        let child_keys = filter_child_keys.map(|filter_child_keys| {
            filter_child_keys
                .iter()
                .map(|(c_key, o_keys)| {
                    let (c_listeners, c_wildcards) =
                        self.child_listeners.entry(c_key.clone()).or_default();
                    (
                        c_key.clone(),
                        Self::listen_from(
                            subs_id,
                            o_keys.as_ref(),
                            &mut *c_listeners,
                            &mut *c_wildcards,
                        ),
                    )
                })
                .collect()
        });
        if let Some(m) = self.metrics.as_ref() {
            m.with_label_values(&["added"]).inc();
        }
        // Subscriber ids are expected to be unique; a collision is logged but
        // the old sink is silently replaced.
        if self
            .sinks
            .insert(subs_id, SubscriberSink::new(subs_id, keys, child_keys))
            .is_some()
        {
            log::warn!("The `subscribe`-method has been passed a non-unique subs_id (in `sc-client-api::notifications`)");
        }
    }
}
/// Hook for the pub-sub hub: unpack a `(hash, changeset, child_changeset)`
/// message and route it through `Registry::trigger`.
impl<'a, Hash, CS, CCS, CCSI> Dispatch<(&'a Hash, CS, CCS)> for Registry
where
    Hash: Clone,
    CS: Iterator<Item = (Vec<u8>, Option<Vec<u8>>)>,
    CCS: Iterator<Item = (Vec<u8>, CCSI)>,
    CCSI: Iterator<Item = (Vec<u8>, Option<Vec<u8>>)>,
{
    type Item = StorageNotification<Hash>;
    type Ret = ();
    fn dispatch<F>(&mut self, message: (&'a Hash, CS, CCS), dispatch: F) -> Self::Ret
    where
        F: FnMut(&SubscriberId, Self::Item),
    {
        let (hash, changeset, child_changeset) = message;
        self.trigger(hash, changeset, child_changeset, dispatch);
    }
}
impl Registry {
    /// Route one block's storage changes to the interested subscribers.
    ///
    /// Collects (a) the set of subscriber ids interested in at least one change
    /// and (b) the filtered change lists, then builds a shared `Arc`-backed
    /// change set and calls `dispatch` once per interested subscriber.
    pub(super) fn trigger<Hash, F>(
        &mut self,
        hash: &Hash,
        changeset: impl Iterator<Item = (Vec<u8>, Option<Vec<u8>>)>,
        child_changeset: impl Iterator<
            Item = (Vec<u8>, impl Iterator<Item = (Vec<u8>, Option<Vec<u8>>)>),
        >,
        mut dispatch: F,
    ) where
        Hash: Clone,
        F: FnMut(&SubscriberId, StorageNotification<Hash>),
    {
        let has_wildcard = !self.wildcard_listeners.is_empty();
        // early exit if no listeners
        if !has_wildcard && self.listeners.is_empty() && self.child_listeners.is_empty() {
            return;
        }
        let mut subscribers = self.wildcard_listeners.clone();
        let mut changes = Vec::new();
        let mut child_changes = Vec::new();
        // Collect subscribers and changes
        for (k, v) in changeset {
            let k = StorageKey(k);
            let listeners = self.listeners.get(&k);
            if let Some(listeners) = listeners {
                subscribers.extend(listeners.iter());
            }
            // Only retain changes somebody could possibly see.
            if has_wildcard || listeners.is_some() {
                changes.push((k, v.map(StorageData)));
            }
        }
        for (sk, changeset) in child_changeset {
            let sk = StorageKey(sk);
            // Child tries nobody listens to are skipped entirely.
            if let Some((cl, cw)) = self.child_listeners.get(&sk) {
                let mut changes = Vec::new();
                for (k, v) in changeset {
                    let k = StorageKey(k);
                    let listeners = cl.get(&k);
                    if let Some(listeners) = listeners {
                        subscribers.extend(listeners.iter());
                    }
                    subscribers.extend(cw.iter());
                    if !cw.is_empty() || listeners.is_some() {
                        changes.push((k, v.map(StorageData)));
                    }
                }
                if !changes.is_empty() {
                    child_changes.push((sk, changes));
                }
            }
        }
        // Don't send empty notifications
        if changes.is_empty() && child_changes.is_empty() {
            return;
        }
        // Shared between all notifications; per-subscriber filtering happens
        // lazily in `StorageChangeSet::iter`.
        let changes = Arc::<[_]>::from(changes);
        let child_changes = Arc::<[_]>::from(child_changes);
        // Trigger the events
        self.sinks.iter_mut().for_each(|(subs_id, sink)| {
            if subscribers.contains(subs_id) {
                sink.was_triggered = true;
                let storage_change_set = StorageChangeSet {
                    changes: changes.clone(),
                    child_changes: child_changes.clone(),
                    filter: sink.keys.clone(),
                    child_filters: sink.child_keys.clone(),
                };
                let notification =
                    StorageNotification { block: hash.clone(), changes: storage_change_set };
                dispatch(subs_id, notification);
            }
        });
    }
}
impl Registry {
    /// Remove `subscriber` from every filter map and drop its sink.
    ///
    /// Returns the filters it was registered with, or `None` if the id was
    /// unknown. Empty child-trie entries are cleaned up along the way.
    fn remove_subscriber(&mut self, subscriber: SubscriberId) -> Option<(Keys, ChildKeys)> {
        let sink = self.sinks.remove(&subscriber)?;
        Self::remove_subscriber_from(
            subscriber,
            &sink.keys,
            &mut self.listeners,
            &mut self.wildcard_listeners,
        );
        if let Some(child_filters) = &sink.child_keys {
            for (c_key, filters) in child_filters {
                if let Some((listeners, wildcards)) = self.child_listeners.get_mut(c_key) {
                    Self::remove_subscriber_from(
                        subscriber,
                        filters,
                        &mut *listeners,
                        &mut *wildcards,
                    );
                    // Drop the child-trie entry once nobody listens to it.
                    if listeners.is_empty() && wildcards.is_empty() {
                        self.child_listeners.remove(c_key);
                    }
                }
            }
        }
        if let Some(m) = self.metrics.as_ref() {
            m.with_label_values(&["removed"]).inc();
        }
        Some((sink.keys.clone(), sink.child_keys.clone()))
    }

    /// Remove one subscriber from a (listeners, wildcards) pair, given the
    /// filters it registered with; empty per-key sets are dropped.
    fn remove_subscriber_from(
        subscriber: SubscriberId,
        filters: &Keys,
        listeners: &mut HashMap<StorageKey, FnvHashSet<SubscriberId>>,
        wildcards: &mut FnvHashSet<SubscriberId>,
    ) {
        match filters {
            None => {
                // Wildcard subscription: only the wildcard set holds the id.
                wildcards.remove(&subscriber);
            },
            Some(filters) =>
                for key in filters.iter() {
                    let remove_key = match listeners.get_mut(key) {
                        Some(ref mut set) => {
                            set.remove(&subscriber);
                            set.is_empty()
                        },
                        None => false,
                    };
                    if remove_key {
                        listeners.remove(key);
                    }
                },
        }
    }

    /// Register `current_id` in a (listeners, wildcards) pair.
    ///
    /// `None` filter keys means a wildcard subscription; otherwise the id is
    /// added under each filtered key. Returns the normalized filter set that
    /// gets stored in the subscriber's sink.
    fn listen_from(
        current_id: SubscriberId,
        filter_keys: Option<impl AsRef<[StorageKey]>>,
        listeners: &mut HashMap<StorageKey, FnvHashSet<SubscriberId>>,
        wildcards: &mut FnvHashSet<SubscriberId>,
    ) -> Keys {
        match filter_keys {
            None => {
                wildcards.insert(current_id);
                None
            },
            Some(keys) => Some(
                keys.as_ref()
                    .iter()
                    .map(|key| {
                        listeners.entry(key.clone()).or_default().insert(current_id);
                        key.clone()
                    })
                    .collect(),
            ),
        }
    }
}
/// Debug adapter printing a key filter as a hex list (or `None` for wildcard).
pub(super) struct PrintKeys<'a>(pub &'a Keys);

impl<'a> std::fmt::Debug for PrintKeys<'a> {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        if let Some(keys) = self.0 {
            fmt.debug_list().entries(keys.iter().map(HexDisplay::from)).finish()
        } else {
            write!(fmt, "None")
        }
    }
}
/// Debug adapter printing a child-trie filter map as hex keys with per-trie
/// [`PrintKeys`] values (or `None` for wildcard).
pub(super) struct PrintChildKeys<'a>(pub &'a ChildKeys);

impl<'a> std::fmt::Debug for PrintChildKeys<'a> {
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        if let Some(map) = self.0 {
            fmt.debug_map()
                .entries(map.iter().map(|(key, values)| (HexDisplay::from(key), PrintKeys(values))))
                .finish()
        } else {
            write!(fmt, "None")
        }
    }
}
@@ -0,0 +1,221 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use super::*;
use sp_runtime::testing::{Block as RawBlock, TestXt, H256 as Hash};
use std::iter::{empty, Empty};
// Raw test fixture for a change set: (top-level changes, child-trie changes),
// where each child entry pairs a child storage key with its own change list.
type TestChangeSet = (
	Vec<(StorageKey, Option<StorageData>)>,
	Vec<(StorageKey, Vec<(StorageKey, Option<StorageData>)>)>,
);
impl From<TestChangeSet> for StorageChangeSet {
	/// Builds a `StorageChangeSet` from raw test data.
	fn from(changes: TestChangeSet) -> Self {
		let (top_changes, child_changes) = changes;
		// Warning: hard-coded child-trie wildcard filters (tries `[4]` and
		// `[5]`) that the tests below rely on.
		let child_filters = Some(
			vec![(StorageKey(vec![4]), None), (StorageKey(vec![5]), None)]
				.into_iter()
				.collect(),
		);
		StorageChangeSet {
			changes: top_changes.into(),
			child_changes: child_changes.into(),
			filter: None,
			child_filters,
		}
	}
}
// Equality used by the test assertions: two change sets are equal when they
// yield the same flattened sequence of changes, regardless of internal layout.
impl PartialEq for StorageChangeSet {
	fn eq(&self, other: &Self) -> bool {
		self.iter().eq(other.iter())
	}
}
// Concrete block type used by all tests below.
type Block = RawBlock<TestXt<substrate_test_runtime::RuntimeCall, ()>>;
#[test]
fn triggering_change_should_notify_wildcard_listeners() {
	// given: one listener with no top-level key filter (a wildcard) and a
	// wildcard filter on child trie `[4]`.
	let notifications = StorageNotifications::<Block>::new(None);
	let child_filter = [(StorageKey(vec![4]), None)];
	let mut recv =
		futures::executor::block_on_stream(notifications.listen(None, Some(&child_filter[..])));
	// when: trigger top-level changes plus changes under child trie `[4]`.
	let changeset = vec![(vec![2], Some(vec![3])), (vec![3], None)];
	let c_changeset_1 = vec![(vec![5], Some(vec![4])), (vec![6], None)];
	let c_changeset = vec![(vec![4], c_changeset_1)];
	notifications.trigger(
		&Hash::from_low_u64_be(1),
		changeset.into_iter(),
		c_changeset.into_iter().map(|(a, b)| (a, b.into_iter())),
	);
	// then: the wildcard listener receives every change — both top-level keys
	// and the full child-trie change list.
	assert_eq!(
		recv.next().map(StorageNotification::into_fields).unwrap(),
		(
			Hash::from_low_u64_be(1),
			(
				vec![
					(StorageKey(vec![2]), Some(StorageData(vec![3]))),
					(StorageKey(vec![3]), None),
				],
				vec![(
					StorageKey(vec![4]),
					vec![
						(StorageKey(vec![5]), Some(StorageData(vec![4]))),
						(StorageKey(vec![6]), None),
					]
				)]
			)
				.into()
		)
	);
}
#[test]
fn should_only_notify_interested_listeners() {
	// given: three listeners with disjoint interests — key `[1]`, key `[2]`,
	// and (no top-level keys, only) key `[5]` of child trie `[4]`.
	let notifications = StorageNotifications::<Block>::new(None);
	let child_filter = [(StorageKey(vec![4]), Some(vec![StorageKey(vec![5])]))];
	let mut recv1 = futures::executor::block_on_stream(
		notifications.listen(Some(&[StorageKey(vec![1])]), None),
	);
	let mut recv2 = futures::executor::block_on_stream(
		notifications.listen(Some(&[StorageKey(vec![2])]), None),
	);
	let mut recv3 =
		futures::executor::block_on_stream(notifications.listen(Some(&[]), Some(&child_filter)));
	// when: trigger changes touching keys `[1]`, `[2]`, and child keys
	// `[5]`/`[6]` under child trie `[4]`.
	let changeset = vec![(vec![2], Some(vec![3])), (vec![1], None)];
	let c_changeset_1 = vec![(vec![5], Some(vec![4])), (vec![6], None)];
	let c_changeset = vec![(vec![4], c_changeset_1)];
	notifications.trigger(
		&Hash::from_low_u64_be(1),
		changeset.into_iter(),
		c_changeset.into_iter().map(|(a, b)| (a, b.into_iter())),
	);
	// then: each listener sees only the changes matching its own filter —
	// note child key `[6]` is filtered out for the third listener.
	assert_eq!(
		recv1.next().map(StorageNotification::into_fields).unwrap(),
		(Hash::from_low_u64_be(1), (vec![(StorageKey(vec![1]), None),], vec![]).into())
	);
	assert_eq!(
		recv2.next().map(StorageNotification::into_fields).unwrap(),
		(
			Hash::from_low_u64_be(1),
			(vec![(StorageKey(vec![2]), Some(StorageData(vec![3]))),], vec![]).into()
		)
	);
	assert_eq!(
		recv3.next().map(StorageNotification::into_fields).unwrap(),
		(
			Hash::from_low_u64_be(1),
			(
				vec![],
				vec![(
					StorageKey(vec![4]),
					vec![(StorageKey(vec![5]), Some(StorageData(vec![4])))]
				),]
			)
				.into()
		)
	);
}
#[test]
fn should_cleanup_subscribers_if_dropped() {
	// given: four subscribers (two keyed, one full wildcard, one child-trie),
	// all of which are dropped when the inner scope ends.
	let notifications = StorageNotifications::<Block>::new(None);
	{
		let child_filter = [(StorageKey(vec![4]), Some(vec![StorageKey(vec![5])]))];
		let _recv1 = futures::executor::block_on_stream(
			notifications.listen(Some(&[StorageKey(vec![1])]), None),
		);
		let _recv2 = futures::executor::block_on_stream(
			notifications.listen(Some(&[StorageKey(vec![2])]), None),
		);
		let _recv3 = futures::executor::block_on_stream(notifications.listen(None, None));
		let _recv4 =
			futures::executor::block_on_stream(notifications.listen(None, Some(&child_filter)));
		// While alive: two keyed listeners, two wildcards (recv3 and the
		// top-level wildcard implied by recv4's child filter), one child entry.
		assert_eq!(notifications.map_registry(|r| r.listeners.len()), 2);
		assert_eq!(notifications.map_registry(|r| r.wildcard_listeners.len()), 2);
		assert_eq!(notifications.map_registry(|r| r.child_listeners.len()), 1);
	}
	// when: trigger after all receivers have been dropped, which is when the
	// registry notices the dead sinks and prunes them.
	let changeset = vec![(vec![2], Some(vec![3])), (vec![1], None)];
	let c_changeset = empty::<(_, Empty<_>)>();
	notifications.trigger(&Hash::from_low_u64_be(1), changeset.into_iter(), c_changeset);
	// then: every listener registration has been cleaned up.
	assert_eq!(notifications.map_registry(|r| r.listeners.len()), 0);
	assert_eq!(notifications.map_registry(|r| r.wildcard_listeners.len()), 0);
	assert_eq!(notifications.map_registry(|r| r.child_listeners.len()), 0);
}
#[test]
fn should_cleanup_subscriber_if_stream_is_dropped() {
	// A sink is registered by `listen` and must be removed again as soon as
	// the returned stream is dropped.
	let notifications = StorageNotifications::<Block>::new(None);
	{
		let _stream = notifications.listen(None, None);
		assert_eq!(notifications.map_registry(|r| r.sinks.len()), 1);
	}
	assert_eq!(notifications.map_registry(|r| r.sinks.len()), 0);
}
#[test]
fn should_not_send_empty_notifications() {
	// given: a wildcard listener.
	let notifications = StorageNotifications::<Block>::new(None);
	let mut recv = futures::executor::block_on_stream(notifications.listen(None, None));
	// when: trigger with no changes at all, then drop the sender side so the
	// stream terminates instead of blocking.
	let changeset = vec![];
	let c_changeset = empty::<(_, Empty<_>)>();
	notifications.trigger(&Hash::from_low_u64_be(1), changeset.into_iter(), c_changeset);
	drop(notifications);
	// then: the stream ends without ever yielding an empty notification.
	assert_eq!(recv.next().map(StorageNotification::into_fields), None);
}
// Test-only convenience: inspect the internal registry (listener and sink
// bookkeeping) without exposing it through the public API.
impl<B: BlockT> StorageNotifications<B> {
	fn map_registry<MapF, Ret>(&self, map: MapF) -> Ret
	where
		MapF: FnOnce(&Registry) -> Ret,
	{
		self.0.map_registry_for_tests(map)
	}
}
impl<H> StorageNotification<H> {
	/// Consumes the notification, yielding its block hash and change set so
	/// the tests can compare them against expected tuples.
	fn into_fields(self) -> (H, StorageChangeSet) {
		(self.block, self.changes)
	}
}
@@ -0,0 +1,93 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Proof utilities
use crate::{CompactProof, StorageProof};
use sp_runtime::traits::Block as BlockT;
use sp_state_machine::{KeyValueStates, KeyValueStorageLevel};
use sp_storage::ChildInfo;
/// Interface for providing block proving utilities.
pub trait ProofProvider<Block: BlockT> {
	/// Reads storage value at a given block + key, returning read proof.
	fn read_proof(
		&self,
		hash: Block::Hash,
		keys: &mut dyn Iterator<Item = &[u8]>,
	) -> sp_blockchain::Result<StorageProof>;
	/// Reads child storage value at a given block + storage_key + key, returning
	/// read proof.
	fn read_child_proof(
		&self,
		hash: Block::Hash,
		child_info: &ChildInfo,
		keys: &mut dyn Iterator<Item = &[u8]>,
	) -> sp_blockchain::Result<StorageProof>;
	/// Execute a call to a contract on top of state in a block of given hash
	/// AND returning execution proof.
	///
	/// No changes are made.
	fn execution_proof(
		&self,
		hash: Block::Hash,
		method: &str,
		call_data: &[u8],
	) -> sp_blockchain::Result<(Vec<u8>, StorageProof)>;
	/// Given a block `hash`, iterate over all storage values starting at
	/// `start_keys`.
	///
	/// The last element of `start_keys` contains the last accessed key value.
	/// When `start_keys` has multiple elements, the leading elements are the
	/// storage keys identifying the last accessed child trie, and the last
	/// level holds the value to resume from, exclusively.
	/// The proof is built up to `size_limit`, but always includes at least
	/// one key following `start_keys`.
	/// Returns the combined proof and the number of collected keys.
	fn read_proof_collection(
		&self,
		hash: Block::Hash,
		start_keys: &[Vec<u8>],
		size_limit: usize,
	) -> sp_blockchain::Result<(CompactProof, u32)>;
	/// Given a block `hash`, iterate over all storage values starting at
	/// `start_key`.
	///
	/// Returns the collected key-value content of the top trie, followed by
	/// the collected key-value content of child tries. Only child tries whose
	/// root is part of the collected content, or which relate to `start_key`,
	/// are attached.
	/// For each collected state, a boolean indicates whether the end of that
	/// state was reached.
	fn storage_collection(
		&self,
		hash: Block::Hash,
		start_key: &[Vec<u8>],
		size_limit: usize,
	) -> sp_blockchain::Result<Vec<(KeyValueStorageLevel, bool)>>;
	/// Verify a read-storage proof for a set of keys.
	///
	/// Returns the collected key-value pairs and the nested state depth of
	/// the current iteration, or 0 if the iteration completed.
	fn verify_range_proof(
		&self,
		root: Block::Hash,
		proof: CompactProof,
		start_keys: &[Vec<u8>],
	) -> sp_blockchain::Result<(KeyValueStates, usize)>;
}