feat: initialize Kurdistan SDK - independent fork of Polkadot SDK

2025-12-13 15:44:15 +03:00
commit 286de54384
6841 changed files with 1848356 additions and 0 deletions
@@ -0,0 +1,733 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! State backend that's useful for benchmarking
use crate::{DbState, DbStateBuilder};
use hash_db::{Hasher as DbHasher, Prefix};
use kvdb::{DBTransaction, KeyValueDB};
use linked_hash_map::LinkedHashMap;
use parking_lot::Mutex;
use sp_core::{
hexdisplay::HexDisplay,
storage::{ChildInfo, TrackedStorageKey},
};
use sp_runtime::{traits::Hash, StateVersion, Storage};
use sp_state_machine::{
backend::Backend as StateBackend, BackendTransaction, ChildStorageCollection, DBValue,
IterArgs, StorageCollection, StorageIterator, StorageKey, StorageValue,
};
use sp_trie::{
cache::{CacheSize, SharedTrieCache},
prefixed_key, MemoryDB, MerkleValue,
};
use std::{
cell::{Cell, RefCell},
collections::HashMap,
sync::Arc,
};
type State<H> = DbState<H>;
struct StorageDb<Hasher> {
db: Arc<dyn KeyValueDB>,
_phantom: std::marker::PhantomData<Hasher>,
}
impl<Hasher: Hash> sp_state_machine::Storage<Hasher> for StorageDb<Hasher> {
fn get(&self, key: &Hasher::Output, prefix: Prefix) -> Result<Option<DBValue>, String> {
let prefixed_key = prefixed_key::<Hasher>(key, prefix);
self.db
.get(0, &prefixed_key)
.map_err(|e| format!("Database backend error: {:?}", e))
}
}
struct KeyTracker {
enable_tracking: bool,
/// Key tracker for keys in the main trie.
/// We track the total number of reads and writes to these keys,
/// not de-duplicated for repeats.
main_keys: LinkedHashMap<Vec<u8>, TrackedStorageKey>,
/// Key tracker for keys in a child trie.
/// Child tries are identified by their storage key (i.e. `ChildInfo::storage_key()`).
/// We track the total number of reads and writes to these keys,
/// not de-duplicated for repeats.
child_keys: LinkedHashMap<Vec<u8>, LinkedHashMap<Vec<u8>, TrackedStorageKey>>,
}
/// State that manages the backend database reference. Allows the runtime to control the database.
pub struct BenchmarkingState<Hasher: Hash> {
root: Cell<Hasher::Output>,
genesis_root: Hasher::Output,
state: RefCell<Option<State<Hasher>>>,
db: Cell<Option<Arc<dyn KeyValueDB>>>,
genesis: HashMap<Vec<u8>, (Vec<u8>, i32)>,
record: Cell<Vec<Vec<u8>>>,
key_tracker: Arc<Mutex<KeyTracker>>,
whitelist: RefCell<Vec<TrackedStorageKey>>,
proof_recorder: Option<sp_trie::recorder::Recorder<Hasher>>,
proof_recorder_root: Cell<Hasher::Output>,
shared_trie_cache: SharedTrieCache<Hasher>,
}
/// A raw iterator over the `BenchmarkingState`.
pub struct RawIter<Hasher: Hash> {
inner: <DbState<Hasher> as StateBackend<Hasher>>::RawIter,
child_trie: Option<Vec<u8>>,
key_tracker: Arc<Mutex<KeyTracker>>,
}
impl<Hasher: Hash> StorageIterator<Hasher> for RawIter<Hasher> {
type Backend = BenchmarkingState<Hasher>;
type Error = String;
fn next_key(&mut self, backend: &Self::Backend) -> Option<Result<StorageKey, Self::Error>> {
match self.inner.next_key(backend.state.borrow().as_ref()?) {
Some(Ok(key)) => {
self.key_tracker.lock().add_read_key(self.child_trie.as_deref(), &key);
Some(Ok(key))
},
result => result,
}
}
fn next_pair(
&mut self,
backend: &Self::Backend,
) -> Option<Result<(StorageKey, StorageValue), Self::Error>> {
match self.inner.next_pair(backend.state.borrow().as_ref()?) {
Some(Ok((key, value))) => {
self.key_tracker.lock().add_read_key(self.child_trie.as_deref(), &key);
Some(Ok((key, value)))
},
result => result,
}
}
fn was_complete(&self) -> bool {
self.inner.was_complete()
}
}
impl<Hasher: Hash> BenchmarkingState<Hasher> {
/// Create a new instance backed by an in-memory key-value database.
pub fn new(
genesis: Storage,
_cache_size_mb: Option<usize>,
record_proof: bool,
enable_tracking: bool,
) -> Result<Self, String> {
let state_version = sp_runtime::StateVersion::default();
let mut root = Default::default();
let mut mdb = MemoryDB::<Hasher>::default();
sp_trie::trie_types::TrieDBMutBuilderV1::<Hasher>::new(&mut mdb, &mut root).build();
let mut state = BenchmarkingState {
state: RefCell::new(None),
db: Cell::new(None),
root: Cell::new(root),
genesis: Default::default(),
genesis_root: Default::default(),
record: Default::default(),
key_tracker: Arc::new(Mutex::new(KeyTracker {
main_keys: Default::default(),
child_keys: Default::default(),
enable_tracking,
})),
whitelist: Default::default(),
proof_recorder: record_proof.then(Default::default),
proof_recorder_root: Cell::new(root),
// Enable the cache, but do not sync anything to the shared state.
shared_trie_cache: SharedTrieCache::new(CacheSize::new(0), None),
};
state.add_whitelist_to_tracker();
state.reopen()?;
let child_delta = genesis.children_default.values().map(|child_content| {
(
&child_content.child_info,
child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))),
)
});
let (root, transaction): (Hasher::Output, _) =
state.state.borrow().as_ref().unwrap().full_storage_root(
genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))),
child_delta,
state_version,
);
state.genesis = transaction.clone().drain();
state.genesis_root = root;
state.commit(root, transaction, Vec::new(), Vec::new())?;
state.record.take();
Ok(state)
}
/// Get the proof recorder for this state
pub fn recorder(&self) -> Option<sp_trie::recorder::Recorder<Hasher>> {
self.proof_recorder.clone()
}
fn reopen(&self) -> Result<(), String> {
*self.state.borrow_mut() = None;
let db = match self.db.take() {
Some(db) => db,
None => Arc::new(kvdb_memorydb::create(1)),
};
self.db.set(Some(db.clone()));
if let Some(recorder) = &self.proof_recorder {
recorder.reset();
self.proof_recorder_root.set(self.root.get());
}
let storage_db = Arc::new(StorageDb::<Hasher> { db, _phantom: Default::default() });
*self.state.borrow_mut() = Some(
DbStateBuilder::<Hasher>::new(storage_db, self.root.get())
.with_optional_recorder(self.proof_recorder.clone())
.with_cache(self.shared_trie_cache.local_cache_trusted())
.build(),
);
Ok(())
}
fn add_whitelist_to_tracker(&self) {
self.key_tracker.lock().add_whitelist(&self.whitelist.borrow());
}
fn wipe_tracker(&self) {
let mut key_tracker = self.key_tracker.lock();
key_tracker.main_keys = LinkedHashMap::new();
key_tracker.child_keys = LinkedHashMap::new();
key_tracker.add_whitelist(&self.whitelist.borrow());
}
fn add_read_key(&self, childtrie: Option<&[u8]>, key: &[u8]) {
self.key_tracker.lock().add_read_key(childtrie, key);
}
fn add_write_key(&self, childtrie: Option<&[u8]>, key: &[u8]) {
self.key_tracker.lock().add_write_key(childtrie, key);
}
fn all_trackers(&self) -> Vec<TrackedStorageKey> {
self.key_tracker.lock().all_trackers()
}
}
impl KeyTracker {
fn add_whitelist(&mut self, whitelist: &[TrackedStorageKey]) {
whitelist.iter().for_each(|key| {
let mut whitelisted = TrackedStorageKey::new(key.key.clone());
whitelisted.whitelist();
self.main_keys.insert(key.key.clone(), whitelisted);
});
}
// A child trie is identified by its storage key (i.e. `ChildInfo::storage_key`).
fn add_read_key(&mut self, childtrie: Option<&[u8]>, key: &[u8]) {
if !self.enable_tracking {
return;
}
let child_key_tracker = &mut self.child_keys;
let main_key_tracker = &mut self.main_keys;
let key_tracker = if let Some(childtrie) = childtrie {
child_key_tracker.entry(childtrie.to_vec()).or_insert_with(LinkedHashMap::new)
} else {
main_key_tracker
};
let should_log = match key_tracker.get_mut(key) {
None => {
let mut has_been_read = TrackedStorageKey::new(key.to_vec());
has_been_read.add_read();
key_tracker.insert(key.to_vec(), has_been_read);
true
},
Some(tracker) => {
let should_log = !tracker.has_been_read();
tracker.add_read();
should_log
},
};
if should_log {
if let Some(childtrie) = childtrie {
log::trace!(
target: "benchmark",
"Childtrie Read: {} {}", HexDisplay::from(&childtrie), HexDisplay::from(&key)
);
} else {
log::trace!(target: "benchmark", "Read: {}", HexDisplay::from(&key));
}
}
}
// A child trie is identified by its storage key (i.e. `ChildInfo::storage_key`).
fn add_write_key(&mut self, childtrie: Option<&[u8]>, key: &[u8]) {
if !self.enable_tracking {
return;
}
let child_key_tracker = &mut self.child_keys;
let main_key_tracker = &mut self.main_keys;
let key_tracker = if let Some(childtrie) = childtrie {
child_key_tracker.entry(childtrie.to_vec()).or_insert_with(LinkedHashMap::new)
} else {
main_key_tracker
};
// If we have written to the key, we also consider that we have read from it.
let should_log = match key_tracker.get_mut(key) {
None => {
let mut has_been_written = TrackedStorageKey::new(key.to_vec());
has_been_written.add_write();
key_tracker.insert(key.to_vec(), has_been_written);
true
},
Some(tracker) => {
let should_log = !tracker.has_been_written();
tracker.add_write();
should_log
},
};
if should_log {
if let Some(childtrie) = childtrie {
log::trace!(
target: "benchmark",
"Childtrie Write: {} {}", HexDisplay::from(&childtrie), HexDisplay::from(&key)
);
} else {
log::trace!(target: "benchmark", "Write: {}", HexDisplay::from(&key));
}
}
}
// Returns all tracked storage keys from both the main and the child tries.
fn all_trackers(&self) -> Vec<TrackedStorageKey> {
let mut all_trackers = Vec::new();
self.main_keys.iter().for_each(|(_, tracker)| {
all_trackers.push(tracker.clone());
});
self.child_keys.iter().for_each(|(_, child_tracker)| {
child_tracker.iter().for_each(|(_, tracker)| {
all_trackers.push(tracker.clone());
});
});
all_trackers
}
}
fn state_err() -> String {
"State is not open".into()
}
impl<Hasher: Hash> StateBackend<Hasher> for BenchmarkingState<Hasher> {
type Error = <DbState<Hasher> as StateBackend<Hasher>>::Error;
type TrieBackendStorage = <DbState<Hasher> as StateBackend<Hasher>>::TrieBackendStorage;
type RawIter = RawIter<Hasher>;
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
self.add_read_key(None, key);
self.state.borrow().as_ref().ok_or_else(state_err)?.storage(key)
}
fn storage_hash(&self, key: &[u8]) -> Result<Option<Hasher::Output>, Self::Error> {
self.add_read_key(None, key);
self.state.borrow().as_ref().ok_or_else(state_err)?.storage_hash(key)
}
fn child_storage(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Vec<u8>>, Self::Error> {
self.add_read_key(Some(child_info.storage_key()), key);
self.state
.borrow()
.as_ref()
.ok_or_else(state_err)?
.child_storage(child_info, key)
}
fn child_storage_hash(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Hasher::Output>, Self::Error> {
self.add_read_key(Some(child_info.storage_key()), key);
self.state
.borrow()
.as_ref()
.ok_or_else(state_err)?
.child_storage_hash(child_info, key)
}
fn closest_merkle_value(
&self,
key: &[u8],
) -> Result<Option<MerkleValue<Hasher::Output>>, Self::Error> {
self.add_read_key(None, key);
self.state.borrow().as_ref().ok_or_else(state_err)?.closest_merkle_value(key)
}
fn child_closest_merkle_value(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<MerkleValue<Hasher::Output>>, Self::Error> {
self.add_read_key(None, key);
self.state
.borrow()
.as_ref()
.ok_or_else(state_err)?
.child_closest_merkle_value(child_info, key)
}
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
self.add_read_key(None, key);
self.state.borrow().as_ref().ok_or_else(state_err)?.exists_storage(key)
}
fn exists_child_storage(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Result<bool, Self::Error> {
self.add_read_key(Some(child_info.storage_key()), key);
self.state
.borrow()
.as_ref()
.ok_or_else(state_err)?
.exists_child_storage(child_info, key)
}
fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
self.add_read_key(None, key);
self.state.borrow().as_ref().ok_or_else(state_err)?.next_storage_key(key)
}
fn next_child_storage_key(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Vec<u8>>, Self::Error> {
self.add_read_key(Some(child_info.storage_key()), key);
self.state
.borrow()
.as_ref()
.ok_or_else(state_err)?
.next_child_storage_key(child_info, key)
}
fn storage_root<'a>(
&self,
delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
state_version: StateVersion,
) -> (Hasher::Output, BackendTransaction<Hasher>) {
self.state
.borrow()
.as_ref()
.map_or(Default::default(), |s| s.storage_root(delta, state_version))
}
fn child_storage_root<'a>(
&self,
child_info: &ChildInfo,
delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
state_version: StateVersion,
) -> (Hasher::Output, bool, BackendTransaction<Hasher>) {
self.state
.borrow()
.as_ref()
.map_or(Default::default(), |s| s.child_storage_root(child_info, delta, state_version))
}
fn raw_iter(&self, args: IterArgs) -> Result<Self::RawIter, Self::Error> {
let child_trie =
args.child_info.as_ref().map(|child_info| child_info.storage_key().to_vec());
self.state
.borrow()
.as_ref()
.map(|s| s.raw_iter(args))
.unwrap_or(Ok(Default::default()))
.map(|raw_iter| RawIter {
inner: raw_iter,
key_tracker: self.key_tracker.clone(),
child_trie,
})
}
fn commit(
&self,
storage_root: <Hasher as DbHasher>::Out,
mut transaction: BackendTransaction<Hasher>,
main_storage_changes: StorageCollection,
child_storage_changes: ChildStorageCollection,
) -> Result<(), Self::Error> {
if let Some(db) = self.db.take() {
let mut db_transaction = DBTransaction::new();
let changes = transaction.drain();
let mut keys = Vec::with_capacity(changes.len());
for (key, (val, rc)) in changes {
if rc > 0 {
db_transaction.put(0, &key, &val);
} else if rc < 0 {
db_transaction.delete(0, &key);
}
keys.push(key);
}
let mut record = self.record.take();
record.extend(keys);
self.record.set(record);
db.write(db_transaction)
.map_err(|_| String::from("Error committing transaction"))?;
self.root.set(storage_root);
self.db.set(Some(db));
// Track DB Writes
main_storage_changes.iter().for_each(|(key, _)| {
self.add_write_key(None, key);
});
child_storage_changes.iter().for_each(|(child_storage_key, storage_changes)| {
storage_changes.iter().for_each(|(key, _)| {
self.add_write_key(Some(child_storage_key), key);
})
});
} else {
return Err("Trying to commit to a closed db".into());
}
self.reopen()
}
fn wipe(&self) -> Result<(), Self::Error> {
// Restore to genesis
let record = self.record.take();
if let Some(db) = self.db.take() {
let mut db_transaction = DBTransaction::new();
for key in record {
match self.genesis.get(&key) {
Some((v, _)) => db_transaction.put(0, &key, v),
None => db_transaction.delete(0, &key),
}
}
db.write(db_transaction)
.map_err(|_| String::from("Error committing transaction"))?;
self.db.set(Some(db));
}
self.root.set(self.genesis_root);
self.reopen()?;
self.wipe_tracker();
Ok(())
}
/// Get the key tracking information for the state db.
/// 1. `reads` - Total number of DB reads.
/// 2. `repeat_reads` - Total number of in-memory reads.
/// 3. `writes` - Total number of DB writes.
/// 4. `repeat_writes` - Total number of in-memory writes.
fn read_write_count(&self) -> (u32, u32, u32, u32) {
let mut reads = 0;
let mut repeat_reads = 0;
let mut writes = 0;
let mut repeat_writes = 0;
self.all_trackers().iter().for_each(|tracker| {
if !tracker.whitelisted {
if tracker.reads > 0 {
reads += 1;
repeat_reads += tracker.reads - 1;
}
if tracker.writes > 0 {
writes += 1;
repeat_writes += tracker.writes - 1;
}
}
});
(reads, repeat_reads, writes, repeat_writes)
}
/// Reset the key tracking information for the state db.
fn reset_read_write_count(&self) {
self.wipe_tracker()
}
fn get_whitelist(&self) -> Vec<TrackedStorageKey> {
self.whitelist.borrow().to_vec()
}
fn set_whitelist(&self, new: Vec<TrackedStorageKey>) {
*self.whitelist.borrow_mut() = new;
}
fn get_read_and_written_keys(&self) -> Vec<(Vec<u8>, u32, u32, bool)> {
// For memory-size reasons we currently track only a key prefix (at most 32 bytes) and skip whitelisted keys.
// TODO: Refactor to enable full storage key transparency, where we can remove the
// `prefix_key_tracker`.
let mut prefix_key_tracker = LinkedHashMap::<Vec<u8>, (u32, u32, bool)>::new();
self.all_trackers().iter().for_each(|tracker| {
if !tracker.whitelisted {
let prefix_length = tracker.key.len().min(32);
let prefix = tracker.key[0..prefix_length].to_vec();
// Each read/write of a specific key is counted at most once; repeated
// reads/writes are served by the in-memory overlay.
let reads = tracker.reads.min(1);
let writes = tracker.writes.min(1);
if let Some(prefix_tracker) = prefix_key_tracker.get_mut(&prefix) {
prefix_tracker.0 += reads;
prefix_tracker.1 += writes;
} else {
prefix_key_tracker.insert(prefix, (reads, writes, tracker.whitelisted));
}
}
});
prefix_key_tracker
.iter()
.map(|(key, tracker)| -> (Vec<u8>, u32, u32, bool) {
(key.to_vec(), tracker.0, tracker.1, tracker.2)
})
.collect::<Vec<_>>()
}
fn register_overlay_stats(&self, stats: &sp_state_machine::StateMachineStats) {
self.state.borrow().as_ref().map(|s| s.register_overlay_stats(stats));
}
fn usage_info(&self) -> sp_state_machine::UsageInfo {
self.state
.borrow()
.as_ref()
.map_or(sp_state_machine::UsageInfo::empty(), |s| s.usage_info())
}
fn proof_size(&self) -> Option<u32> {
self.proof_recorder.as_ref().map(|recorder| {
let proof_size = recorder.estimate_encoded_size() as u32;
let proof = recorder.to_storage_proof();
let proof_recorder_root = self.proof_recorder_root.get();
if proof_recorder_root == Default::default() || proof_size == 1 {
// empty trie
log::debug!(target: "benchmark", "Some proof size: {}", &proof_size);
proof_size
} else {
if let Some(size) = proof.encoded_compact_size::<Hasher>(proof_recorder_root) {
size as u32
} else if proof_recorder_root == self.root.get() {
log::debug!(target: "benchmark", "No changes - no proof");
0
} else {
panic!(
"proof rec root {:?}, root {:?}, genesis {:?}, rec_len {:?}",
self.proof_recorder_root.get(),
self.root.get(),
self.genesis_root,
proof_size,
);
}
}
})
}
}
impl<Hasher: Hash> std::fmt::Debug for BenchmarkingState<Hasher> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "Bench DB")
}
}
#[cfg(test)]
mod test {
use crate::bench::BenchmarkingState;
use sp_runtime::traits::HashingFor;
use sp_state_machine::backend::Backend as _;
fn hex(hex: &str) -> Vec<u8> {
array_bytes::hex2bytes(hex).unwrap()
}
#[test]
fn iteration_is_also_counted_in_rw_counts() {
let storage = sp_runtime::Storage {
top: vec![(
hex("ce6e1397e668c7fcf47744350dc59688455a2c2dbd2e2a649df4e55d93cd7158"),
hex("0102030405060708"),
)]
.into_iter()
.collect(),
..sp_runtime::Storage::default()
};
let bench_state =
BenchmarkingState::<HashingFor<crate::tests::Block>>::new(storage, None, false, true)
.unwrap();
assert_eq!(bench_state.read_write_count(), (0, 0, 0, 0));
assert_eq!(bench_state.keys(Default::default()).unwrap().count(), 1);
assert_eq!(bench_state.read_write_count(), (1, 0, 0, 0));
}
#[test]
fn read_to_main_and_child_tries() {
let bench_state = BenchmarkingState::<HashingFor<crate::tests::Block>>::new(
Default::default(),
None,
false,
true,
)
.unwrap();
for _ in 0..2 {
let child1 = sp_core::storage::ChildInfo::new_default(b"child1");
let child2 = sp_core::storage::ChildInfo::new_default(b"child2");
bench_state.storage(b"foo").unwrap();
bench_state.child_storage(&child1, b"foo").unwrap();
bench_state.child_storage(&child2, b"foo").unwrap();
bench_state.storage(b"bar").unwrap();
bench_state.child_storage(&child1, b"bar").unwrap();
bench_state.child_storage(&child2, b"bar").unwrap();
bench_state
.commit(
Default::default(),
Default::default(),
vec![("foo".as_bytes().to_vec(), None)],
vec![("child1".as_bytes().to_vec(), vec![("foo".as_bytes().to_vec(), None)])],
)
.unwrap();
let rw_tracker = bench_state.read_write_count();
assert_eq!(rw_tracker.0, 6);
assert_eq!(rw_tracker.1, 0);
assert_eq!(rw_tracker.2, 2);
assert_eq!(rw_tracker.3, 0);
bench_state.wipe().unwrap();
}
}
}
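A minimal sketch (not part of this commit) of driving the tracker from outside the crate, tying together `new` and the `read_write_count` tuple documented above. It assumes some concrete `Block: BlockT` (e.g. the crate's test block) and the public `BenchmarkingState` export from `sc_client_db`:
use sc_client_db::BenchmarkingState;
use sp_runtime::traits::{Block as BlockT, HashingFor};
use sp_state_machine::backend::Backend as _;
fn tracker_sketch<Block: BlockT>() -> Result<(), String> {
    // Empty genesis, no proof recording, key tracking enabled.
    let state = BenchmarkingState::<HashingFor<Block>>::new(
        Default::default(),
        None,  // cache size hint (unused)
        false, // record_proof
        true,  // enable_tracking
    )?;
    state.storage(b"key").map_err(|e| format!("{e:?}"))?; // first read -> `reads`
    state.storage(b"key").map_err(|e| format!("{e:?}"))?; // repeat -> `repeat_reads`
    // Tuple is (reads, repeat_reads, writes, repeat_writes).
    assert_eq!(state.read_write_count(), (1, 1, 0, 0));
    Ok(())
}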
@@ -0,0 +1,123 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Functionality for reading and storing children hashes from db.
use crate::DbHash;
use codec::{Decode, Encode};
use sp_blockchain;
use sp_database::{Database, Transaction};
use std::hash::Hash;
/// Returns the hashes of the children blocks of the block with `parent_hash`.
pub fn read_children<
K: Eq + Hash + Clone + Encode + Decode,
V: Eq + Hash + Clone + Encode + Decode,
>(
db: &dyn Database<DbHash>,
column: u32,
prefix: &[u8],
parent_hash: K,
) -> sp_blockchain::Result<Vec<V>> {
let mut buf = prefix.to_vec();
parent_hash.using_encoded(|s| buf.extend(s));
let raw_val_opt = db.get(column, &buf[..]);
let raw_val = match raw_val_opt {
Some(val) => val,
None => return Ok(Vec::new()),
};
let children: Vec<V> = match Decode::decode(&mut &raw_val[..]) {
Ok(children) => children,
Err(_) => return Err(sp_blockchain::Error::Backend("Error decoding children".into())),
};
Ok(children)
}
/// Insert the key-value pair (`parent_hash`, `children_hashes`) in the transaction.
/// Any existing value is overwritten upon write.
pub fn write_children<
K: Eq + Hash + Clone + Encode + Decode,
V: Eq + Hash + Clone + Encode + Decode,
>(
tx: &mut Transaction<DbHash>,
column: u32,
prefix: &[u8],
parent_hash: K,
children_hashes: V,
) {
let mut key = prefix.to_vec();
parent_hash.using_encoded(|s| key.extend(s));
tx.set_from_vec(column, &key[..], children_hashes.encode());
}
/// Prepare transaction to remove the children of `parent_hash`.
pub fn remove_children<K: Eq + Hash + Clone + Encode + Decode>(
tx: &mut Transaction<DbHash>,
column: u32,
prefix: &[u8],
parent_hash: K,
) {
let mut key = prefix.to_vec();
parent_hash.using_encoded(|s| key.extend(s));
tx.remove(column, &key);
}
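// Annotation (editor's note, not in the original file): all three helpers
// address the same db entry, whose key is `prefix ++ parent_hash.encode()`.
// For example, with prefix b"children" and a u32 parent hash of 17, the key
// is the bytes of b"children" followed by the SCALE encoding of 17u32,
// i.e. [0x11, 0x00, 0x00, 0x00].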
#[cfg(test)]
mod tests {
use super::*;
use std::sync::Arc;
#[test]
fn children_write_read_remove() {
const PREFIX: &[u8] = b"children";
let db = Arc::new(sp_database::MemDb::default());
let mut tx = Transaction::new();
let mut children1 = Vec::new();
children1.push(1_3);
children1.push(1_5);
write_children(&mut tx, 0, PREFIX, 1_1, children1);
let mut children2 = Vec::new();
children2.push(1_4);
children2.push(1_6);
write_children(&mut tx, 0, PREFIX, 1_2, children2);
db.commit(tx.clone()).unwrap();
let r1: Vec<u32> = read_children(&*db, 0, PREFIX, 1_1).expect("(1) Getting r1 failed");
let r2: Vec<u32> = read_children(&*db, 0, PREFIX, 1_2).expect("(1) Getting r2 failed");
assert_eq!(r1, vec![1_3, 1_5]);
assert_eq!(r2, vec![1_4, 1_6]);
remove_children(&mut tx, 0, PREFIX, 1_2);
db.commit(tx).unwrap();
let r1: Vec<u32> = read_children(&*db, 0, PREFIX, 1_1).expect("(2) Getting r1 failed");
let r2: Vec<u32> = read_children(&*db, 0, PREFIX, 1_2).expect("(2) Getting r2 failed");
assert_eq!(r1, vec![1_3, 1_5]);
assert_eq!(r2.len(), 0);
}
}
File diff suppressed because it is too large.
@@ -0,0 +1,150 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! RocksDB-based offchain workers local storage.
use std::{collections::HashMap, sync::Arc};
use crate::{columns, Database, DbHash, Transaction};
use log::error;
use parking_lot::Mutex;
/// Offchain local storage
#[derive(Clone)]
pub struct LocalStorage {
db: Arc<dyn Database<DbHash>>,
locks: Arc<Mutex<HashMap<Vec<u8>, Arc<Mutex<()>>>>>,
}
impl std::fmt::Debug for LocalStorage {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("LocalStorage").finish()
}
}
impl LocalStorage {
/// Create new offchain storage for tests (backed by memorydb)
#[cfg(any(feature = "test-helpers", test))]
pub fn new_test() -> Self {
let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS);
let db = sp_database::as_database(db);
Self::new(db as _)
}
/// Create offchain local storage with given `KeyValueDB` backend.
pub fn new(db: Arc<dyn Database<DbHash>>) -> Self {
Self { db, locks: Default::default() }
}
}
impl sp_core::offchain::OffchainStorage for LocalStorage {
fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]) {
let mut tx = Transaction::new();
tx.set(columns::OFFCHAIN, &concatenate_prefix_and_key(prefix, key), value);
if let Err(err) = self.db.commit(tx) {
error!("Error setting on local storage: {}", err)
}
}
fn remove(&mut self, prefix: &[u8], key: &[u8]) {
let mut tx = Transaction::new();
tx.remove(columns::OFFCHAIN, &concatenate_prefix_and_key(prefix, key));
if let Err(err) = self.db.commit(tx) {
error!("Error removing on local storage: {}", err)
}
}
fn get(&self, prefix: &[u8], key: &[u8]) -> Option<Vec<u8>> {
self.db.get(columns::OFFCHAIN, &concatenate_prefix_and_key(prefix, key))
}
fn compare_and_set(
&mut self,
prefix: &[u8],
item_key: &[u8],
old_value: Option<&[u8]>,
new_value: &[u8],
) -> bool {
let key = concatenate_prefix_and_key(prefix, item_key);
let key_lock = {
let mut locks = self.locks.lock();
locks.entry(key.clone()).or_default().clone()
};
let is_set;
{
let _key_guard = key_lock.lock();
let val = self.db.get(columns::OFFCHAIN, &key);
is_set = val.as_deref() == old_value;
if is_set {
self.set(prefix, item_key, new_value)
}
}
// Clean up the lock map if we hold the only reference to this key's lock.
let mut locks = self.locks.lock();
{
drop(key_lock);
let key_lock = locks.get_mut(&key);
if key_lock.and_then(Arc::get_mut).is_some() {
locks.remove(&key);
}
}
is_set
}
}
/// Concatenate the prefix and key to create an offchain key in the db.
pub(crate) fn concatenate_prefix_and_key(prefix: &[u8], key: &[u8]) -> Vec<u8> {
prefix.iter().chain(key.iter()).cloned().collect()
}
#[cfg(test)]
mod tests {
use super::*;
use sp_core::offchain::OffchainStorage;
#[test]
fn should_compare_and_set_and_clear_the_locks_map() {
let mut storage = LocalStorage::new_test();
let prefix = b"prefix";
let key = b"key";
let value = b"value";
storage.set(prefix, key, value);
assert_eq!(storage.get(prefix, key), Some(value.to_vec()));
assert_eq!(storage.compare_and_set(prefix, key, Some(value), b"asd"), true);
assert_eq!(storage.get(prefix, key), Some(b"asd".to_vec()));
assert!(storage.locks.lock().is_empty(), "Locks map should be empty!");
}
#[test]
fn should_compare_and_set_on_empty_field() {
let mut storage = LocalStorage::new_test();
let prefix = b"prefix";
let key = b"key";
assert_eq!(storage.compare_and_set(prefix, key, None, b"asd"), true);
assert_eq!(storage.get(prefix, key), Some(b"asd".to_vec()));
assert!(storage.locks.lock().is_empty(), "Locks map should be empty!");
}
}
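A minimal sketch (not part of this commit) of the compare-and-swap contract implemented above; it assumes the crate's `test-helpers` feature so that `new_test` is available:
use sc_client_db::offchain::LocalStorage;
use sp_core::offchain::OffchainStorage;
fn cas_sketch() {
    let mut storage = LocalStorage::new_test();
    storage.set(b"prefix", b"key", b"v1");
    // Stale `old_value`: the comparison fails and nothing is written.
    assert!(!storage.compare_and_set(b"prefix", b"key", Some(b"stale"), b"v2"));
    // Matching `old_value`: the swap happens under the per-key lock.
    assert!(storage.compare_and_set(b"prefix", b"key", Some(b"v1"), b"v2"));
    assert_eq!(storage.get(b"prefix", b"key"), Some(b"v2".to_vec()));
}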
@@ -0,0 +1,162 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::{
columns,
utils::{DatabaseType, NUM_COLUMNS},
};
use sp_database::{error::DatabaseError, Change, ColumnId, Database, Transaction};
/// A `Database` adapter for parity-db.
struct DbAdapter(parity_db::Db);
fn handle_err<T>(result: parity_db::Result<T>) -> T {
match result {
Ok(r) => r,
Err(e) => {
panic!("Critical database error: {:?}", e);
},
}
}
/// Wrap parity-db database into a trait object that implements `sp_database::Database`
pub fn open<H: Clone + AsRef<[u8]>>(
path: &std::path::Path,
db_type: DatabaseType,
create: bool,
upgrade: bool,
) -> parity_db::Result<std::sync::Arc<dyn Database<H>>> {
let mut config = parity_db::Options::with_columns(path, NUM_COLUMNS as u8);
match db_type {
DatabaseType::Full => {
let compressed = [
columns::STATE,
columns::HEADER,
columns::BODY,
columns::BODY_INDEX,
columns::TRANSACTION,
columns::JUSTIFICATIONS,
];
for i in compressed {
let column = &mut config.columns[i as usize];
column.compression = parity_db::CompressionType::Lz4;
}
let state_col = &mut config.columns[columns::STATE as usize];
state_col.ref_counted = true;
state_col.preimage = true;
state_col.uniform = true;
let tx_col = &mut config.columns[columns::TRANSACTION as usize];
tx_col.ref_counted = true;
tx_col.preimage = true;
tx_col.uniform = true;
},
}
if upgrade {
log::info!("Upgrading database metadata.");
if let Some(meta) = parity_db::Options::load_metadata(path)? {
config.write_metadata_with_version(path, &meta.salt, Some(meta.version))?;
}
}
let db = if create {
parity_db::Db::open_or_create(&config)?
} else {
parity_db::Db::open(&config)?
};
Ok(std::sync::Arc::new(DbAdapter(db)))
}
fn ref_counted_column(col: u32) -> bool {
col == columns::TRANSACTION || col == columns::STATE
}
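// Annotation (editor's note, not in the original file): `commit` below maps
// plain `Change::Set`/`Change::Remove` onto parity-db writes for any column,
// while the reference-counted `Store`/`Reference`/`Release` variants are
// honoured only for the two columns above. On any other column they are
// collected into `not_ref_counted_column` and the whole commit fails with
// `parity_db::Error::InvalidInput`.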
impl<H: Clone + AsRef<[u8]>> Database<H> for DbAdapter {
fn commit(&self, transaction: Transaction<H>) -> Result<(), DatabaseError> {
let mut not_ref_counted_column = Vec::new();
let result = self.0.commit(transaction.0.into_iter().filter_map(|change| {
Some(match change {
Change::Set(col, key, value) => (col as u8, key, Some(value)),
Change::Remove(col, key) => (col as u8, key, None),
Change::Store(col, key, value) =>
if ref_counted_column(col) {
(col as u8, key.as_ref().to_vec(), Some(value))
} else {
if !not_ref_counted_column.contains(&col) {
not_ref_counted_column.push(col);
}
return None;
},
Change::Reference(col, key) => {
if ref_counted_column(col) {
// FIXME accessing value is not strictly needed, optimize this in parity-db.
let value = <Self as Database<H>>::get(self, col, key.as_ref());
(col as u8, key.as_ref().to_vec(), value)
} else {
if !not_ref_counted_column.contains(&col) {
not_ref_counted_column.push(col);
}
return None;
}
},
Change::Release(col, key) =>
if ref_counted_column(col) {
(col as u8, key.as_ref().to_vec(), None)
} else {
if !not_ref_counted_column.contains(&col) {
not_ref_counted_column.push(col);
}
return None;
},
})
}));
if !not_ref_counted_column.is_empty() {
return Err(DatabaseError(Box::new(parity_db::Error::InvalidInput(format!(
"Ref counted operation on non ref counted columns {:?}",
not_ref_counted_column
)))));
}
result.map_err(|e| DatabaseError(Box::new(e)))
}
fn get(&self, col: ColumnId, key: &[u8]) -> Option<Vec<u8>> {
handle_err(self.0.get(col as u8, key))
}
fn contains(&self, col: ColumnId, key: &[u8]) -> bool {
handle_err(self.0.get_size(col as u8, key)).is_some()
}
fn value_size(&self, col: ColumnId, key: &[u8]) -> Option<usize> {
handle_err(self.0.get_size(col as u8, key)).map(|s| s as usize)
}
fn supports_ref_counting(&self) -> bool {
true
}
fn sanitize_key(&self, key: &mut Vec<u8>) {
let _prefix = key.drain(0..key.len() - crate::DB_HASH_LEN);
}
}
@@ -0,0 +1,232 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use schnellru::{Limiter, LruMap};
use sp_runtime::{traits::Block as BlockT, Justifications};
const LOG_TARGET: &str = "db::pin";
const PINNING_CACHE_SIZE: usize = 2048;
/// Entry for pinned blocks cache.
struct PinnedBlockCacheEntry<Block: BlockT> {
/// How many times this item has been pinned
ref_count: u32,
/// Cached justifications for this block
pub justifications: Option<Option<Justifications>>,
/// Cached body for this block
pub body: Option<Option<Vec<Block::Extrinsic>>>,
}
impl<Block: BlockT> Default for PinnedBlockCacheEntry<Block> {
fn default() -> Self {
Self { ref_count: 0, justifications: None, body: None }
}
}
impl<Block: BlockT> PinnedBlockCacheEntry<Block> {
pub fn decrease_ref(&mut self) {
self.ref_count = self.ref_count.saturating_sub(1);
}
pub fn increase_ref(&mut self) {
self.ref_count = self.ref_count.saturating_add(1);
}
pub fn has_no_references(&self) -> bool {
self.ref_count == 0
}
}
/// A limiter for a map which is limited by the number of elements.
#[derive(Copy, Clone, Debug)]
struct LoggingByLengthLimiter {
max_length: usize,
}
impl LoggingByLengthLimiter {
/// Creates a new length limiter with a given `max_length`.
pub const fn new(max_length: usize) -> LoggingByLengthLimiter {
LoggingByLengthLimiter { max_length }
}
}
impl<Block: BlockT> Limiter<Block::Hash, PinnedBlockCacheEntry<Block>> for LoggingByLengthLimiter {
type KeyToInsert<'a> = Block::Hash;
type LinkType = usize;
fn is_over_the_limit(&self, length: usize) -> bool {
length > self.max_length
}
fn on_insert(
&mut self,
_length: usize,
key: Self::KeyToInsert<'_>,
value: PinnedBlockCacheEntry<Block>,
) -> Option<(Block::Hash, PinnedBlockCacheEntry<Block>)> {
if self.max_length > 0 {
Some((key, value))
} else {
None
}
}
fn on_replace(
&mut self,
_length: usize,
_old_key: &mut Block::Hash,
_new_key: Block::Hash,
_old_value: &mut PinnedBlockCacheEntry<Block>,
_new_value: &mut PinnedBlockCacheEntry<Block>,
) -> bool {
true
}
fn on_removed(&mut self, key: &mut Block::Hash, value: &mut PinnedBlockCacheEntry<Block>) {
// If reference count was larger than 0 on removal,
// the item was removed due to capacity limitations.
// Since the cache should be large enough for pinned items,
// we want to know about these evictions.
if value.ref_count > 0 {
log::warn!(
target: LOG_TARGET,
"Pinned block cache limit reached. Evicting value. hash = {}",
key
);
} else {
log::trace!(
target: LOG_TARGET,
"Evicting value from pinned block cache. hash = {}",
key
)
}
}
fn on_cleared(&mut self) {}
fn on_grow(&mut self, _new_memory_usage: usize) -> bool {
true
}
}
/// Reference counted cache for pinned block bodies and justifications.
pub struct PinnedBlocksCache<Block: BlockT> {
cache: LruMap<Block::Hash, PinnedBlockCacheEntry<Block>, LoggingByLengthLimiter>,
}
impl<Block: BlockT> PinnedBlocksCache<Block> {
pub fn new() -> Self {
Self { cache: LruMap::new(LoggingByLengthLimiter::new(PINNING_CACHE_SIZE)) }
}
/// Increase reference count of an item.
/// Create an entry with empty value in the cache if necessary.
pub fn pin(&mut self, hash: Block::Hash) {
match self.cache.get_or_insert(hash, Default::default) {
Some(entry) => {
entry.increase_ref();
log::trace!(
target: LOG_TARGET,
"Bumped cache refcount. hash = {}, num_entries = {}",
hash,
self.cache.len()
);
},
None => {
log::warn!(target: LOG_TARGET, "Unable to bump reference count. hash = {}", hash)
},
};
}
/// Clear the cache
pub fn clear(&mut self) {
self.cache.clear();
}
/// Check if item is contained in the cache
pub fn contains(&self, hash: Block::Hash) -> bool {
self.cache.peek(&hash).is_some()
}
/// Attach body to an existing cache item
pub fn insert_body(&mut self, hash: Block::Hash, extrinsics: Option<Vec<Block::Extrinsic>>) {
match self.cache.peek_mut(&hash) {
Some(entry) => {
entry.body = Some(extrinsics);
log::trace!(
target: LOG_TARGET,
"Cached body. hash = {}, num_entries = {}",
hash,
self.cache.len()
);
},
None => log::warn!(
target: LOG_TARGET,
"Unable to insert body for uncached item. hash = {}",
hash
),
}
}
/// Attach justification to an existing cache item
pub fn insert_justifications(
&mut self,
hash: Block::Hash,
justifications: Option<Justifications>,
) {
match self.cache.peek_mut(&hash) {
Some(entry) => {
entry.justifications = Some(justifications);
log::trace!(
target: LOG_TARGET,
"Cached justification. hash = {}, num_entries = {}",
hash,
self.cache.len()
);
},
None => log::warn!(
target: LOG_TARGET,
"Unable to insert justifications for uncached item. hash = {}",
hash
),
}
}
/// Decreases reference count of an item.
/// If the count hits 0, the item is removed.
pub fn unpin(&mut self, hash: Block::Hash) {
if let Some(entry) = self.cache.peek_mut(&hash) {
entry.decrease_ref();
if entry.has_no_references() {
self.cache.remove(&hash);
}
}
}
/// Get justifications for cached block
pub fn justifications(&self, hash: &Block::Hash) -> Option<&Option<Justifications>> {
self.cache.peek(hash).and_then(|entry| entry.justifications.as_ref())
}
/// Get body for cached block
pub fn body(&self, hash: &Block::Hash) -> Option<&Option<Vec<Block::Extrinsic>>> {
self.cache.peek(hash).and_then(|entry| entry.body.as_ref())
}
}
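A minimal sketch (not part of this commit) of the refcount contract; `PinnedBlocksCache` is private to this crate, so picture this as a unit test in the same module, with `Block` some concrete `BlockT`:
fn pin_unpin_sketch<Block: BlockT>(hash: Block::Hash) {
    // `Block::Hash` is `Copy`, so `hash` can be passed by value repeatedly.
    let mut cache = PinnedBlocksCache::<Block>::new();
    cache.pin(hash);
    cache.pin(hash); // ref_count == 2
    cache.unpin(hash); // ref_count == 1, entry stays cached
    assert!(cache.contains(hash));
    cache.unpin(hash); // ref_count hits 0, entry is removed
    assert!(!cache.contains(hash));
}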
@@ -0,0 +1,227 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Provides [`RecordStatsState`] for recording stats about state access.
use crate::stats::StateUsageStats;
use sp_core::storage::ChildInfo;
use sp_runtime::{
traits::{Block as BlockT, HashingFor},
StateVersion,
};
use sp_state_machine::{
backend::{AsTrieBackend, Backend as StateBackend},
BackendTransaction, IterArgs, StorageIterator, StorageKey, StorageValue, TrieBackend,
};
use sp_trie::MerkleValue;
use std::sync::Arc;
/// State abstraction for recording stats about state access.
pub struct RecordStatsState<S, B: BlockT> {
/// Usage statistics
usage: StateUsageStats,
/// State machine registered stats
overlay_stats: sp_state_machine::StateMachineStats,
/// Backing state.
state: S,
/// The hash of the block this state belongs to.
block_hash: Option<B::Hash>,
/// The usage statistics of the backend. These will be updated on drop.
state_usage: Arc<StateUsageStats>,
}
impl<S, B: BlockT> std::fmt::Debug for RecordStatsState<S, B> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "Block {:?}", self.block_hash)
}
}
impl<S, B: BlockT> Drop for RecordStatsState<S, B> {
fn drop(&mut self) {
self.state_usage.merge_sm(self.usage.take());
}
}
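// Annotation (editor's note, not in the original file): dropping the wrapper
// folds the per-instance counters gathered in `self.usage` into the shared,
// backend-wide `StateUsageStats`, so per-block accesses end up in the node's
// overall usage info.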
impl<S: StateBackend<HashingFor<B>>, B: BlockT> RecordStatsState<S, B> {
/// Create a new instance wrapping generic State and shared cache.
pub(crate) fn new(
state: S,
block_hash: Option<B::Hash>,
state_usage: Arc<StateUsageStats>,
) -> Self {
RecordStatsState {
usage: StateUsageStats::new(),
overlay_stats: sp_state_machine::StateMachineStats::default(),
state,
block_hash,
state_usage,
}
}
}
pub struct RawIter<S, B>
where
S: StateBackend<HashingFor<B>>,
B: BlockT,
{
inner: <S as StateBackend<HashingFor<B>>>::RawIter,
}
impl<S, B> StorageIterator<HashingFor<B>> for RawIter<S, B>
where
S: StateBackend<HashingFor<B>>,
B: BlockT,
{
type Backend = RecordStatsState<S, B>;
type Error = S::Error;
fn next_key(&mut self, backend: &Self::Backend) -> Option<Result<StorageKey, Self::Error>> {
self.inner.next_key(&backend.state)
}
fn next_pair(
&mut self,
backend: &Self::Backend,
) -> Option<Result<(StorageKey, StorageValue), Self::Error>> {
self.inner.next_pair(&backend.state)
}
fn was_complete(&self) -> bool {
self.inner.was_complete()
}
}
impl<S: StateBackend<HashingFor<B>>, B: BlockT> StateBackend<HashingFor<B>>
for RecordStatsState<S, B>
{
type Error = S::Error;
type TrieBackendStorage = S::TrieBackendStorage;
type RawIter = RawIter<S, B>;
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
let value = self.state.storage(key)?;
self.usage.tally_key_read(key, value.as_ref(), false);
Ok(value)
}
fn storage_hash(&self, key: &[u8]) -> Result<Option<B::Hash>, Self::Error> {
self.state.storage_hash(key)
}
fn child_storage(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Vec<u8>>, Self::Error> {
let key = (child_info.storage_key().to_vec(), key.to_vec());
let value = self.state.child_storage(child_info, &key.1)?;
// just pass it through the usage counter
let value = self.usage.tally_child_key_read(&key, value, false);
Ok(value)
}
fn child_storage_hash(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<B::Hash>, Self::Error> {
self.state.child_storage_hash(child_info, key)
}
fn closest_merkle_value(
&self,
key: &[u8],
) -> Result<Option<MerkleValue<B::Hash>>, Self::Error> {
self.state.closest_merkle_value(key)
}
fn child_closest_merkle_value(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<MerkleValue<B::Hash>>, Self::Error> {
self.state.child_closest_merkle_value(child_info, key)
}
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
self.state.exists_storage(key)
}
fn exists_child_storage(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Result<bool, Self::Error> {
self.state.exists_child_storage(child_info, key)
}
fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
self.state.next_storage_key(key)
}
fn next_child_storage_key(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Vec<u8>>, Self::Error> {
self.state.next_child_storage_key(child_info, key)
}
fn storage_root<'a>(
&self,
delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
state_version: StateVersion,
) -> (B::Hash, BackendTransaction<HashingFor<B>>) {
self.state.storage_root(delta, state_version)
}
fn child_storage_root<'a>(
&self,
child_info: &ChildInfo,
delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
state_version: StateVersion,
) -> (B::Hash, bool, BackendTransaction<HashingFor<B>>) {
self.state.child_storage_root(child_info, delta, state_version)
}
fn raw_iter(&self, args: IterArgs) -> Result<Self::RawIter, Self::Error> {
self.state.raw_iter(args).map(|inner| RawIter { inner })
}
fn register_overlay_stats(&self, stats: &sp_state_machine::StateMachineStats) {
self.overlay_stats.add(stats);
}
fn usage_info(&self) -> sp_state_machine::UsageInfo {
let mut info = self.usage.take();
info.include_state_machine_states(&self.overlay_stats);
info
}
}
impl<S: StateBackend<HashingFor<B>> + AsTrieBackend<HashingFor<B>>, B: BlockT>
AsTrieBackend<HashingFor<B>> for RecordStatsState<S, B>
{
type TrieBackendStorage = <S as AsTrieBackend<HashingFor<B>>>::TrieBackendStorage;
fn as_trie_backend(&self) -> &TrieBackend<Self::TrieBackendStorage, HashingFor<B>> {
self.state.as_trie_backend()
}
}
@@ -0,0 +1,145 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Database usage statistics
use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering};
/// Accumulated usage statistics for state queries.
pub struct StateUsageStats {
started: std::time::Instant,
reads: AtomicU64,
bytes_read: AtomicU64,
writes: AtomicU64,
bytes_written: AtomicU64,
writes_nodes: AtomicU64,
bytes_written_nodes: AtomicU64,
removed_nodes: AtomicU64,
bytes_removed_nodes: AtomicU64,
reads_cache: AtomicU64,
bytes_read_cache: AtomicU64,
}
impl StateUsageStats {
/// New empty usage stats.
pub fn new() -> Self {
Self {
started: std::time::Instant::now(),
reads: 0.into(),
bytes_read: 0.into(),
writes: 0.into(),
bytes_written: 0.into(),
writes_nodes: 0.into(),
bytes_written_nodes: 0.into(),
removed_nodes: 0.into(),
bytes_removed_nodes: 0.into(),
reads_cache: 0.into(),
bytes_read_cache: 0.into(),
}
}
/// Tally one read operation, of some length.
pub fn tally_read(&self, data_bytes: u64, cache: bool) {
self.reads.fetch_add(1, AtomicOrdering::Relaxed);
self.bytes_read.fetch_add(data_bytes, AtomicOrdering::Relaxed);
if cache {
self.reads_cache.fetch_add(1, AtomicOrdering::Relaxed);
self.bytes_read_cache.fetch_add(data_bytes, AtomicOrdering::Relaxed);
}
}
/// Tally one key read.
pub fn tally_key_read(&self, key: &[u8], val: Option<&Vec<u8>>, cache: bool) {
self.tally_read(
key.len() as u64 + val.as_ref().map(|x| x.len() as u64).unwrap_or(0),
cache,
);
}
/// Tally one child key read.
pub fn tally_child_key_read(
&self,
key: &(Vec<u8>, Vec<u8>),
val: Option<Vec<u8>>,
cache: bool,
) -> Option<Vec<u8>> {
let bytes = key.0.len() + key.1.len() + val.as_ref().map(|x| x.len()).unwrap_or(0);
self.tally_read(bytes as u64, cache);
val
}
/// Tally some write trie nodes operations, including their byte count.
pub fn tally_writes_nodes(&self, ops: u64, data_bytes: u64) {
self.writes_nodes.fetch_add(ops, AtomicOrdering::Relaxed);
self.bytes_written_nodes.fetch_add(data_bytes, AtomicOrdering::Relaxed);
}
/// Tally some removed trie nodes operations, including their byte count.
pub fn tally_removed_nodes(&self, ops: u64, data_bytes: u64) {
self.removed_nodes.fetch_add(ops, AtomicOrdering::Relaxed);
self.bytes_removed_nodes.fetch_add(data_bytes, AtomicOrdering::Relaxed);
}
/// Tally some write trie nodes operations, including their byte count.
pub fn tally_writes(&self, ops: u64, data_bytes: u64) {
self.writes.fetch_add(ops, AtomicOrdering::Relaxed);
self.bytes_written.fetch_add(data_bytes, AtomicOrdering::Relaxed);
}
/// Merge state machine usage info.
pub fn merge_sm(&self, info: sp_state_machine::UsageInfo) {
self.reads.fetch_add(info.reads.ops, AtomicOrdering::Relaxed);
self.bytes_read.fetch_add(info.reads.bytes, AtomicOrdering::Relaxed);
self.writes_nodes.fetch_add(info.nodes_writes.ops, AtomicOrdering::Relaxed);
self.bytes_written_nodes
.fetch_add(info.nodes_writes.bytes, AtomicOrdering::Relaxed);
self.removed_nodes.fetch_add(info.removed_nodes.ops, AtomicOrdering::Relaxed);
self.bytes_removed_nodes
.fetch_add(info.removed_nodes.bytes, AtomicOrdering::Relaxed);
self.reads_cache.fetch_add(info.cache_reads.ops, AtomicOrdering::Relaxed);
self.bytes_read_cache.fetch_add(info.cache_reads.bytes, AtomicOrdering::Relaxed);
}
/// Returns the collected `UsageInfo` and resets the internal state.
pub fn take(&self) -> sp_state_machine::UsageInfo {
use sp_state_machine::UsageUnit;
fn unit(ops: &AtomicU64, bytes: &AtomicU64) -> UsageUnit {
UsageUnit {
ops: ops.swap(0, AtomicOrdering::Relaxed),
bytes: bytes.swap(0, AtomicOrdering::Relaxed),
}
}
sp_state_machine::UsageInfo {
reads: unit(&self.reads, &self.bytes_read),
writes: unit(&self.writes, &self.bytes_written),
nodes_writes: unit(&self.writes_nodes, &self.bytes_written_nodes),
removed_nodes: unit(&self.removed_nodes, &self.bytes_removed_nodes),
cache_reads: unit(&self.reads_cache, &self.bytes_read_cache),
modified_reads: Default::default(),
overlay_writes: Default::default(),
// TODO: Proper tracking state of memory footprint here requires
// imposing `MallocSizeOf` requirement on half of the codebase,
// so it is an open question how to do it better
memory: 0,
started: self.started,
span: self.started.elapsed(),
}
}
}
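A small sketch (not part of this commit) of the drain-on-`take` semantics; `StateUsageStats` lives in a private module, so picture this as a unit test alongside it:
fn take_resets_counters() {
    let stats = StateUsageStats::new();
    stats.tally_read(100, false);
    let info = stats.take();
    assert_eq!((info.reads.ops, info.reads.bytes), (1, 100));
    // Counters are swapped to zero on `take`, so a second call reports nothing.
    assert_eq!(stats.take().reads.ops, 0);
}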
@@ -0,0 +1,256 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Database upgrade logic.
use std::{
fmt, fs,
io::{self, ErrorKind, Read, Write},
path::{Path, PathBuf},
};
use crate::{columns, utils::DatabaseType};
use codec::{Decode, Encode};
use kvdb_rocksdb::{Database, DatabaseConfig};
use sp_runtime::traits::Block as BlockT;
/// Version file name.
const VERSION_FILE_NAME: &str = "db_version";
/// Current db version.
const CURRENT_VERSION: u32 = 4;
/// Number of columns in v1.
const V1_NUM_COLUMNS: u32 = 11;
const V2_NUM_COLUMNS: u32 = 12;
const V3_NUM_COLUMNS: u32 = 12;
/// Database upgrade errors.
#[derive(Debug)]
pub enum UpgradeError {
/// Database version cannot be read from existing db_version file.
UnknownDatabaseVersion,
/// Missing database version file.
MissingDatabaseVersionFile,
/// Database version no longer supported.
UnsupportedVersion(u32),
/// Database version comes from future version of the client.
FutureDatabaseVersion(u32),
/// Invalid justification block.
DecodingJustificationBlock,
/// Common io error.
Io(io::Error),
}
pub type UpgradeResult<T> = Result<T, UpgradeError>;
impl From<io::Error> for UpgradeError {
fn from(err: io::Error) -> Self {
UpgradeError::Io(err)
}
}
impl fmt::Display for UpgradeError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
UpgradeError::UnknownDatabaseVersion => {
write!(f, "Database version cannot be read from existing db_version file")
},
UpgradeError::MissingDatabaseVersionFile => write!(f, "Missing database version file"),
UpgradeError::UnsupportedVersion(version) => {
write!(f, "Database version no longer supported: {}", version)
},
UpgradeError::FutureDatabaseVersion(version) => {
write!(f, "Database version comes from future version of the client: {}", version)
},
UpgradeError::DecodingJustificationBlock => {
write!(f, "Decoding justification block failed")
},
UpgradeError::Io(err) => write!(f, "Io error: {}", err),
}
}
}
/// Upgrade database to current version.
pub fn upgrade_db<Block: BlockT>(db_path: &Path, db_type: DatabaseType) -> UpgradeResult<()> {
let db_version = current_version(db_path)?;
match db_version {
0 => return Err(UpgradeError::UnsupportedVersion(db_version)),
1 => {
migrate_1_to_2::<Block>(db_path, db_type)?;
migrate_2_to_3::<Block>(db_path, db_type)?;
migrate_3_to_4::<Block>(db_path, db_type)?;
},
2 => {
migrate_2_to_3::<Block>(db_path, db_type)?;
migrate_3_to_4::<Block>(db_path, db_type)?;
},
3 => {
migrate_3_to_4::<Block>(db_path, db_type)?;
},
CURRENT_VERSION => (),
_ => return Err(UpgradeError::FutureDatabaseVersion(db_version)),
}
update_version(db_path)?;
Ok(())
}
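// Annotation (editor's note, not in the original file): migrations are
// chained. A database found at version 2 runs `migrate_2_to_3` and then
// `migrate_3_to_4`, after which `update_version` stamps "4" into the
// `db_version` file so the chain is not re-run on the next start.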
/// Migration from version 1 to version 2:
/// 1) the number of columns has changed from 11 to 12;
/// 2) the transactions column was added.
fn migrate_1_to_2<Block: BlockT>(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> {
let db_cfg = DatabaseConfig::with_columns(V1_NUM_COLUMNS);
let mut db = Database::open(&db_cfg, db_path)?;
db.add_column().map_err(Into::into)
}
/// Migration from version 2 to version 3:
/// - The format of the stored Justification changed to support multiple Justifications.
fn migrate_2_to_3<Block: BlockT>(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> {
let db_cfg = DatabaseConfig::with_columns(V2_NUM_COLUMNS);
let db = Database::open(&db_cfg, db_path)?;
// Get all the keys we need to update
let keys: Vec<_> = db
.iter(columns::JUSTIFICATIONS)
.map(|r| r.map(|e| e.0))
.collect::<Result<_, _>>()?;
// Read and update each entry
let mut transaction = db.transaction();
for key in keys {
if let Some(justification) = db.get(columns::JUSTIFICATIONS, &key)? {
// Tag each justification with the hardcoded ID for GRANDPA to avoid the dependency on
// the GRANDPA crate.
// NOTE: when storing justifications the previous API would get a `Vec<u8>` and still
// call encode on it.
let justification = Vec::<u8>::decode(&mut &justification[..])
.map_err(|_| UpgradeError::DecodingJustificationBlock)?;
let justifications = sp_runtime::Justifications::from((*b"FRNK", justification));
transaction.put_vec(columns::JUSTIFICATIONS, &key, justifications.encode());
}
}
db.write(transaction)?;
Ok(())
}
/// Migration from version 3 to version 4:
/// 1) the number of columns has changed from 12 to 13;
/// 2) the BODY_INDEX column was added.
fn migrate_3_to_4<Block: BlockT>(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> {
let db_cfg = DatabaseConfig::with_columns(V3_NUM_COLUMNS);
let mut db = Database::open(&db_cfg, db_path)?;
db.add_column().map_err(Into::into)
}
/// Reads the current database version from the file at the given path.
/// Returns an error if the version file is missing or cannot be parsed.
fn current_version(path: &Path) -> UpgradeResult<u32> {
match fs::File::open(version_file_path(path)) {
Err(ref err) if err.kind() == ErrorKind::NotFound =>
Err(UpgradeError::MissingDatabaseVersionFile),
Err(_) => Err(UpgradeError::UnknownDatabaseVersion),
Ok(mut file) => {
let mut s = String::new();
file.read_to_string(&mut s).map_err(|_| UpgradeError::UnknownDatabaseVersion)?;
u32::from_str_radix(&s, 10).map_err(|_| UpgradeError::UnknownDatabaseVersion)
},
}
}
/// Writes current database version to the file.
/// Creates a new file if the version file does not exist yet.
pub fn update_version(path: &Path) -> io::Result<()> {
fs::create_dir_all(path)?;
let mut file = fs::File::create(version_file_path(path))?;
file.write_all(format!("{}", CURRENT_VERSION).as_bytes())?;
Ok(())
}
/// Returns the version file path.
fn version_file_path(path: &Path) -> PathBuf {
let mut file_path = path.to_owned();
file_path.push(VERSION_FILE_NAME);
file_path
}
#[cfg(all(test, feature = "rocksdb"))]
mod tests {
use super::*;
use crate::{tests::Block, DatabaseSource};
fn create_db(db_path: &Path, version: Option<u32>) {
if let Some(version) = version {
fs::create_dir_all(db_path).unwrap();
let mut file = fs::File::create(version_file_path(db_path)).unwrap();
file.write_all(format!("{}", version).as_bytes()).unwrap();
}
}
fn open_database(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> {
crate::utils::open_database::<Block>(
&DatabaseSource::RocksDb { path: db_path.to_owned(), cache_size: 128 },
db_type,
true,
)
.map(|_| ())
.map_err(|e| sp_blockchain::Error::Backend(e.to_string()))
}
#[test]
fn downgrade_never_happens() {
let db_dir = tempfile::TempDir::new().unwrap();
create_db(db_dir.path(), Some(CURRENT_VERSION + 1));
assert!(open_database(db_dir.path(), DatabaseType::Full).is_err());
}
#[test]
fn open_empty_database_works() {
let db_type = DatabaseType::Full;
let db_dir = tempfile::TempDir::new().unwrap();
let db_dir = db_dir.path().join(db_type.as_str());
open_database(&db_dir, db_type).unwrap();
open_database(&db_dir, db_type).unwrap();
assert_eq!(current_version(&db_dir).unwrap(), CURRENT_VERSION);
}
#[test]
fn upgrade_to_3_works() {
let db_type = DatabaseType::Full;
for version_from_file in &[None, Some(1), Some(2)] {
let db_dir = tempfile::TempDir::new().unwrap();
let db_path = db_dir.path().join(db_type.as_str());
create_db(&db_path, *version_from_file);
open_database(&db_path, db_type).unwrap();
assert_eq!(current_version(&db_path).unwrap(), CURRENT_VERSION);
}
}
#[test]
fn upgrade_to_4_works() {
let db_type = DatabaseType::Full;
for version_from_file in &[None, Some(1), Some(2), Some(3)] {
let db_dir = tempfile::TempDir::new().unwrap();
let db_path = db_dir.path().join(db_type.as_str());
create_db(&db_path, *version_from_file);
open_database(&db_path, db_type).unwrap();
assert_eq!(current_version(&db_path).unwrap(), CURRENT_VERSION);
}
}
}
+860
View File
@@ -0,0 +1,860 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Db-based backend utility structures and functions, used by both
//! full and light storages.
use std::{fmt, fs, io, path::Path, sync::Arc};
use log::{debug, info};
use crate::{Database, DatabaseSource, DbHash};
use codec::Decode;
use sc_client_api::blockchain::{BlockGap, BlockGapType};
use sp_database::Transaction;
use sp_runtime::{
generic::BlockId,
traits::{
Block as BlockT, Header as HeaderT, NumberFor, UniqueSaturatedFrom, UniqueSaturatedInto,
Zero,
},
};
use sp_trie::DBValue;
/// Number of columns in the db. Must be the same for both full and light dbs.
/// Otherwise RocksDb will fail to open the database and check its type.
pub const NUM_COLUMNS: u32 = 13;
/// Meta column. The set of keys in the column is shared by full and light storages.
pub const COLUMN_META: u32 = 0;
/// Current block gap version.
pub const BLOCK_GAP_CURRENT_VERSION: u32 = 1;
/// Keys of entries in COLUMN_META.
pub mod meta_keys {
/// Type of storage (full or light).
pub const TYPE: &[u8; 4] = b"type";
/// Best block key.
pub const BEST_BLOCK: &[u8; 4] = b"best";
/// Last finalized block key.
pub const FINALIZED_BLOCK: &[u8; 5] = b"final";
/// Last finalized state key.
pub const FINALIZED_STATE: &[u8; 6] = b"fstate";
/// Block gap.
pub const BLOCK_GAP: &[u8; 3] = b"gap";
/// Block gap version.
pub const BLOCK_GAP_VERSION: &[u8; 7] = b"gap_ver";
/// Genesis block hash.
pub const GENESIS_HASH: &[u8; 3] = b"gen";
/// Leaves prefix list key.
pub const LEAF_PREFIX: &[u8; 4] = b"leaf";
/// Children prefix list key.
pub const CHILDREN_PREFIX: &[u8; 8] = b"children";
}
/// Database metadata.
#[derive(Debug)]
pub struct Meta<N, H> {
/// Hash of the best known block.
pub best_hash: H,
/// Number of the best known block.
pub best_number: N,
/// Hash of the best finalized block.
pub finalized_hash: H,
/// Number of the best finalized block.
pub finalized_number: N,
/// Hash of the genesis block.
pub genesis_hash: H,
/// Finalized state, if any
pub finalized_state: Option<(H, N)>,
/// Block gap, if any.
pub block_gap: Option<BlockGap<N>>,
}
/// A block lookup key: used for canonical lookup from block number to hash.
pub type NumberIndexKey = [u8; 4];
/// Database type.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DatabaseType {
/// Full node database.
Full,
}
/// Convert block number into short lookup key (big-endian representation) for
/// blocks that are in the canonical chain.
///
/// In the current database schema, this kind of key is only used for
/// lookups into an index, NOT for storing header data or others.
pub fn number_index_key<N: TryInto<u32>>(n: N) -> sp_blockchain::Result<NumberIndexKey> {
let n = n.try_into().map_err(|_| {
sp_blockchain::Error::Backend("Block number cannot be converted to u32".into())
})?;
Ok([(n >> 24) as u8, ((n >> 16) & 0xff) as u8, ((n >> 8) & 0xff) as u8, (n & 0xff) as u8])
}
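// A test-only illustration of the big-endian layout documented above: block
// number 0x0102_0304 maps to `[1, 2, 3, 4]`, and numbers that do not fit into
// a `u32` are rejected.
#[cfg(test)]
#[test]
fn number_index_key_layout_sketch() {
	assert_eq!(number_index_key(0x0102_0304u32).unwrap(), [1, 2, 3, 4]);
	assert!(number_index_key(u32::MAX as u64 + 1).is_err());
}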
/// Convert number and hash into long lookup key for blocks that are
/// not in the canonical chain.
pub fn number_and_hash_to_lookup_key<N, H>(number: N, hash: H) -> sp_blockchain::Result<Vec<u8>>
where
N: TryInto<u32>,
H: AsRef<[u8]>,
{
let mut lookup_key = number_index_key(number)?.to_vec();
lookup_key.extend_from_slice(hash.as_ref());
Ok(lookup_key)
}
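// Likewise, a test-only illustration: the long lookup key is simply the 4-byte
// big-endian number followed by the hash bytes.
#[cfg(test)]
#[test]
fn lookup_key_layout_sketch() {
	let key = number_and_hash_to_lookup_key(1u32, [0xaa, 0xbb]).unwrap();
	assert_eq!(key, vec![0, 0, 0, 1, 0xaa, 0xbb]);
}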
/// Delete number to hash mapping in DB transaction.
pub fn remove_number_to_key_mapping<N: TryInto<u32>>(
transaction: &mut Transaction<DbHash>,
key_lookup_col: u32,
number: N,
) -> sp_blockchain::Result<()> {
transaction.remove(key_lookup_col, number_index_key(number)?.as_ref());
Ok(())
}
/// Place a number mapping into the database. This maps a block number to the
/// hash currently perceived as canonical at that position.
pub fn insert_number_to_key_mapping<N: TryInto<u32> + Clone, H: AsRef<[u8]>>(
transaction: &mut Transaction<DbHash>,
key_lookup_col: u32,
number: N,
hash: H,
) -> sp_blockchain::Result<()> {
transaction.set_from_vec(
key_lookup_col,
number_index_key(number.clone())?.as_ref(),
number_and_hash_to_lookup_key(number, hash)?,
);
Ok(())
}
/// Insert a hash to key mapping in the database.
pub fn insert_hash_to_key_mapping<N: TryInto<u32>, H: AsRef<[u8]> + Clone>(
transaction: &mut Transaction<DbHash>,
key_lookup_col: u32,
number: N,
hash: H,
) -> sp_blockchain::Result<()> {
transaction.set_from_vec(
key_lookup_col,
hash.as_ref(),
number_and_hash_to_lookup_key(number, hash.clone())?,
);
Ok(())
}
/// Convert block id to block lookup key.
/// A block lookup key is the DB key under which the header, body, and
/// justification of a block are stored. Looks up the key by hash in the DB as
/// necessary.
pub fn block_id_to_lookup_key<Block>(
db: &dyn Database<DbHash>,
key_lookup_col: u32,
id: BlockId<Block>,
) -> Result<Option<Vec<u8>>, sp_blockchain::Error>
where
Block: BlockT,
	NumberFor<Block>: UniqueSaturatedFrom<u64> + UniqueSaturatedInto<u64>,
{
Ok(match id {
BlockId::Number(n) => db.get(key_lookup_col, number_index_key(n)?.as_ref()),
BlockId::Hash(h) => db.get(key_lookup_col, h.as_ref()),
})
}
/// Opens the configured database.
pub fn open_database<Block: BlockT>(
db_source: &DatabaseSource,
db_type: DatabaseType,
create: bool,
) -> OpenDbResult {
	// Maybe migrate (copy) the database to a type-specific subdirectory so that
	// light and full databases can coexist.
	// NOTE: This function can be removed in a few releases.
maybe_migrate_to_type_subdir::<Block>(db_source, db_type)?;
open_database_at::<Block>(db_source, db_type, create)
}
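// A minimal usage sketch, assuming the types of this module (`base_path` is a
// placeholder): with `DatabaseSource::Auto`, an existing RocksDb at
// `rocksdb_path` wins, otherwise the ParityDb at `paritydb_path` is opened
// (or created, when `create` is true).
//
// let source = DatabaseSource::Auto {
// 	paritydb_path: base_path.join("paritydb"),
// 	rocksdb_path: base_path.join("db"),
// 	cache_size: 128,
// };
// let db = open_database::<Block>(&source, DatabaseType::Full, true)?;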
fn open_database_at<Block: BlockT>(
db_source: &DatabaseSource,
db_type: DatabaseType,
create: bool,
) -> OpenDbResult {
let db: Arc<dyn Database<DbHash>> = match &db_source {
DatabaseSource::ParityDb { path } => open_parity_db::<Block>(path, db_type, create)?,
#[cfg(feature = "rocksdb")]
DatabaseSource::RocksDb { path, cache_size } =>
open_kvdb_rocksdb::<Block>(path, db_type, create, *cache_size)?,
DatabaseSource::Custom { db, require_create_flag } => {
if *require_create_flag && !create {
return Err(OpenDbError::DoesNotExist);
}
db.clone()
},
DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size } => {
// check if rocksdb exists first, if not, open paritydb
match open_kvdb_rocksdb::<Block>(rocksdb_path, db_type, false, *cache_size) {
Ok(db) => db,
Err(OpenDbError::NotEnabled(_)) | Err(OpenDbError::DoesNotExist) =>
open_parity_db::<Block>(paritydb_path, db_type, create)?,
Err(as_is) => return Err(as_is),
}
},
};
check_database_type(&*db, db_type)?;
Ok(db)
}
#[derive(Debug)]
pub enum OpenDbError {
// constructed only when rocksdb and paritydb are disabled
#[allow(dead_code)]
NotEnabled(&'static str),
DoesNotExist,
Internal(String),
DatabaseError(sp_database::error::DatabaseError),
UnexpectedDbType {
expected: DatabaseType,
found: Vec<u8>,
},
}
type OpenDbResult = Result<Arc<dyn Database<DbHash>>, OpenDbError>;
impl fmt::Display for OpenDbError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
OpenDbError::Internal(e) => write!(f, "{}", e),
OpenDbError::DoesNotExist => write!(f, "Database does not exist at given location"),
OpenDbError::NotEnabled(feat) => {
write!(f, "`{}` feature not enabled, database can not be opened", feat)
},
OpenDbError::DatabaseError(db_error) => {
write!(f, "Database Error: {}", db_error)
},
OpenDbError::UnexpectedDbType { expected, found } => {
write!(
f,
"Unexpected DB-Type. Expected: {:?}, Found: {:?}",
expected.as_str().as_bytes(),
found
)
},
}
}
}
impl From<OpenDbError> for sp_blockchain::Error {
fn from(err: OpenDbError) -> Self {
sp_blockchain::Error::Backend(err.to_string())
}
}
impl From<parity_db::Error> for OpenDbError {
fn from(err: parity_db::Error) -> Self {
if matches!(err, parity_db::Error::DatabaseNotFound) {
OpenDbError::DoesNotExist
} else {
OpenDbError::Internal(err.to_string())
}
}
}
impl From<io::Error> for OpenDbError {
fn from(err: io::Error) -> Self {
if err.to_string().contains("create_if_missing is false") {
OpenDbError::DoesNotExist
} else {
OpenDbError::Internal(err.to_string())
}
}
}
fn open_parity_db<Block: BlockT>(path: &Path, db_type: DatabaseType, create: bool) -> OpenDbResult {
match crate::parity_db::open(path, db_type, create, false) {
Ok(db) => Ok(db),
Err(parity_db::Error::InvalidConfiguration(_)) => {
log::warn!("Invalid parity db configuration, attempting database metadata update.");
// Try to update the database with the new config
Ok(crate::parity_db::open(path, db_type, create, true)?)
},
Err(e) => Err(e.into()),
}
}
#[cfg(any(feature = "rocksdb", test))]
fn open_kvdb_rocksdb<Block: BlockT>(
path: &Path,
db_type: DatabaseType,
create: bool,
cache_size: usize,
) -> OpenDbResult {
// first upgrade database to required version
match crate::upgrade::upgrade_db::<Block>(path, db_type) {
// in case of missing version file, assume that database simply does not exist at given
// location
Ok(_) | Err(crate::upgrade::UpgradeError::MissingDatabaseVersionFile) => (),
Err(err) => return Err(io::Error::new(io::ErrorKind::Other, err.to_string()).into()),
}
// and now open database assuming that it has the latest version
let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(NUM_COLUMNS);
db_config.create_if_missing = create;
let mut memory_budget = std::collections::HashMap::new();
match db_type {
DatabaseType::Full => {
let state_col_budget = (cache_size as f64 * 0.9) as usize;
let other_col_budget = (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1);
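			// For example, with `cache_size` = 1024 MiB this gives the state column
			// 921 MiB and each of the remaining 12 columns 8 MiB.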
for i in 0..NUM_COLUMNS {
if i == crate::columns::STATE {
memory_budget.insert(i, state_col_budget);
} else {
memory_budget.insert(i, other_col_budget);
}
}
log::trace!(
target: "db",
"Open RocksDB database at {:?}, state column budget: {} MiB, others({}) column cache: {} MiB",
path,
state_col_budget,
NUM_COLUMNS,
other_col_budget,
);
},
}
db_config.memory_budget = memory_budget;
let db = kvdb_rocksdb::Database::open(&db_config, path)?;
// write database version only after the database is successfully opened
crate::upgrade::update_version(path)?;
Ok(sp_database::as_database(db))
}
#[cfg(not(any(feature = "rocksdb", test)))]
fn open_kvdb_rocksdb<Block: BlockT>(
_path: &Path,
_db_type: DatabaseType,
_create: bool,
_cache_size: usize,
) -> OpenDbResult {
Err(OpenDbError::NotEnabled("with-kvdb-rocksdb"))
}
/// Check database type.
pub fn check_database_type(
db: &dyn Database<DbHash>,
db_type: DatabaseType,
) -> Result<(), OpenDbError> {
match db.get(COLUMN_META, meta_keys::TYPE) {
Some(stored_type) =>
if db_type.as_str().as_bytes() != &*stored_type {
return Err(OpenDbError::UnexpectedDbType {
expected: db_type,
found: stored_type.to_owned(),
});
},
None => {
let mut transaction = Transaction::new();
transaction.set(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes());
db.commit(transaction).map_err(OpenDbError::DatabaseError)?;
},
}
Ok(())
}
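// For illustration: on the first open, the meta column gains the entry
// `type -> b"full"`; any later open with a mismatching `DatabaseType` fails
// with `OpenDbError::UnexpectedDbType`.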
fn maybe_migrate_to_type_subdir<Block: BlockT>(
source: &DatabaseSource,
db_type: DatabaseType,
) -> Result<(), OpenDbError> {
if let Some(p) = source.path() {
let mut basedir = p.to_path_buf();
basedir.pop();
		// Do we have to migrate to a database-type-based subdirectory layout?
		// See if there's a file identifying a rocksdb or paritydb folder in the
		// parent dir, and whether the target path ends in a role-specific directory.
if (basedir.join("db_version").exists() || basedir.join("metadata").exists()) &&
(p.ends_with(DatabaseType::Full.as_str()))
{
// Try to open the database to check if the current `DatabaseType` matches the type of
// database stored in the target directory and close the database on success.
let mut old_source = source.clone();
old_source.set_path(&basedir);
open_database_at::<Block>(&old_source, db_type, false)?;
info!(
"Migrating database to a database-type-based subdirectory: '{:?}' -> '{:?}'",
basedir,
basedir.join(db_type.as_str())
);
let mut tmp_dir = basedir.clone();
tmp_dir.pop();
tmp_dir.push("tmp");
fs::rename(&basedir, &tmp_dir)?;
fs::create_dir_all(&p)?;
fs::rename(tmp_dir, &p)?;
}
}
Ok(())
}
/// Read database column entry for the given block.
pub fn read_db<Block>(
db: &dyn Database<DbHash>,
col_index: u32,
col: u32,
id: BlockId<Block>,
) -> sp_blockchain::Result<Option<DBValue>>
where
Block: BlockT,
{
block_id_to_lookup_key(db, col_index, id).map(|key| match key {
Some(key) => db.get(col, key.as_ref()),
None => None,
})
}
/// Remove database column entry for the given block.
pub fn remove_from_db<Block>(
transaction: &mut Transaction<DbHash>,
db: &dyn Database<DbHash>,
col_index: u32,
col: u32,
id: BlockId<Block>,
) -> sp_blockchain::Result<()>
where
Block: BlockT,
{
block_id_to_lookup_key(db, col_index, id).map(|key| {
if let Some(key) = key {
transaction.remove(col, key.as_ref());
}
})
}
/// Read a header from the database.
pub fn read_header<Block: BlockT>(
db: &dyn Database<DbHash>,
col_index: u32,
col: u32,
id: BlockId<Block>,
) -> sp_blockchain::Result<Option<Block::Header>> {
match read_db(db, col_index, col, id)? {
Some(header) => match Block::Header::decode(&mut &header[..]) {
Ok(header) => Ok(Some(header)),
Err(_) => Err(sp_blockchain::Error::Backend("Error decoding header".into())),
},
None => Ok(None),
}
}
/// Read meta from the database.
pub fn read_meta<Block>(
db: &dyn Database<DbHash>,
col_header: u32,
) -> Result<Meta<<<Block as BlockT>::Header as HeaderT>::Number, Block::Hash>, sp_blockchain::Error>
where
Block: BlockT,
{
let genesis_hash: Block::Hash = match read_genesis_hash(db)? {
Some(genesis_hash) => genesis_hash,
None =>
return Ok(Meta {
best_hash: Default::default(),
best_number: Zero::zero(),
finalized_hash: Default::default(),
finalized_number: Zero::zero(),
genesis_hash: Default::default(),
finalized_state: None,
block_gap: None,
}),
};
let load_meta_block = |desc, key| -> Result<_, sp_blockchain::Error> {
if let Some(Some(header)) = db
.get(COLUMN_META, key)
.and_then(|id| db.get(col_header, &id).map(|b| Block::Header::decode(&mut &b[..]).ok()))
{
let hash = header.hash();
debug!(
target: "db",
"Opened blockchain db, fetched {} = {:?} ({})",
desc,
hash,
header.number(),
);
Ok((hash, *header.number()))
} else {
Ok((Default::default(), Zero::zero()))
}
};
let (best_hash, best_number) = load_meta_block("best", meta_keys::BEST_BLOCK)?;
let (finalized_hash, finalized_number) = load_meta_block("final", meta_keys::FINALIZED_BLOCK)?;
let (finalized_state_hash, finalized_state_number) =
load_meta_block("final_state", meta_keys::FINALIZED_STATE)?;
let finalized_state = if finalized_state_hash != Default::default() {
Some((finalized_state_hash, finalized_state_number))
} else {
None
};
let block_gap = match db
.get(COLUMN_META, meta_keys::BLOCK_GAP_VERSION)
.and_then(|d| u32::decode(&mut d.as_slice()).ok())
{
None => {
let old_block_gap: Option<(NumberFor<Block>, NumberFor<Block>)> = db
.get(COLUMN_META, meta_keys::BLOCK_GAP)
.and_then(|d| Decode::decode(&mut d.as_slice()).ok());
old_block_gap.map(|(start, end)| BlockGap {
start,
end,
gap_type: BlockGapType::MissingHeaderAndBody,
})
},
Some(version) => match version {
BLOCK_GAP_CURRENT_VERSION => db
.get(COLUMN_META, meta_keys::BLOCK_GAP)
.and_then(|d| Decode::decode(&mut d.as_slice()).ok()),
v =>
return Err(sp_blockchain::Error::Backend(format!(
"Unsupported block gap DB version: {v}"
))),
},
};
debug!(target: "db", "block_gap={:?}", block_gap);
Ok(Meta {
best_hash,
best_number,
finalized_hash,
finalized_number,
genesis_hash,
finalized_state,
block_gap,
})
}
/// Read genesis hash from database.
pub fn read_genesis_hash<Hash: Decode>(
db: &dyn Database<DbHash>,
) -> sp_blockchain::Result<Option<Hash>> {
match db.get(COLUMN_META, meta_keys::GENESIS_HASH) {
Some(h) => match Decode::decode(&mut &h[..]) {
Ok(h) => Ok(Some(h)),
Err(err) =>
Err(sp_blockchain::Error::Backend(format!("Error decoding genesis hash: {}", err))),
},
None => Ok(None),
}
}
impl DatabaseType {
/// Returns str representation of the type.
pub fn as_str(&self) -> &'static str {
match *self {
DatabaseType::Full => "full",
}
}
}
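/// A `codec::Input` that reads from two byte slices in sequence, as if they were
/// concatenated, without allocating a joined buffer.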
pub(crate) struct JoinInput<'a, 'b>(&'a [u8], &'b [u8]);
pub(crate) fn join_input<'a, 'b>(i1: &'a [u8], i2: &'b [u8]) -> JoinInput<'a, 'b> {
JoinInput(i1, i2)
}
impl<'a, 'b> codec::Input for JoinInput<'a, 'b> {
fn remaining_len(&mut self) -> Result<Option<usize>, codec::Error> {
Ok(Some(self.0.len() + self.1.len()))
}
fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> {
let mut read = 0;
		if !self.0.is_empty() {
read = std::cmp::min(self.0.len(), into.len());
self.0.read(&mut into[..read])?;
}
if read < into.len() {
self.1.read(&mut into[read..])?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use codec::Input;
use sp_runtime::testing::{Block as RawBlock, MockCallU64, TestXt};
pub type UncheckedXt = TestXt<MockCallU64, ()>;
type Block = RawBlock<UncheckedXt>;
#[cfg(feature = "rocksdb")]
#[test]
fn database_type_subdir_migration() {
use std::path::PathBuf;
type Block = RawBlock<UncheckedXt>;
fn check_dir_for_db_type(
db_type: DatabaseType,
mut source: DatabaseSource,
db_check_file: &str,
) {
let base_path = tempfile::TempDir::new().unwrap();
let old_db_path = base_path.path().join("chains/dev/db");
source.set_path(&old_db_path);
{
let db_res = open_database::<Block>(&source, db_type, true);
assert!(db_res.is_ok(), "New database should be created.");
assert!(old_db_path.join(db_check_file).exists());
assert!(!old_db_path.join(db_type.as_str()).join("db_version").exists());
}
source.set_path(&old_db_path.join(db_type.as_str()));
let db_res = open_database::<Block>(&source, db_type, true);
assert!(db_res.is_ok(), "Reopening the db with the same role should work");
// check if the database dir had been migrated
assert!(!old_db_path.join(db_check_file).exists());
assert!(old_db_path.join(db_type.as_str()).join(db_check_file).exists());
}
check_dir_for_db_type(
DatabaseType::Full,
DatabaseSource::RocksDb { path: PathBuf::new(), cache_size: 128 },
"db_version",
);
check_dir_for_db_type(
DatabaseType::Full,
DatabaseSource::ParityDb { path: PathBuf::new() },
"metadata",
);
// check failure on reopening with wrong role
{
let base_path = tempfile::TempDir::new().unwrap();
let old_db_path = base_path.path().join("chains/dev/db");
let source = DatabaseSource::RocksDb { path: old_db_path.clone(), cache_size: 128 };
{
let db_res = open_database::<Block>(&source, DatabaseType::Full, true);
assert!(db_res.is_ok(), "New database should be created.");
// check if the database dir had been migrated
assert!(old_db_path.join("db_version").exists());
assert!(!old_db_path.join("light/db_version").exists());
assert!(!old_db_path.join("full/db_version").exists());
}
// assert nothing was changed
assert!(old_db_path.join("db_version").exists());
assert!(!old_db_path.join("full/db_version").exists());
}
}
#[test]
fn number_index_key_doesnt_panic() {
let id = BlockId::<Block>::Number(72340207214430721);
match id {
BlockId::Number(n) => number_index_key(n).expect_err("number should overflow u32"),
_ => unreachable!(),
};
}
#[test]
fn database_type_as_str_works() {
assert_eq!(DatabaseType::Full.as_str(), "full");
}
#[test]
fn join_input_works() {
let buf1 = [1, 2, 3, 4];
let buf2 = [5, 6, 7, 8];
let mut test = [0, 0, 0];
let mut joined = join_input(buf1.as_ref(), buf2.as_ref());
assert_eq!(joined.remaining_len().unwrap(), Some(8));
joined.read(&mut test).unwrap();
assert_eq!(test, [1, 2, 3]);
assert_eq!(joined.remaining_len().unwrap(), Some(5));
joined.read(&mut test).unwrap();
assert_eq!(test, [4, 5, 6]);
assert_eq!(joined.remaining_len().unwrap(), Some(2));
joined.read(&mut test[0..2]).unwrap();
assert_eq!(test, [7, 8, 6]);
assert_eq!(joined.remaining_len().unwrap(), Some(0));
}
#[cfg(feature = "rocksdb")]
#[test]
fn test_open_database_auto_new() {
let db_dir = tempfile::TempDir::new().unwrap();
let db_path = db_dir.path().to_owned();
let paritydb_path = db_path.join("paritydb");
let rocksdb_path = db_path.join("rocksdb_path");
let source = DatabaseSource::Auto {
paritydb_path: paritydb_path.clone(),
rocksdb_path: rocksdb_path.clone(),
cache_size: 128,
};
// it should create new auto (paritydb) database
{
let db_res = open_database::<Block>(&source, DatabaseType::Full, true);
assert!(db_res.is_ok(), "New database should be created.");
}
		// it should reopen the existing auto (paritydb) database
{
let db_res = open_database::<Block>(&source, DatabaseType::Full, true);
assert!(db_res.is_ok(), "Existing parity database should be reopened");
}
		// opening the rocksdb path directly with `create = true` should create a new rocksdb database
{
let db_res = open_database::<Block>(
&DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 },
DatabaseType::Full,
true,
);
assert!(db_res.is_ok(), "New database should be opened.");
}
		// it should reopen the existing paritydb database
{
let db_res = open_database::<Block>(
&DatabaseSource::ParityDb { path: paritydb_path },
DatabaseType::Full,
true,
);
assert!(db_res.is_ok(), "Existing parity database should be reopened");
}
}
#[cfg(feature = "rocksdb")]
#[test]
fn test_open_database_rocksdb_new() {
let db_dir = tempfile::TempDir::new().unwrap();
let db_path = db_dir.path().to_owned();
let paritydb_path = db_path.join("paritydb");
let rocksdb_path = db_path.join("rocksdb_path");
let source = DatabaseSource::RocksDb { path: rocksdb_path.clone(), cache_size: 128 };
// it should create new rocksdb database
{
let db_res = open_database::<Block>(&source, DatabaseType::Full, true);
assert!(db_res.is_ok(), "New rocksdb database should be created");
}
// it should reopen existing auto (rocksdb) database
{
let db_res = open_database::<Block>(
&DatabaseSource::Auto {
paritydb_path: paritydb_path.clone(),
rocksdb_path: rocksdb_path.clone(),
cache_size: 128,
},
DatabaseType::Full,
true,
);
assert!(db_res.is_ok(), "Existing rocksdb database should be reopened");
}
		// opening the paritydb path with `create = true` should create a new paritydb database
{
let db_res = open_database::<Block>(
&DatabaseSource::ParityDb { path: paritydb_path },
DatabaseType::Full,
true,
);
assert!(db_res.is_ok(), "New paritydb database should be created");
}
		// it should reopen the existing rocksdb database
{
let db_res = open_database::<Block>(
&DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 },
DatabaseType::Full,
true,
);
assert!(db_res.is_ok(), "Existing rocksdb database should be reopened");
}
}
#[cfg(feature = "rocksdb")]
#[test]
fn test_open_database_paritydb_new() {
let db_dir = tempfile::TempDir::new().unwrap();
let db_path = db_dir.path().to_owned();
let paritydb_path = db_path.join("paritydb");
let rocksdb_path = db_path.join("rocksdb_path");
let source = DatabaseSource::ParityDb { path: paritydb_path.clone() };
// it should create new paritydb database
{
let db_res = open_database::<Block>(&source, DatabaseType::Full, true);
assert!(db_res.is_ok(), "New database should be created.");
}
		// it should reopen the existing paritydb database
{
let db_res = open_database::<Block>(&source, DatabaseType::Full, true);
assert!(db_res.is_ok(), "Existing parity database should be reopened");
}
		// opening the rocksdb path with `create = true` should create a new rocksdb database
{
let db_res = open_database::<Block>(
&DatabaseSource::RocksDb { path: rocksdb_path.clone(), cache_size: 128 },
DatabaseType::Full,
true,
);
assert!(db_res.is_ok(), "New rocksdb database should be created");
}
		// the auto source checks rocksdb first, so it reopens the rocksdb database created above
{
let db_res = open_database::<Block>(
&DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size: 128 },
DatabaseType::Full,
true,
);
assert!(db_res.is_ok(), "Existing parity database should be reopened");
}
}
}