// This file is part of Substrate.
// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Client backend that is backed by a database.
//!
//! # Canonicality vs. Finality
//!
//! Finality indicates that a block will not be reverted, according to the consensus algorithm,
//! while canonicality indicates that the block may be reverted, but we will be unable to do so,
//! having discarded the heavy state that would allow a chain reorganization.
//!
//! Finality implies canonicality but not vice-versa.
#![warn(missing_docs)]
pub mod offchain;
pub mod bench;
mod children;
mod parity_db;
mod record_stats_state;
mod stats;
#[cfg(any(feature = "rocksdb", test))]
mod upgrade;
mod utils;
use linked_hash_map::LinkedHashMap;
use log::{debug, trace, warn};
use parking_lot::{Mutex, RwLock};
use std::{
collections::{HashMap, HashSet},
io,
path::{Path, PathBuf},
sync::Arc,
};
use crate::{
record_stats_state::RecordStatsState,
stats::StateUsageStats,
utils::{meta_keys, read_db, read_meta, DatabaseType, Meta},
};
use codec::{Decode, Encode};
use hash_db::Prefix;
use sc_client_api::{
backend::NewBlockState,
leaves::{FinalizationOutcome, LeafSet},
utils::is_descendent_of,
IoInfo, MemoryInfo, MemorySize, UsageInfo,
};
use sc_state_db::{IsPruned, StateDb};
use sp_arithmetic::traits::Saturating;
use sp_blockchain::{
well_known_cache_keys, Backend as _, CachedHeaderMetadata, Error as ClientError, HeaderBackend,
HeaderMetadata, HeaderMetadataCache, Result as ClientResult,
};
use sp_core::{
offchain::OffchainOverlayedChange,
storage::{well_known_keys, ChildInfo},
};
use sp_database::Transaction;
use sp_runtime::{
generic::BlockId,
traits::{
Block as BlockT, Hash, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion,
Zero,
},
Justification, Justifications, StateVersion, Storage,
};
use sp_state_machine::{
backend::{AsTrieBackend, Backend as StateBackend},
ChildStorageCollection, DBValue, IndexOperation, OffchainChangesCollection, StateMachineStats,
StorageCollection, UsageInfo as StateUsageInfo,
};
use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, PrefixedMemoryDB};
// Re-export the Database trait so that one can pass an implementation of it.
pub use sc_state_db::PruningMode;
pub use sp_database::Database;
pub use bench::BenchmarkingState;
const CACHE_HEADERS: usize = 8;
/// DB-backed patricia trie state, transaction type is an overlay of changes to commit.
pub type DbState<B> =
	sp_state_machine::TrieBackend<Arc<dyn sp_state_machine::Storage<HashFor<B>>>, HashFor<B>>;
/// Builder for [`DbState`].
pub type DbStateBuilder<B> = sp_state_machine::TrieBackendBuilder<
	Arc<dyn sp_state_machine::Storage<HashFor<B>>>,
	HashFor<B>,
>;
/// Length of a [`DbHash`].
const DB_HASH_LEN: usize = 32;
/// Hash type that this backend uses for the database.
pub type DbHash = sp_core::H256;
/// An extrinsic entry in the database.
#[derive(Debug, Encode, Decode)]
enum DbExtrinsic<B: BlockT> {
	/// Extrinsic that contains indexed data.
	Indexed {
		/// Hash of the indexed part.
		hash: DbHash,
		/// Extrinsic header.
		header: Vec<u8>,
},
/// Complete extrinsic data.
Full(B::Extrinsic),
}
/// A reference tracking state.
///
/// It makes sure that the hash we are using stays pinned in storage
/// until this structure is dropped.
pub struct RefTrackingState<Block: BlockT> {
	state: DbState<Block>,
	storage: Arc<StorageDb<Block>>,
	parent_hash: Option<Block::Hash>,
}
impl<B: BlockT> RefTrackingState<B> {
	fn new(state: DbState<B>, storage: Arc<StorageDb<B>>, parent_hash: Option<B::Hash>) -> Self {
		RefTrackingState { state, parent_hash, storage }
	}
}
impl<B: BlockT> Drop for RefTrackingState<B> {
fn drop(&mut self) {
if let Some(hash) = &self.parent_hash {
self.storage.state_db.unpin(hash);
}
}
}
impl<B: BlockT> std::fmt::Debug for RefTrackingState<B> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "Block {:?}", self.parent_hash)
}
}
impl<B: BlockT> StateBackend<HashFor<B>> for RefTrackingState<B> {
	type Error = <DbState<B> as StateBackend<HashFor<B>>>::Error;
	type Transaction = <DbState<B> as StateBackend<HashFor<B>>>::Transaction;
	type TrieBackendStorage = <DbState<B> as StateBackend<HashFor<B>>>::TrieBackendStorage;
	fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.storage(key)
	}
	fn storage_hash(&self, key: &[u8]) -> Result<Option<B::Hash>, Self::Error> {
		self.state.storage_hash(key)
	}
	fn child_storage(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.child_storage(child_info, key)
	}
	fn child_storage_hash(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<B::Hash>, Self::Error> {
		self.state.child_storage_hash(child_info, key)
	}
	fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
		self.state.exists_storage(key)
	}
	fn exists_child_storage(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<bool, Self::Error> {
		self.state.exists_child_storage(child_info, key)
	}
	fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.next_storage_key(key)
	}
	fn next_child_storage_key(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.next_child_storage_key(child_info, key)
	}
	fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
		self.state.for_keys_with_prefix(prefix, f)
	}
	fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(&self, prefix: &[u8], f: F) {
		self.state.for_key_values_with_prefix(prefix, f)
	}
	fn apply_to_key_values_while<F: FnMut(Vec<u8>, Vec<u8>) -> bool>(
		&self,
		child_info: Option<&ChildInfo>,
		prefix: Option<&[u8]>,
		start_at: Option<&[u8]>,
		f: F,
		allow_missing: bool,
	) -> Result<bool, Self::Error> {
		self.state
			.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing)
	}
	fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(
		&self,
		child_info: Option<&ChildInfo>,
		prefix: Option<&[u8]>,
		start_at: Option<&[u8]>,
		f: F,
	) {
		self.state.apply_to_keys_while(child_info, prefix, start_at, f)
	}
	fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
		&self,
		child_info: &ChildInfo,
		prefix: &[u8],
		f: F,
	) {
		self.state.for_child_keys_with_prefix(child_info, prefix, f)
	}
	fn storage_root<'a>(
		&self,
		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
		state_version: StateVersion,
	) -> (B::Hash, Self::Transaction)
	where
		B::Hash: Ord,
	{
		self.state.storage_root(delta, state_version)
	}
	fn child_storage_root<'a>(
		&self,
		child_info: &ChildInfo,
		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
		state_version: StateVersion,
	) -> (B::Hash, bool, Self::Transaction)
	where
		B::Hash: Ord,
	{
		self.state.child_storage_root(child_info, delta, state_version)
	}
	fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
		self.state.pairs()
	}
	fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
		self.state.keys(prefix)
	}
	fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec<Vec<u8>> {
		self.state.child_keys(child_info, prefix)
	}
	fn register_overlay_stats(&self, stats: &StateMachineStats) {
		self.state.register_overlay_stats(stats);
	}
	fn usage_info(&self) -> StateUsageInfo {
		self.state.usage_info()
	}
}
impl<B: BlockT> AsTrieBackend<HashFor<B>> for RefTrackingState<B> {
	type TrieBackendStorage = <DbState<B> as StateBackend<HashFor<B>>>::TrieBackendStorage;
	fn as_trie_backend(
		&self,
	) -> &sp_state_machine::TrieBackend<Self::TrieBackendStorage, HashFor<B>> {
		&self.state.as_trie_backend()
	}
}
/// Database settings.
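///
/// # Example
///
/// A minimal, illustrative sketch of assembling settings for a ParityDb-backed node; the
/// path, cache size and pruning choices below are arbitrary assumptions, not recommended
/// defaults.
///
/// ```ignore
/// use sc_client_db::{BlocksPruning, DatabaseSettings, DatabaseSource};
///
/// let settings = DatabaseSettings {
/// 	// 64 MiB trie cache; `None` disables the cache entirely.
/// 	trie_cache_maximum_size: Some(64 * 1024 * 1024),
/// 	// `None` falls back to the pruning mode already stored in the database (or the default).
/// 	state_pruning: None,
/// 	source: DatabaseSource::ParityDb { path: "/tmp/example-chain-db".into() },
/// 	// Keep block bodies of finalized blocks only.
/// 	blocks_pruning: BlocksPruning::KeepFinalized,
/// };
/// ```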
pub struct DatabaseSettings {
/// The maximum trie cache size in bytes.
///
/// If `None` is given, the cache is disabled.
	pub trie_cache_maximum_size: Option<usize>,
/// Requested state pruning mode.
	pub state_pruning: Option<PruningMode>,
/// Where to find the database.
pub source: DatabaseSource,
/// Block pruning mode.
///
/// NOTE: only finalized blocks are subject for removal!
pub blocks_pruning: BlocksPruning,
}
/// Block pruning settings.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum BlocksPruning {
/// Keep full block history, of every block that was ever imported.
KeepAll,
/// Keep full finalized block history.
KeepFinalized,
/// Keep N recent finalized blocks.
Some(u32),
}
/// Where to find the database.
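///
/// # Example
///
/// An illustrative sketch (the paths are assumptions) of reading back and moving the on-disk
/// location of a source with [`DatabaseSource::path`] and [`DatabaseSource::set_path`];
/// `Custom` sources have no path and return `None`/`false`.
///
/// ```ignore
/// use sc_client_db::DatabaseSource;
/// use std::path::Path;
///
/// let mut source = DatabaseSource::ParityDb { path: "/tmp/old-location".into() };
/// assert_eq!(source.path(), Some(Path::new("/tmp/old-location")));
///
/// // Relocate the database directory.
/// assert!(source.set_path(Path::new("/tmp/new-location")));
/// assert_eq!(source.path(), Some(Path::new("/tmp/new-location")));
/// ```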
#[derive(Debug, Clone)]
pub enum DatabaseSource {
/// Check given path, and see if there is an existing database there. If it's either `RocksDb`
/// or `ParityDb`, use it. If there is none, create a new instance of `ParityDb`.
Auto {
/// Path to the paritydb database.
paritydb_path: PathBuf,
/// Path to the rocksdb database.
rocksdb_path: PathBuf,
/// Cache size in MiB. Used only by `RocksDb` variant of `DatabaseSource`.
cache_size: usize,
},
/// Load a RocksDB database from a given path. Recommended for most uses.
#[cfg(feature = "rocksdb")]
RocksDb {
/// Path to the database.
path: PathBuf,
/// Cache size in MiB.
cache_size: usize,
},
/// Load a ParityDb database from a given path.
ParityDb {
/// Path to the database.
path: PathBuf,
},
/// Use a custom already-open database.
Custom {
		/// The handle to the custom storage.
		db: Arc<dyn Database<DbHash>>,
		/// If set, the `create` flag will be required to open such a datasource.
		require_create_flag: bool,
},
}
impl DatabaseSource {
/// Return path for databases that are stored on disk.
pub fn path(&self) -> Option<&Path> {
match self {
// as per https://github.com/paritytech/substrate/pull/9500#discussion_r684312550
//
// IIUC this is needed for polkadot to create its own dbs, so until it can use parity db
// I would think rocksdb, but later parity-db.
DatabaseSource::Auto { paritydb_path, .. } => Some(paritydb_path),
#[cfg(feature = "rocksdb")]
DatabaseSource::RocksDb { path, .. } => Some(path),
DatabaseSource::ParityDb { path } => Some(path),
DatabaseSource::Custom { .. } => None,
}
}
/// Set path for databases that are stored on disk.
pub fn set_path(&mut self, p: &Path) -> bool {
match self {
DatabaseSource::Auto { ref mut paritydb_path, .. } => {
*paritydb_path = p.into();
true
},
#[cfg(feature = "rocksdb")]
DatabaseSource::RocksDb { ref mut path, .. } => {
*path = p.into();
true
},
DatabaseSource::ParityDb { ref mut path } => {
*path = p.into();
true
},
DatabaseSource::Custom { .. } => false,
}
}
}
impl std::fmt::Display for DatabaseSource {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let name = match self {
DatabaseSource::Auto { .. } => "Auto",
#[cfg(feature = "rocksdb")]
DatabaseSource::RocksDb { .. } => "RocksDb",
DatabaseSource::ParityDb { .. } => "ParityDb",
DatabaseSource::Custom { .. } => "Custom",
};
write!(f, "{}", name)
}
}
pub(crate) mod columns {
pub const META: u32 = crate::utils::COLUMN_META;
pub const STATE: u32 = 1;
pub const STATE_META: u32 = 2;
/// maps hashes to lookup keys and numbers to canon hashes.
pub const KEY_LOOKUP: u32 = 3;
pub const HEADER: u32 = 4;
pub const BODY: u32 = 5;
pub const JUSTIFICATIONS: u32 = 6;
pub const AUX: u32 = 8;
/// Offchain workers local storage
pub const OFFCHAIN: u32 = 9;
/// Transactions
pub const TRANSACTION: u32 = 11;
pub const BODY_INDEX: u32 = 12;
}
struct PendingBlock<Block: BlockT> {
	header: Block::Header,
	justifications: Option<Justifications>,
	body: Option<Vec<Block::Extrinsic>>,
	indexed_body: Option<Vec<Vec<u8>>>,
	leaf_state: NewBlockState,
}
// wrapper that implements trait required for state_db
#[derive(Clone)]
struct StateMetaDb(Arc<dyn Database<DbHash>>);
impl sc_state_db::MetaDb for StateMetaDb {
type Error = sp_database::error::DatabaseError;
	fn get_meta(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.0.get(columns::STATE_META, key))
}
}
struct MetaUpdate<Block: BlockT> {
	pub hash: Block::Hash,
	pub number: NumberFor<Block>,
pub is_best: bool,
pub is_finalized: bool,
pub with_state: bool,
}
fn cache_header<Hash: std::cmp::Eq + std::hash::Hash, Header>(
	cache: &mut LinkedHashMap<Hash, Option<Header>>,
	hash: Hash,
	header: Option<Header>,
) {
cache.insert(hash, header);
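	// Evict from the front (the least recently inserted/accessed entry) until the cache is
	// back under the fixed `CACHE_HEADERS` capacity.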
while cache.len() > CACHE_HEADERS {
cache.pop_front();
}
}
/// Block database
pub struct BlockchainDb<Block: BlockT> {
	db: Arc<dyn Database<DbHash>>,
	meta: Arc<RwLock<Meta<NumberFor<Block>, Block::Hash>>>,
	leaves: RwLock<LeafSet<Block::Hash, NumberFor<Block>>>,
	header_metadata_cache: Arc<HeaderMetadataCache<Block>>,
	header_cache: Mutex<LinkedHashMap<Block::Hash, Option<Block::Header>>>,
}
impl<Block: BlockT> BlockchainDb<Block> {
	fn new(db: Arc<dyn Database<DbHash>>) -> ClientResult<Self> {
		let meta = read_meta::<Block>(&*db, columns::HEADER)?;
let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?;
Ok(BlockchainDb {
db,
leaves: RwLock::new(leaves),
meta: Arc::new(RwLock::new(meta)),
header_metadata_cache: Arc::new(HeaderMetadataCache::default()),
header_cache: Default::default(),
})
}
	fn update_meta(&self, update: MetaUpdate<Block>) {
let MetaUpdate { hash, number, is_best, is_finalized, with_state } = update;
let mut meta = self.meta.write();
if number.is_zero() {
meta.genesis_hash = hash;
}
if is_best {
meta.best_number = number;
meta.best_hash = hash;
}
if is_finalized {
if with_state {
meta.finalized_state = Some((hash, number));
}
meta.finalized_number = number;
meta.finalized_hash = hash;
}
}
	fn update_block_gap(&self, gap: Option<(NumberFor<Block>, NumberFor<Block>)>) {
let mut meta = self.meta.write();
meta.block_gap = gap;
}
}
impl<Block: BlockT> sc_client_api::blockchain::HeaderBackend<Block> for BlockchainDb<Block> {
	fn header(&self, id: BlockId<Block>) -> ClientResult<Option<Block::Header>> {
match &id {
BlockId::Hash(h) => {
let mut cache = self.header_cache.lock();
if let Some(result) = cache.get_refresh(h) {
return Ok(result.clone())
}
let header =
utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?;
cache_header(&mut cache, *h, header.clone());
Ok(header)
},
BlockId::Number(_) =>
utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id),
}
}
	fn info(&self) -> sc_client_api::blockchain::Info<Block> {
let meta = self.meta.read();
sc_client_api::blockchain::Info {
best_hash: meta.best_hash,
best_number: meta.best_number,
genesis_hash: meta.genesis_hash,
finalized_hash: meta.finalized_hash,
finalized_number: meta.finalized_number,
finalized_state: meta.finalized_state,
number_leaves: self.leaves.read().count(),
block_gap: meta.block_gap,
}
}
	fn status(&self, id: BlockId<Block>) -> ClientResult<sc_client_api::blockchain::BlockStatus> {
let exists = match id {
BlockId::Hash(_) => self.header(id)?.is_some(),
BlockId::Number(n) => n <= self.meta.read().best_number,
};
match exists {
true => Ok(sc_client_api::blockchain::BlockStatus::InChain),
false => Ok(sc_client_api::blockchain::BlockStatus::Unknown),
}
}
	fn number(&self, hash: Block::Hash) -> ClientResult<Option<NumberFor<Block>>> {
Ok(self.header_metadata(hash).ok().map(|header_metadata| header_metadata.number))
}
	fn hash(&self, number: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
self.header(BlockId::Number(number))
.map(|maybe_header| maybe_header.map(|header| header.hash()))
}
}
impl<Block: BlockT> sc_client_api::blockchain::Backend<Block> for BlockchainDb<Block> {
	fn body(&self, id: BlockId<Block>) -> ClientResult<Option<Vec<Block::Extrinsic>>> {
if let Some(body) = read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, id)? {
// Plain body
match Decode::decode(&mut &body[..]) {
Ok(body) => return Ok(Some(body)),
Err(err) =>
return Err(sp_blockchain::Error::Backend(format!(
"Error decoding body: {}",
err
))),
}
}
if let Some(index) = read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)? {
			match Vec::<DbExtrinsic<Block>>::decode(&mut &index[..]) {
Ok(index) => {
let mut body = Vec::new();
for ex in index {
match ex {
DbExtrinsic::Indexed { hash, header } => {
match self.db.get(columns::TRANSACTION, hash.as_ref()) {
Some(t) => {
let mut input =
utils::join_input(header.as_ref(), t.as_ref());
let ex = Block::Extrinsic::decode(&mut input).map_err(
|err| {
sp_blockchain::Error::Backend(format!(
"Error decoding indexed extrinsic: {}",
err
))
},
)?;
body.push(ex);
},
None =>
return Err(sp_blockchain::Error::Backend(format!(
"Missing indexed transaction {:?}",
hash
))),
};
},
DbExtrinsic::Full(ex) => {
body.push(ex);
},
}
}
return Ok(Some(body))
},
Err(err) =>
return Err(sp_blockchain::Error::Backend(format!(
"Error decoding body list: {}",
err
))),
}
}
Ok(None)
}
	fn justifications(&self, id: BlockId<Block>) -> ClientResult<Option<Justifications>> {
match read_db(&*self.db, columns::KEY_LOOKUP, columns::JUSTIFICATIONS, id)? {
Some(justifications) => match Decode::decode(&mut &justifications[..]) {
Ok(justifications) => Ok(Some(justifications)),
Err(err) =>
return Err(sp_blockchain::Error::Backend(format!(
"Error decoding justifications: {}",
err
))),
},
None => Ok(None),
}
}
	fn last_finalized(&self) -> ClientResult<Block::Hash> {
Ok(self.meta.read().finalized_hash)
}
	fn leaves(&self) -> ClientResult<Vec<Block::Hash>> {
Ok(self.leaves.read().hashes())
}
	fn displaced_leaves_after_finalizing(
		&self,
		block_number: NumberFor<Block>,
	) -> ClientResult<Vec<Block::Hash>> {
		Ok(self
			.leaves
			.read()
			.displaced_by_finalize_height(block_number)
			.leaves()
			.cloned()
			.collect::<Vec<_>>())
}
	fn children(&self, parent_hash: Block::Hash) -> ClientResult<Vec<Block::Hash>> {
children::read_children(&*self.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash)
}
	fn indexed_transaction(&self, hash: &Block::Hash) -> ClientResult<Option<Vec<u8>>> {
Ok(self.db.get(columns::TRANSACTION, hash.as_ref()))
}
	fn has_indexed_transaction(&self, hash: &Block::Hash) -> ClientResult<bool> {
Ok(self.db.contains(columns::TRANSACTION, hash.as_ref()))
}
	fn block_indexed_body(&self, id: BlockId<Block>) -> ClientResult<Option<Vec<Vec<u8>>>> {
let body = match read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)? {
Some(body) => body,
None => return Ok(None),
};
		match Vec::<DbExtrinsic<Block>>::decode(&mut &body[..]) {
Ok(index) => {
let mut transactions = Vec::new();
for ex in index.into_iter() {
if let DbExtrinsic::Indexed { hash, .. } = ex {
match self.db.get(columns::TRANSACTION, hash.as_ref()) {
Some(t) => transactions.push(t),
None =>
return Err(sp_blockchain::Error::Backend(format!(
"Missing indexed transaction {:?}",
hash
))),
}
}
}
Ok(Some(transactions))
},
Err(err) =>
Err(sp_blockchain::Error::Backend(format!("Error decoding body list: {}", err))),
}
}
}
impl<Block: BlockT> HeaderMetadata<Block> for BlockchainDb<Block> {
	type Error = sp_blockchain::Error;
	fn header_metadata(
		&self,
		hash: Block::Hash,
	) -> Result<CachedHeaderMetadata<Block>, Self::Error> {
self.header_metadata_cache.header_metadata(hash).map_or_else(
|| {
self.header(BlockId::hash(hash))?
.map(|header| {
let header_metadata = CachedHeaderMetadata::from(&header);
self.header_metadata_cache
.insert_header_metadata(header_metadata.hash, header_metadata.clone());
header_metadata
})
.ok_or_else(|| {
ClientError::UnknownBlock(format!(
"Header was not found in the database: {:?}",
hash
))
})
},
Ok,
)
}
	fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata<Block>) {
self.header_metadata_cache.insert_header_metadata(hash, metadata)
}
fn remove_header_metadata(&self, hash: Block::Hash) {
self.header_cache.lock().remove(&hash);
self.header_metadata_cache.remove_header_metadata(hash);
}
}
/// Database transaction
pub struct BlockImportOperation<Block: BlockT> {
	old_state: RecordStatsState<RefTrackingState<Block>, Block>,
	db_updates: PrefixedMemoryDB<HashFor<Block>>,
	storage_updates: StorageCollection,
	child_storage_updates: ChildStorageCollection,
	offchain_storage_updates: OffchainChangesCollection,
	pending_block: Option<PendingBlock<Block>>,
	aux_ops: Vec<(Vec<u8>, Option<Vec<u8>>)>,
	finalized_blocks: Vec<(BlockId<Block>, Option<Justification>)>,
	set_head: Option<BlockId<Block>>,
	commit_state: bool,
	index_ops: Vec<IndexOperation>,
}
impl<Block: BlockT> BlockImportOperation<Block> {
	fn apply_offchain(&mut self, transaction: &mut Transaction<DbHash>) {
let mut count = 0;
for ((prefix, key), value_operation) in self.offchain_storage_updates.drain(..) {
count += 1;
let key = crate::offchain::concatenate_prefix_and_key(&prefix, &key);
match value_operation {
OffchainOverlayedChange::SetValue(val) =>
transaction.set_from_vec(columns::OFFCHAIN, &key, val),
OffchainOverlayedChange::Remove => transaction.remove(columns::OFFCHAIN, &key),
}
}
if count > 0 {
log::debug!(target: "sc_offchain", "Applied {} offchain indexing changes.", count);
}
}
	fn apply_aux(&mut self, transaction: &mut Transaction<DbHash>) {
for (key, maybe_val) in self.aux_ops.drain(..) {
match maybe_val {
Some(val) => transaction.set_from_vec(columns::AUX, &key, val),
None => transaction.remove(columns::AUX, &key),
}
}
}
fn apply_new_state(
&mut self,
storage: Storage,
state_version: StateVersion,
	) -> ClientResult<Block::Hash> {
if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(k)) {
return Err(sp_blockchain::Error::InvalidState)
}
let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)| {
(
&child_content.child_info,
child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))),
)
});
let (root, transaction) = self.old_state.full_storage_root(
storage.top.iter().map(|(k, v)| (&k[..], Some(&v[..]))),
child_delta,
state_version,
);
self.db_updates = transaction;
Ok(root)
}
}
impl<Block: BlockT> sc_client_api::backend::BlockImportOperation<Block>
	for BlockImportOperation<Block>
{
	type State = RecordStatsState<RefTrackingState<Block>, Block>;
	fn state(&self) -> ClientResult<Option<&Self::State>> {
Ok(Some(&self.old_state))
}
fn set_block_data(
&mut self,
header: Block::Header,
		body: Option<Vec<Block::Extrinsic>>,
		indexed_body: Option<Vec<Vec<u8>>>,
		justifications: Option<Justifications>,
leaf_state: NewBlockState,
) -> ClientResult<()> {
assert!(self.pending_block.is_none(), "Only one block per operation is allowed");
self.pending_block =
Some(PendingBlock { header, body, indexed_body, justifications, leaf_state });
Ok(())
}
	fn update_cache(&mut self, _cache: HashMap<well_known_cache_keys::Id, Vec<u8>>) {
// Currently cache isn't implemented on full nodes.
}
	fn update_db_storage(&mut self, update: PrefixedMemoryDB<HashFor<Block>>) -> ClientResult<()> {
self.db_updates = update;
Ok(())
}
fn reset_storage(
&mut self,
storage: Storage,
state_version: StateVersion,
	) -> ClientResult<Block::Hash> {
let root = self.apply_new_state(storage, state_version)?;
self.commit_state = true;
Ok(root)
}
fn set_genesis_state(
&mut self,
storage: Storage,
commit: bool,
state_version: StateVersion,
	) -> ClientResult<Block::Hash> {
let root = self.apply_new_state(storage, state_version)?;
self.commit_state = commit;
Ok(root)
}
	fn insert_aux<I>(&mut self, ops: I) -> ClientResult<()>
	where
		I: IntoIterator<Item = (Vec<u8>, Option<Vec<u8>>)>,
{
self.aux_ops.append(&mut ops.into_iter().collect());
Ok(())
}
fn update_storage(
&mut self,
update: StorageCollection,
child_update: ChildStorageCollection,
) -> ClientResult<()> {
self.storage_updates = update;
self.child_storage_updates = child_update;
Ok(())
}
fn update_offchain_storage(
&mut self,
offchain_update: OffchainChangesCollection,
) -> ClientResult<()> {
self.offchain_storage_updates = offchain_update;
Ok(())
}
fn mark_finalized(
&mut self,
		block: BlockId<Block>,
		justification: Option<Justification>,
) -> ClientResult<()> {
self.finalized_blocks.push((block, justification));
Ok(())
}
	fn mark_head(&mut self, block: BlockId<Block>) -> ClientResult<()> {
assert!(self.set_head.is_none(), "Only one set head per operation is allowed");
self.set_head = Some(block);
Ok(())
}
	fn update_transaction_index(&mut self, index_ops: Vec<IndexOperation>) -> ClientResult<()> {
self.index_ops = index_ops;
Ok(())
}
}
struct StorageDb<Block: BlockT> {
	pub db: Arc<dyn Database<DbHash>>,
	pub state_db: StateDb<Block::Hash, Vec<u8>, StateMetaDb>,
prefix_keys: bool,
}
impl<Block: BlockT> sp_state_machine::Storage<HashFor<Block>> for StorageDb<Block> {
	fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result<Option<DBValue>, String> {
		if self.prefix_keys {
			let key = prefixed_key::<HashFor<Block>>(key, prefix);
self.state_db.get(&key, self)
} else {
self.state_db.get(key.as_ref(), self)
}
.map_err(|e| format!("Database backend error: {:?}", e))
}
}
impl<Block: BlockT> sc_state_db::NodeDb for StorageDb<Block> {
type Error = io::Error;
type Key = [u8];
	fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.db.get(columns::STATE, key))
}
}
struct DbGenesisStorage<Block: BlockT> {
	root: Block::Hash,
	storage: PrefixedMemoryDB<HashFor<Block>>,
}
impl<Block: BlockT> DbGenesisStorage<Block> {
	pub fn new(root: Block::Hash, storage: PrefixedMemoryDB<HashFor<Block>>) -> Self {
DbGenesisStorage { root, storage }
}
}
impl<Block: BlockT> sp_state_machine::Storage<HashFor<Block>> for DbGenesisStorage<Block> {
	fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result<Option<DBValue>, String> {
use hash_db::HashDB;
Ok(self.storage.get(key, prefix))
}
}
struct EmptyStorage<Block: BlockT>(pub Block::Hash);
impl<Block: BlockT> EmptyStorage<Block> {
	pub fn new() -> Self {
		let mut root = Block::Hash::default();
		let mut mdb = MemoryDB::<HashFor<Block>>::default();
		// both triedbmut are the same on empty storage.
		sp_trie::trie_types::TrieDBMutBuilderV1::<HashFor<Block>>::new(&mut mdb, &mut root).build();
EmptyStorage(root)
}
}
impl<Block: BlockT> sp_state_machine::Storage<HashFor<Block>> for EmptyStorage<Block> {
	fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result<Option<DBValue>, String> {
Ok(None)
}
}
/// Frozen `value` at time `at`.
///
/// Used as inner structure under lock in `FrozenForDuration`.
struct Frozen<T: Clone> {
	at: std::time::Instant,
	value: Option<T>,
}
/// Some value frozen for period of time.
///
/// If the time `duration` has not passed since the value was instantiated, the
/// current frozen value is returned. Otherwise, you have to provide
/// a new value which will again be frozen for `duration`.
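///
/// # Example
///
/// An illustrative, crate-internal sketch: the closure passed to `take_or_else` is only
/// evaluated when there is no frozen value yet or the previous one is older than the
/// configured duration.
///
/// ```ignore
/// let frozen = FrozenForDuration::<u64>::new(std::time::Duration::from_secs(2));
/// // First call: no value yet, so the closure runs and `1` is frozen.
/// assert_eq!(frozen.take_or_else(|| 1), 1);
/// // Within the next two seconds the frozen value is returned and the closure is skipped.
/// assert_eq!(frozen.take_or_else(|| 2), 1);
/// ```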
pub(crate) struct FrozenForDuration<T: Clone> {
	duration: std::time::Duration,
	value: parking_lot::Mutex<Frozen<T>>,
}
impl<T: Clone> FrozenForDuration<T> {
fn new(duration: std::time::Duration) -> Self {
Self { duration, value: Frozen { at: std::time::Instant::now(), value: None }.into() }
}
	fn take_or_else<F>(&self, f: F) -> T
where
F: FnOnce() -> T,
{
let mut lock = self.value.lock();
let now = std::time::Instant::now();
if now.saturating_duration_since(lock.at) > self.duration || lock.value.is_none() {
let new_value = f();
lock.at = now;
lock.value = Some(new_value.clone());
new_value
} else {
			lock.value.as_ref().expect("Checked within branch above; qed").clone()
}
}
}
/// Disk backend.
///
/// Disk backend keeps data in a key-value store. In archive mode, trie nodes are kept from all
/// blocks. Otherwise, trie nodes are kept only from some recent blocks.
pub struct Backend<Block: BlockT> {
	storage: Arc<StorageDb<Block>>,
	offchain_storage: offchain::LocalStorage,
	blockchain: BlockchainDb<Block>,
	canonicalization_delay: u64,
	import_lock: Arc<RwLock<()>>,
	is_archive: bool,
	blocks_pruning: BlocksPruning,
	io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>,
	state_usage: Arc<StateUsageStats>,
	genesis_state: RwLock<Option<Arc<DbGenesisStorage<Block>>>>,
	shared_trie_cache: Option<sp_trie::cache::SharedTrieCache<HashFor<Block>>>,
}
impl<Block: BlockT> Backend<Block> {
/// Create a new instance of database backend.
///
/// The pruning window is how old a block must be before the state is pruned.
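	///
	/// # Example
	///
	/// An illustrative sketch of opening a backend for some `Block` type; the settings mirror
	/// the [`DatabaseSettings`] example and the canonicalization delay of 4096 blocks is an
	/// assumption, not a prescribed value.
	///
	/// ```ignore
	/// let backend = Backend::<Block>::new(
	/// 	DatabaseSettings {
	/// 		trie_cache_maximum_size: Some(64 * 1024 * 1024),
	/// 		state_pruning: Some(PruningMode::blocks_pruning(256)),
	/// 		source: DatabaseSource::ParityDb { path: "/tmp/example-chain-db".into() },
	/// 		blocks_pruning: BlocksPruning::KeepFinalized,
	/// 	},
	/// 	4096,
	/// )
	/// .expect("failed to open the chain database");
	/// ```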
	pub fn new(db_config: DatabaseSettings, canonicalization_delay: u64) -> ClientResult<Self> {
use utils::OpenDbError;
let db_source = &db_config.source;
let (needs_init, db) =
			match crate::utils::open_database::<Block>(db_source, DatabaseType::Full, false) {
Ok(db) => (false, db),
Err(OpenDbError::DoesNotExist) => {
let db =
						crate::utils::open_database::<Block>(db_source, DatabaseType::Full, true)?;
(true, db)
},
Err(as_is) => return Err(as_is.into()),
};
Self::from_database(db as Arc<_>, canonicalization_delay, &db_config, needs_init)
}
/// Create new memory-backed client backend for tests.
#[cfg(any(test, feature = "test-helpers"))]
pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self {
Self::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), canonicalization_delay)
}
/// Create new memory-backed client backend for tests.
#[cfg(any(test, feature = "test-helpers"))]
pub fn new_test_with_tx_storage(
blocks_pruning: BlocksPruning,
canonicalization_delay: u64,
) -> Self {
let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS);
let db = sp_database::as_database(db);
let state_pruning = match blocks_pruning {
BlocksPruning::KeepAll => PruningMode::ArchiveAll,
BlocksPruning::KeepFinalized => PruningMode::ArchiveCanonical,
BlocksPruning::Some(n) => PruningMode::blocks_pruning(n),
};
let db_setting = DatabaseSettings {
trie_cache_maximum_size: Some(16 * 1024 * 1024),
state_pruning: Some(state_pruning),
source: DatabaseSource::Custom { db, require_create_flag: true },
blocks_pruning,
};
Self::new(db_setting, canonicalization_delay).expect("failed to create test-db")
}
/// Expose the Database that is used by this backend.
/// The second argument is the Column that stores the State.
///
/// Should only be needed for benchmarking.
#[cfg(any(feature = "runtime-benchmarks"))]
	pub fn expose_db(&self) -> (Arc<dyn sp_database::Database<DbHash>>, sp_database::ColumnId) {
(self.storage.db.clone(), columns::STATE)
}
/// Expose the Storage that is used by this backend.
///
/// Should only be needed for benchmarking.
#[cfg(any(feature = "runtime-benchmarks"))]
	pub fn expose_storage(&self) -> Arc<dyn sp_state_machine::Storage<HashFor<Block>>> {
self.storage.clone()
}
	fn from_database(
		db: Arc<dyn Database<DbHash>>,
		canonicalization_delay: u64,
		config: &DatabaseSettings,
		should_init: bool,
	) -> ClientResult<Self> {
let mut db_init_transaction = Transaction::new();
let requested_state_pruning = config.state_pruning.clone();
let state_meta_db = StateMetaDb(db.clone());
let map_e = sp_blockchain::Error::from_state_db;
let (state_db_init_commit_set, state_db) = StateDb::open(
state_meta_db,
requested_state_pruning,
!db.supports_ref_counting(),
should_init,
)
.map_err(map_e)?;
apply_state_commit(&mut db_init_transaction, state_db_init_commit_set);
let state_pruning_used = state_db.pruning_mode();
let is_archive_pruning = state_pruning_used.is_archive();
let blockchain = BlockchainDb::new(db.clone())?;
let storage_db =
StorageDb { db: db.clone(), state_db, prefix_keys: !db.supports_ref_counting() };
let offchain_storage = offchain::LocalStorage::new(db.clone());
let backend = Backend {
storage: Arc::new(storage_db),
offchain_storage,
blockchain,
canonicalization_delay,
import_lock: Default::default(),
is_archive: is_archive_pruning,
io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)),
state_usage: Arc::new(StateUsageStats::new()),
blocks_pruning: config.blocks_pruning,
genesis_state: RwLock::new(None),
shared_trie_cache: config.trie_cache_maximum_size.map(|maximum_size| {
SharedTrieCache::new(sp_trie::cache::CacheSize::Maximum(maximum_size))
}),
};
// Older DB versions have no last state key. Check if the state is available and set it.
let info = backend.blockchain.info();
if info.finalized_state.is_none() &&
info.finalized_hash != Default::default() &&
sc_client_api::Backend::have_state_at(
&backend,
&info.finalized_hash,
info.finalized_number,
) {
backend.blockchain.update_meta(MetaUpdate {
hash: info.finalized_hash,
number: info.finalized_number,
is_best: info.finalized_hash == info.best_hash,
is_finalized: true,
with_state: true,
});
}
db.commit(db_init_transaction)?;
Ok(backend)
}
/// Handle setting head within a transaction. `route_to` should be the last
/// block that existed in the database. `best_to` should be the best block
/// to be set.
///
/// In the case where the new best block is a block to be imported, `route_to`
/// should be the parent of `best_to`. In the case where we set an existing block
/// to be best, `route_to` should equal to `best_to`.
	fn set_head_with_transaction(
		&self,
		transaction: &mut Transaction<DbHash>,
		route_to: Block::Hash,
		best_to: (NumberFor<Block>, Block::Hash),
	) -> ClientResult<(Vec<Block::Hash>, Vec<Block::Hash>)> {
let mut enacted = Vec::default();
let mut retracted = Vec::default();
let (best_number, best_hash) = best_to;
let meta = self.blockchain.meta.read();
if meta.best_number > best_number &&
			(meta.best_number - best_number).saturated_into::<u64>() >
self.canonicalization_delay
{
return Err(sp_blockchain::Error::SetHeadTooOld)
}
let parent_exists =
self.blockchain.status(BlockId::Hash(route_to))? == sp_blockchain::BlockStatus::InChain;
// Cannot find tree route with empty DB or when imported a detached block.
if meta.best_hash != Default::default() && parent_exists {
let tree_route = sp_blockchain::tree_route(&self.blockchain, meta.best_hash, route_to)?;
// uncanonicalize: check safety violations and ensure the numbers no longer
// point to these block hashes in the key mapping.
for r in tree_route.retracted() {
if r.hash == meta.finalized_hash {
warn!(
"Potential safety failure: reverting finalized block {:?}",
(&r.number, &r.hash)
);
return Err(sp_blockchain::Error::NotInFinalizedChain)
}
retracted.push(r.hash);
utils::remove_number_to_key_mapping(transaction, columns::KEY_LOOKUP, r.number)?;
}
// canonicalize: set the number lookup to map to this block's hash.
for e in tree_route.enacted() {
enacted.push(e.hash);
utils::insert_number_to_key_mapping(
transaction,
columns::KEY_LOOKUP,
e.number,
e.hash,
)?;
}
}
let lookup_key = utils::number_and_hash_to_lookup_key(best_number, &best_hash)?;
transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, lookup_key);
utils::insert_number_to_key_mapping(
transaction,
columns::KEY_LOOKUP,
best_number,
best_hash,
)?;
Ok((enacted, retracted))
}
fn ensure_sequential_finalization(
&self,
header: &Block::Header,
		last_finalized: Option<Block::Hash>,
) -> ClientResult<()> {
let last_finalized =
last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash);
if last_finalized != self.blockchain.meta.read().genesis_hash &&
*header.parent_hash() != last_finalized
{
return Err(sp_blockchain::Error::NonSequentialFinalization(format!(
"Last finalized {:?} not parent of {:?}",
last_finalized,
header.hash()
)))
}
Ok(())
}
	fn finalize_block_with_transaction(
		&self,
		transaction: &mut Transaction<DbHash>,
		hash: &Block::Hash,
		header: &Block::Header,
		last_finalized: Option<Block::Hash>,
		justification: Option<Justification>,
		finalization_displaced: &mut Option<FinalizationOutcome<Block::Hash, NumberFor<Block>>>,
	) -> ClientResult<MetaUpdate<Block>> {
// TODO: ensure best chain contains this block.
let number = *header.number();
self.ensure_sequential_finalization(header, last_finalized)?;
let with_state = sc_client_api::Backend::have_state_at(self, hash, number);
self.note_finalized(transaction, header, *hash, finalization_displaced, with_state)?;
if let Some(justification) = justification {
transaction.set_from_vec(
columns::JUSTIFICATIONS,
&utils::number_and_hash_to_lookup_key(number, hash)?,
Justifications::from(justification).encode(),
);
}
Ok(MetaUpdate { hash: *hash, number, is_best: false, is_finalized: true, with_state })
}
// performs forced canonicalization with a delay after importing a non-finalized block.
	fn force_delayed_canonicalize(
		&self,
		transaction: &mut Transaction<DbHash>,
		hash: Block::Hash,
		number: NumberFor<Block>,
	) -> ClientResult<()> {
		let number_u64 = number.saturated_into::<u64>();
if number_u64 > self.canonicalization_delay {
let new_canonical = number_u64 - self.canonicalization_delay;
if new_canonical <= self.storage.state_db.best_canonical().unwrap_or(0) {
return Ok(())
}
let hash = if new_canonical == number_u64 {
hash
} else {
sc_client_api::blockchain::HeaderBackend::hash(
&self.blockchain,
new_canonical.saturated_into(),
)?
.ok_or_else(|| {
sp_blockchain::Error::Backend(format!(
"Can't canonicalize missing block number #{} when importing {:?} (#{})",
new_canonical, hash, number,
))
})?
};
if !sc_client_api::Backend::have_state_at(self, &hash, new_canonical.saturated_into()) {
return Ok(())
}
trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash);
			let commit = self.storage.state_db.canonicalize_block(&hash).map_err(
				sp_blockchain::Error::from_state_db::<
					sc_state_db::Error<sp_database::error::DatabaseError>,
				>,
			)?;
apply_state_commit(transaction, commit);
}
Ok(())
}
	fn try_commit_operation(&self, mut operation: BlockImportOperation<Block>) -> ClientResult<()> {
let mut transaction = Transaction::new();
let mut finalization_displaced_leaves = None;
operation.apply_aux(&mut transaction);
operation.apply_offchain(&mut transaction);
let mut meta_updates = Vec::with_capacity(operation.finalized_blocks.len());
let (best_num, mut last_finalized_hash, mut last_finalized_num, mut block_gap) = {
let meta = self.blockchain.meta.read();
(meta.best_number, meta.finalized_hash, meta.finalized_number, meta.block_gap)
};
for (block, justification) in operation.finalized_blocks {
let block_hash = self.blockchain.expect_block_hash_from_id(&block)?;
let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?;
meta_updates.push(self.finalize_block_with_transaction(
&mut transaction,
&block_hash,
&block_header,
Some(last_finalized_hash),
justification,
&mut finalization_displaced_leaves,
)?);
last_finalized_hash = block_hash;
last_finalized_num = *block_header.number();
}
let imported = if let Some(pending_block) = operation.pending_block {
let hash = pending_block.header.hash();
let parent_hash = *pending_block.header.parent_hash();
let number = *pending_block.header.number();
let highest_leaf = self
.blockchain
.leaves
.read()
.highest_leaf()
.map(|(n, _)| n)
.unwrap_or(Zero::zero());
let existing_header =
number <= highest_leaf && self.blockchain.header(BlockId::hash(hash))?.is_some();
// blocks are keyed by number + hash.
let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?;
if pending_block.leaf_state.is_best() {
self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?;
};
utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?;
transaction.set_from_vec(columns::HEADER, &lookup_key, pending_block.header.encode());
if let Some(body) = pending_block.body {
				// If we have any index operations we save the block in the new format with indexed
				// extrinsic headers. Otherwise, we save the body as a single blob.
if operation.index_ops.is_empty() {
transaction.set_from_vec(columns::BODY, &lookup_key, body.encode());
} else {
let body =
						apply_index_ops::<Block>(&mut transaction, body, operation.index_ops);
transaction.set_from_vec(columns::BODY_INDEX, &lookup_key, body);
}
}
if let Some(body) = pending_block.indexed_body {
				apply_indexed_body::<Block>(&mut transaction, body);
}
if let Some(justifications) = pending_block.justifications {
transaction.set_from_vec(
columns::JUSTIFICATIONS,
&lookup_key,
justifications.encode(),
);
}
if number.is_zero() {
transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref());
if operation.commit_state {
transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key);
} else {
// When we don't want to commit the genesis state, we still preserve it in
// memory to bootstrap consensus. It is queried for an initial list of
// authorities, etc.
*self.genesis_state.write() = Some(Arc::new(DbGenesisStorage::new(
*pending_block.header.state_root(),
operation.db_updates.clone(),
)));
}
}
let finalized = if operation.commit_state {
				let mut changeset: sc_state_db::ChangeSet<Vec<u8>> =
					sc_state_db::ChangeSet::default();
let mut ops: u64 = 0;
let mut bytes: u64 = 0;
let mut removal: u64 = 0;
let mut bytes_removal: u64 = 0;
for (mut key, (val, rc)) in operation.db_updates.drain() {
self.storage.db.sanitize_key(&mut key);
if rc > 0 {
ops += 1;
bytes += key.len() as u64 + val.len() as u64;
if rc == 1 {
changeset.inserted.push((key, val.to_vec()));
} else {
changeset.inserted.push((key.clone(), val.to_vec()));
for _ in 0..rc - 1 {
changeset.inserted.push((key.clone(), Default::default()));
}
}
} else if rc < 0 {
removal += 1;
bytes_removal += key.len() as u64;
if rc == -1 {
changeset.deleted.push(key);
} else {
for _ in 0..-rc {
changeset.deleted.push(key.clone());
}
}
}
}
self.state_usage.tally_writes_nodes(ops, bytes);
self.state_usage.tally_removed_nodes(removal, bytes_removal);
let mut ops: u64 = 0;
let mut bytes: u64 = 0;
for (key, value) in operation
.storage_updates
.iter()
.chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter()))
{
ops += 1;
bytes += key.len() as u64;
if let Some(v) = value.as_ref() {
bytes += v.len() as u64;
}
}
self.state_usage.tally_writes(ops, bytes);
				let number_u64 = number.saturated_into::<u64>();
let commit = self
.storage
.state_db
.insert_block(&hash, number_u64, pending_block.header.parent_hash(), changeset)
					.map_err(|e: sc_state_db::Error<sp_database::error::DatabaseError>| {
						sp_blockchain::Error::from_state_db(e)
					})?;
apply_state_commit(&mut transaction, commit);
if number <= last_finalized_num {
// Canonicalize in the db when re-importing existing blocks with state.
					let commit = self.storage.state_db.canonicalize_block(&hash).map_err(
						sp_blockchain::Error::from_state_db::<
							sc_state_db::Error<sp_database::error::DatabaseError>,
						>,
					)?;
apply_state_commit(&mut transaction, commit);
meta_updates.push(MetaUpdate {
hash,
number,
is_best: false,
is_finalized: true,
with_state: true,
});
}
// Check if need to finalize. Genesis is always finalized instantly.
let finalized = number_u64 == 0 || pending_block.leaf_state.is_final();
finalized
} else {
(number.is_zero() && last_finalized_num.is_zero()) ||
pending_block.leaf_state.is_final()
};
let header = &pending_block.header;
let is_best = pending_block.leaf_state.is_best();
debug!(
target: "db",
"DB Commit {:?} ({}), best={}, state={}, existing={}, finalized={}",
hash,
number,
is_best,
operation.commit_state,
existing_header,
finalized,
);
self.state_usage.merge_sm(operation.old_state.usage_info());
// release state reference so that it can be finalized
// VERY IMPORTANT
drop(operation.old_state);
if finalized {
// TODO: ensure best chain contains this block.
self.ensure_sequential_finalization(header, Some(last_finalized_hash))?;
self.note_finalized(
&mut transaction,
header,
hash,
&mut finalization_displaced_leaves,
operation.commit_state,
)?;
} else {
// canonicalize blocks which are old enough, regardless of finality.
self.force_delayed_canonicalize(&mut transaction, hash, *header.number())?
}
if !existing_header {
// Add a new leaf if the block has the potential to be finalized.
if number > last_finalized_num || last_finalized_num.is_zero() {
let mut leaves = self.blockchain.leaves.write();
leaves.import(hash, number, parent_hash);
leaves.prepare_transaction(
&mut transaction,
columns::META,
meta_keys::LEAF_PREFIX,
);
}
let mut children = children::read_children(
&*self.storage.db,
columns::META,
meta_keys::CHILDREN_PREFIX,
parent_hash,
)?;
if !children.contains(&hash) {
children.push(hash);
children::write_children(
&mut transaction,
columns::META,
meta_keys::CHILDREN_PREFIX,
parent_hash,
children,
);
}
if let Some((mut start, end)) = block_gap {
if number == start {
start += One::one();
utils::insert_number_to_key_mapping(
&mut transaction,
columns::KEY_LOOKUP,
number,
hash,
)?;
}
if start > end {
transaction.remove(columns::META, meta_keys::BLOCK_GAP);
block_gap = None;
debug!(target: "db", "Removed block gap.");
} else {
block_gap = Some((start, end));
debug!(target: "db", "Update block gap. {:?}", block_gap);
transaction.set(
columns::META,
meta_keys::BLOCK_GAP,
&(start, end).encode(),
);
}
} else if number > best_num + One::one() &&
number > One::one() && self
.blockchain
.header(BlockId::hash(parent_hash))?
.is_none()
{
let gap = (best_num + One::one(), number - One::one());
transaction.set(columns::META, meta_keys::BLOCK_GAP, &gap.encode());
block_gap = Some(gap);
debug!(target: "db", "Detected block gap {:?}", block_gap);
}
}
meta_updates.push(MetaUpdate {
hash,
number,
is_best: pending_block.leaf_state.is_best(),
is_finalized: finalized,
with_state: operation.commit_state,
});
Some((pending_block.header, hash))
} else {
None
};
if let Some(set_head) = operation.set_head {
if let Some(header) =
sc_client_api::blockchain::HeaderBackend::header(&self.blockchain, set_head)?
{
let number = header.number();
let hash = header.hash();
self.set_head_with_transaction(&mut transaction, hash, (*number, hash))?;
meta_updates.push(MetaUpdate {
hash,
number: *number,
is_best: true,
is_finalized: false,
with_state: false,
});
} else {
return Err(sp_blockchain::Error::UnknownBlock(format!(
"Cannot set head {:?}",
set_head
)))
}
}
self.storage.db.commit(transaction)?;
// Apply all in-memory state changes.
// Code beyond this point can't fail.
if let Some((header, hash)) = imported {
trace!(target: "db", "DB Commit done {:?}", hash);
let header_metadata = CachedHeaderMetadata::from(&header);
self.blockchain.insert_header_metadata(header_metadata.hash, header_metadata);
cache_header(&mut self.blockchain.header_cache.lock(), hash, Some(header));
}
for m in meta_updates {
self.blockchain.update_meta(m);
}
self.blockchain.update_block_gap(block_gap);
Ok(())
}
// write stuff to a transaction after a new block is finalized.
// this canonicalizes finalized blocks. Fails if called with a block which
// was not a child of the last finalized block.
	fn note_finalized(
		&self,
		transaction: &mut Transaction<DbHash>,
		f_header: &Block::Header,
		f_hash: Block::Hash,
		displaced: &mut Option<FinalizationOutcome<Block::Hash, NumberFor<Block>>>,
		with_state: bool,
	) -> ClientResult<()> {
let f_num = *f_header.number();
let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash)?;
if with_state {
transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key.clone());
}
transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key);
if sc_client_api::Backend::have_state_at(self, &f_hash, f_num) &&
self.storage
.state_db
.best_canonical()
				.map(|c| f_num.saturated_into::<u64>() > c)
.unwrap_or(true)
{
			let commit = self.storage.state_db.canonicalize_block(&f_hash).map_err(
				sp_blockchain::Error::from_state_db::<
					sc_state_db::Error<sp_database::error::DatabaseError>,
				>,
			)?;
apply_state_commit(transaction, commit);
}
let new_displaced = self.blockchain.leaves.write().finalize_height(f_num);
self.prune_blocks(transaction, f_num, &new_displaced)?;
match displaced {
x @ &mut None => *x = Some(new_displaced),
&mut Some(ref mut displaced) => displaced.merge(new_displaced),
}
Ok(())
}
	fn prune_blocks(
		&self,
		transaction: &mut Transaction<DbHash>,
		finalized: NumberFor<Block>,
		displaced: &FinalizationOutcome<Block::Hash, NumberFor<Block>>,
	) -> ClientResult<()> {
match self.blocks_pruning {
BlocksPruning::KeepAll => {},
BlocksPruning::Some(blocks_pruning) => {
// Always keep the last finalized block
let keep = std::cmp::max(blocks_pruning, 1);
if finalized >= keep.into() {
let number = finalized.saturating_sub(keep.into());
					self.prune_block(transaction, BlockId::<Block>::number(number))?;
}
self.prune_displaced_branches(transaction, finalized, displaced)?;
},
BlocksPruning::KeepFinalized => {
self.prune_displaced_branches(transaction, finalized, displaced)?;
},
}
Ok(())
}
	fn prune_displaced_branches(
		&self,
		transaction: &mut Transaction<DbHash>,
		finalized: NumberFor<Block>,
		displaced: &FinalizationOutcome<Block::Hash, NumberFor<Block>>,
	) -> ClientResult<()> {
// Discard all blocks from displaced branches
for h in displaced.leaves() {
let mut number = finalized;
let mut hash = *h;
// Follow displaced chains back until we reach a finalized block.
// Since leaves are discarded due to finality, they can't have parents
// that are canonical, but not yet finalized. So we stop deleting as soon as
// we reach canonical chain.
while self.blockchain.hash(number)? != Some(hash) {
				let id = BlockId::<Block>::hash(hash);
match self.blockchain.header(id)? {
Some(header) => {
self.prune_block(transaction, id)?;
number = header.number().saturating_sub(One::one());
hash = *header.parent_hash();
},
None => break,
}
}
}
Ok(())
}
	fn prune_block(
		&self,
		transaction: &mut Transaction<DbHash>,
		id: BlockId<Block>,
	) -> ClientResult<()> {
debug!(target: "db", "Removing block #{}", id);
utils::remove_from_db(
transaction,
&*self.storage.db,
columns::KEY_LOOKUP,
columns::BODY,
id,
)?;
utils::remove_from_db(
transaction,
&*self.storage.db,
columns::KEY_LOOKUP,
columns::JUSTIFICATIONS,
id,
)?;
if let Some(index) =
read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)?
{
utils::remove_from_db(
transaction,
&*self.storage.db,
columns::KEY_LOOKUP,
columns::BODY_INDEX,
id,
)?;
			match Vec::<DbExtrinsic<Block>>::decode(&mut &index[..]) {
Ok(index) =>
for ex in index {
if let DbExtrinsic::Indexed { hash, .. } = ex {
transaction.release(columns::TRANSACTION, hash);
}
},
Err(err) =>
return Err(sp_blockchain::Error::Backend(format!(
"Error decoding body list: {}",
err
))),
}
}
Ok(())
}
	fn empty_state(&self) -> ClientResult<RecordStatsState<RefTrackingState<Block>, Block>> {
		let root = EmptyStorage::<Block>::new().0; // Empty trie
		let db_state = DbStateBuilder::<Block>::new(self.storage.clone(), root)
.with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache()))
.build();
let state = RefTrackingState::new(db_state, self.storage.clone(), None);
Ok(RecordStatsState::new(state, None, self.state_usage.clone()))
}
}
fn apply_state_commit(
	transaction: &mut Transaction<DbHash>,
	commit: sc_state_db::CommitSet<Vec<u8>>,
) {
for (key, val) in commit.data.inserted.into_iter() {
transaction.set_from_vec(columns::STATE, &key[..], val);
}
for key in commit.data.deleted.into_iter() {
transaction.remove(columns::STATE, &key[..]);
}
for (key, val) in commit.meta.inserted.into_iter() {
transaction.set_from_vec(columns::STATE_META, &key[..], val);
}
for key in commit.meta.deleted.into_iter() {
transaction.remove(columns::STATE_META, &key[..]);
}
}
fn apply_index_ops<Block: BlockT>(
	transaction: &mut Transaction<DbHash>,
	body: Vec<Block::Extrinsic>,
	ops: Vec<IndexOperation>,
) -> Vec<u8> {
	let mut extrinsic_index: Vec<DbExtrinsic<Block>> = Vec::with_capacity(body.len());
let mut index_map = HashMap::new();
let mut renewed_map = HashMap::new();
for op in ops {
match op {
IndexOperation::Insert { extrinsic, hash, size } => {
index_map.insert(extrinsic, (hash, size));
},
IndexOperation::Renew { extrinsic, hash } => {
renewed_map.insert(extrinsic, DbHash::from_slice(hash.as_ref()));
},
}
}
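	// Split the body: extrinsics referenced by an index operation are stored as a `header`
	// prefix in the body index plus the indexed payload kept (ref-counted) under its hash in
	// the TRANSACTION column; everything else is stored inline as `DbExtrinsic::Full`.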
for (index, extrinsic) in body.into_iter().enumerate() {
let db_extrinsic = if let Some(hash) = renewed_map.get(&(index as u32)) {
// Bump ref counter
let extrinsic = extrinsic.encode();
transaction.reference(columns::TRANSACTION, DbHash::from_slice(hash.as_ref()));
DbExtrinsic::Indexed { hash: *hash, header: extrinsic }
} else {
match index_map.get(&(index as u32)) {
Some((hash, size)) => {
let encoded = extrinsic.encode();
if *size as usize <= encoded.len() {
let offset = encoded.len() - *size as usize;
transaction.store(
columns::TRANSACTION,
DbHash::from_slice(hash.as_ref()),
encoded[offset..].to_vec(),
);
DbExtrinsic::Indexed {
hash: DbHash::from_slice(hash.as_ref()),
header: encoded[..offset].to_vec(),
}
} else {
// Invalid indexed slice. Just store full data and don't index anything.
DbExtrinsic::Full(extrinsic)
}
},
_ => DbExtrinsic::Full(extrinsic),
}
};
extrinsic_index.push(db_extrinsic);
}
debug!(
target: "db",
"DB transaction index: {} inserted, {} renewed, {} full",
index_map.len(),
renewed_map.len(),
extrinsic_index.len() - index_map.len() - renewed_map.len(),
);
extrinsic_index.encode()
}
fn apply_indexed_body<Block: BlockT>(transaction: &mut Transaction<DbHash>, body: Vec<Vec<u8>>) {
for extrinsic in body {
let hash = sp_runtime::traits::BlakeTwo256::hash(&extrinsic);
transaction.store(columns::TRANSACTION, DbHash::from_slice(hash.as_ref()), extrinsic);
}
}
impl<Block: BlockT> sc_client_api::backend::AuxStore for Backend<Block>