diff --git a/substrate/Cargo.lock b/substrate/Cargo.lock
index 7f1d46f46a..1afa55c771 100644
--- a/substrate/Cargo.lock
+++ b/substrate/Cargo.lock
@@ -503,9 +503,9 @@ dependencies = [
[[package]]
name = "bindgen"
-version = "0.57.0"
+version = "0.59.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fd4865004a46a0aafb2a0a5eb19d3c9fc46ee5f063a6cfc605c69ac9ecf5263d"
+checksum = "453c49e5950bb0eb63bb3df640e31618846c89d5b7faa54040d76e98e0134375"
dependencies = [
"bitflags",
"cexpr",
@@ -526,6 +526,18 @@ version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
+[[package]]
+name = "bitvec"
+version = "0.19.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8942c8d352ae1838c9dda0b0ca2ab657696ef2232a20147cf1b30ae1a9cb4321"
+dependencies = [
+ "funty",
+ "radium 0.5.3",
+ "tap",
+ "wyz",
+]
+
[[package]]
name = "bitvec"
version = "0.20.2"
@@ -533,7 +545,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f682656975d3a682daff957be4ddeb65d6ad656737cd821f2d00685ae466af1"
dependencies = [
"funty",
- "radium",
+ "radium 0.6.2",
"tap",
"wyz",
]
@@ -788,9 +800,9 @@ dependencies = [
[[package]]
name = "cexpr"
-version = "0.4.0"
+version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27"
+checksum = "db507a7679252d2276ed0dd8113c6875ec56d3089f9225b2b42c30cc1f8e5c89"
dependencies = [
"nom",
]
@@ -3119,9 +3131,9 @@ dependencies = [
[[package]]
name = "kvdb-rocksdb"
-version = "0.12.0"
+version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "431ca65516efab86e65d96281f750ebb54277dec656fcf6c027f3d1c0cb69e4c"
+checksum = "9b1b6ea8f2536f504b645ad78419c8246550e19d2c3419a167080ce08edee35a"
dependencies = [
"fs-swap",
"kvdb",
@@ -3621,9 +3633,9 @@ dependencies = [
[[package]]
name = "librocksdb-sys"
-version = "6.17.3"
+version = "6.20.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5da125e1c0f22c7cae785982115523a0738728498547f415c9054cb17c7e89f9"
+checksum = "c309a9d2470844aceb9a4a098cf5286154d20596868b75a6b36357d2bb9ca25d"
dependencies = [
"bindgen",
"cc",
@@ -3747,6 +3759,26 @@ dependencies = [
"linked-hash-map",
]
+[[package]]
+name = "lz4"
+version = "1.23.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aac20ed6991e01bf6a2e68cc73df2b389707403662a8ba89f68511fb340f724c"
+dependencies = [
+ "libc",
+ "lz4-sys",
+]
+
+[[package]]
+name = "lz4-sys"
+version = "1.9.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dca79aa95d8b3226213ad454d328369853be3a1382d89532a854f4d69640acae"
+dependencies = [
+ "cc",
+ "libc",
+]
+
[[package]]
name = "mach"
version = "0.3.2"
@@ -4576,10 +4608,12 @@ checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451"
[[package]]
name = "nom"
-version = "5.1.2"
+version = "6.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af"
+checksum = "9c5c51b9083a3c620fa67a2a635d1ce7d95b897e957d6b28ff9a5da960a103a6"
dependencies = [
+ "bitvec 0.19.5",
+ "funty",
"memchr",
"version_check",
]
@@ -5742,9 +5776,9 @@ dependencies = [
[[package]]
name = "parity-db"
-version = "0.2.4"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2e337f62db341435f0da05b8f6b97e984ef4ea5800510cd07c2d624688c40b47"
+checksum = "241f9c5d25063080f2c02846221f13e1d0e5e18fa00c32c234aad585b744ee55"
dependencies = [
"blake2-rfc",
"crc32fast",
@@ -5752,9 +5786,11 @@ dependencies = [
"hex",
"libc",
"log",
+ "lz4",
"memmap2",
"parking_lot 0.11.1",
"rand 0.8.4",
+ "snap",
]
[[package]]
@@ -5782,7 +5818,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8975095a2a03bbbdc70a74ab11a4f76a6d0b84680d87c68d722531b0ac28e8a9"
dependencies = [
"arrayvec 0.7.0",
- "bitvec",
+ "bitvec 0.20.2",
"byte-slice-cast",
"impl-trait-for-tuples",
"parity-scale-codec-derive",
@@ -6506,6 +6542,12 @@ dependencies = [
"proc-macro2",
]
+[[package]]
+name = "radium"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "941ba9d78d8e2f7ce474c015eea4d9c6d25b6a3327f9832ee29a4de27f91bbb8"
+
[[package]]
name = "radium"
version = "0.6.2"
@@ -6870,9 +6912,9 @@ dependencies = [
[[package]]
name = "rocksdb"
-version = "0.16.0"
+version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c749134fda8bfc90d0de643d59bfc841dcb3ac8a1062e12b6754bd60235c48b3"
+checksum = "7a62eca5cacf2c8261128631bed9f045598d40bfbe4b29f5163f0f802f8f44a7"
dependencies = [
"libc",
"librocksdb-sys",
@@ -8546,9 +8588,9 @@ dependencies = [
[[package]]
name = "shlex"
-version = "0.1.1"
+version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2"
+checksum = "42a568c8f2cd051a4d283bd6eb0343ac214c1b0f1ac19f93e1175b2dee38c73d"
[[package]]
name = "signal-hook"
@@ -8617,6 +8659,12 @@ version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e"
+[[package]]
+name = "snap"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "45456094d1983e2ee2a18fdfebce3189fa451699d0502cb8e3b49dba5ba41451"
+
[[package]]
name = "snow"
version = "0.7.2"
diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml
index ac643a1109..01ec8b253e 100644
--- a/substrate/bin/node/bench/Cargo.toml
+++ b/substrate/bin/node/bench/Cargo.toml
@@ -22,7 +22,7 @@ serde_json = "1.0.41"
structopt = "0.3"
derive_more = "0.99.2"
kvdb = "0.10.0"
-kvdb-rocksdb = "0.12.0"
+kvdb-rocksdb = "0.14.0"
sp-trie = { version = "4.0.0-dev", path = "../../../primitives/trie" }
sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" }
sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" }
@@ -37,7 +37,7 @@ hex = "0.4.0"
rand = { version = "0.7.2", features = ["small_rng"] }
lazy_static = "1.4.0"
parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] }
-parity-db = { version = "0.2.4" }
+parity-db = { version = "0.3" }
sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" }
sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" }
futures = { version = "0.3.4", features = ["thread-pool"] }
diff --git a/substrate/bin/node/bench/src/tempdb.rs b/substrate/bin/node/bench/src/tempdb.rs
index 3c1c0f250e..518c0dd961 100644
--- a/substrate/bin/node/bench/src/tempdb.rs
+++ b/substrate/bin/node/bench/src/tempdb.rs
@@ -91,8 +91,7 @@ impl TempDatabase {
match db_type {
DatabaseType::RocksDb => {
let db_cfg = DatabaseConfig::with_columns(1);
- let db = Database::open(&db_cfg, &self.0.path().to_string_lossy())
- .expect("Database backend error");
+ let db = Database::open(&db_cfg, &self.0.path()).expect("Database backend error");
Arc::new(db)
},
DatabaseType::ParityDb => Arc::new(ParityDbWrapper({
@@ -101,7 +100,7 @@ impl TempDatabase {
column_options.ref_counted = true;
column_options.preimage = true;
column_options.uniform = true;
- parity_db::Db::open(&options).expect("db open error")
+ parity_db::Db::open_or_create(&options).expect("db open error")
})),
}
}
diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs
index 6aaaab04b6..9b49f82c6a 100644
--- a/substrate/bin/node/testing/src/bench.rs
+++ b/substrate/bin/node/testing/src/bench.rs
@@ -220,10 +220,10 @@ pub enum DatabaseType {
}
impl DatabaseType {
- fn into_settings(self, path: PathBuf) -> sc_client_db::DatabaseSettingsSrc {
+ fn into_settings(self, path: PathBuf) -> sc_client_db::DatabaseSource {
match self {
- Self::RocksDb => sc_client_db::DatabaseSettingsSrc::RocksDb { path, cache_size: 512 },
- Self::ParityDb => sc_client_db::DatabaseSettingsSrc::ParityDb { path },
+ Self::RocksDb => sc_client_db::DatabaseSource::RocksDb { path, cache_size: 512 },
+ Self::ParityDb => sc_client_db::DatabaseSource::ParityDb { path },
}
}
}
diff --git a/substrate/client/cli/src/arg_enums.rs b/substrate/client/cli/src/arg_enums.rs
index 72741d7bea..5221500f08 100644
--- a/substrate/client/cli/src/arg_enums.rs
+++ b/substrate/client/cli/src/arg_enums.rs
@@ -197,6 +197,9 @@ pub enum Database {
RocksDb,
/// ParityDb.
ParityDb,
+ /// Detect whether there is an existing database. Use it, if there is, if not, create new
+ /// instance of paritydb
+ Auto,
}
impl std::str::FromStr for Database {
@@ -207,6 +210,8 @@ impl std::str::FromStr for Database {
Ok(Self::RocksDb)
} else if s.eq_ignore_ascii_case("paritydb-experimental") {
Ok(Self::ParityDb)
+ } else if s.eq_ignore_ascii_case("auto") {
+ Ok(Self::Auto)
} else {
Err(format!("Unknown variant `{}`, known variants: {:?}", s, Self::variants()))
}
@@ -216,7 +221,7 @@ impl std::str::FromStr for Database {
impl Database {
/// Returns all the variants of this enum to be shown in the cli.
pub fn variants() -> &'static [&'static str] {
- &["rocksdb", "paritydb-experimental"]
+ &["rocksdb", "paritydb-experimental", "auto"]
}
}
diff --git a/substrate/client/cli/src/commands/export_blocks_cmd.rs b/substrate/client/cli/src/commands/export_blocks_cmd.rs
index 0ed8e3ff35..ca3069442a 100644
--- a/substrate/client/cli/src/commands/export_blocks_cmd.rs
+++ b/substrate/client/cli/src/commands/export_blocks_cmd.rs
@@ -23,7 +23,7 @@ use crate::{
};
use log::info;
use sc_client_api::{BlockBackend, UsageProvider};
-use sc_service::{chain_ops::export_blocks, config::DatabaseConfig};
+use sc_service::{chain_ops::export_blocks, config::DatabaseSource};
use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
use std::{fmt::Debug, fs, io, path::PathBuf, str::FromStr, sync::Arc};
use structopt::StructOpt;
@@ -69,14 +69,14 @@ impl ExportBlocksCmd {
pub async fn run(
&self,
client: Arc,
- database_config: DatabaseConfig,
+ database_config: DatabaseSource,
) -> error::Result<()>
where
B: BlockT,
C: BlockBackend + UsageProvider + 'static,
<::Number as FromStr>::Err: Debug,
{
- if let DatabaseConfig::RocksDb { ref path, .. } = database_config {
+ if let DatabaseSource::RocksDb { ref path, .. } = database_config {
info!("DB path: {}", path.display());
}
diff --git a/substrate/client/cli/src/commands/purge_chain_cmd.rs b/substrate/client/cli/src/commands/purge_chain_cmd.rs
index 590046aa77..e1bdb3a03c 100644
--- a/substrate/client/cli/src/commands/purge_chain_cmd.rs
+++ b/substrate/client/cli/src/commands/purge_chain_cmd.rs
@@ -21,7 +21,7 @@ use crate::{
params::{DatabaseParams, SharedParams},
CliConfiguration,
};
-use sc_service::DatabaseConfig;
+use sc_service::DatabaseSource;
use std::{
fmt::Debug,
fs,
@@ -47,7 +47,7 @@ pub struct PurgeChainCmd {
impl PurgeChainCmd {
/// Run the purge command
- pub fn run(&self, database_config: DatabaseConfig) -> error::Result<()> {
+ pub fn run(&self, database_config: DatabaseSource) -> error::Result<()> {
let db_path = database_config.path().ok_or_else(|| {
error::Error::Input("Cannot purge custom database implementation".into())
})?;
diff --git a/substrate/client/cli/src/config.rs b/substrate/client/cli/src/config.rs
index d586156410..d985dce75d 100644
--- a/substrate/client/cli/src/config.rs
+++ b/substrate/client/cli/src/config.rs
@@ -27,7 +27,7 @@ use names::{Generator, Name};
use sc_client_api::execution_extensions::ExecutionStrategies;
use sc_service::{
config::{
- BasePath, Configuration, DatabaseConfig, ExtTransport, KeystoreConfig,
+ BasePath, Configuration, DatabaseSource, ExtTransport, KeystoreConfig,
NetworkConfiguration, NodeKeyConfig, OffchainWorkerConfig, PrometheusConfig, PruningMode,
Role, RpcMethods, TaskExecutor, TelemetryEndpoints, TransactionPoolOptions,
WasmExecutionMethod,
@@ -220,10 +220,13 @@ pub trait CliConfiguration: Sized {
base_path: &PathBuf,
cache_size: usize,
database: Database,
- ) -> Result {
+ ) -> Result {
+ let rocksdb_path = base_path.join("db");
+ let paritydb_path = base_path.join("paritydb");
Ok(match database {
- Database::RocksDb => DatabaseConfig::RocksDb { path: base_path.join("db"), cache_size },
- Database::ParityDb => DatabaseConfig::ParityDb { path: base_path.join("paritydb") },
+ Database::RocksDb => DatabaseSource::RocksDb { path: rocksdb_path, cache_size },
+ Database::ParityDb => DatabaseSource::ParityDb { path: paritydb_path },
+ Database::Auto => DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size },
})
}
diff --git a/substrate/client/db/Cargo.toml b/substrate/client/db/Cargo.toml
index ab06ecee75..85ab58472f 100644
--- a/substrate/client/db/Cargo.toml
+++ b/substrate/client/db/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
parking_lot = "0.11.1"
log = "0.4.8"
kvdb = "0.10.0"
-kvdb-rocksdb = { version = "0.12.0", optional = true }
+kvdb-rocksdb = { version = "0.14.0", optional = true }
kvdb-memorydb = "0.10.0"
linked-hash-map = "0.5.4"
hash-db = "0.15.2"
@@ -34,7 +34,7 @@ sc-state-db = { version = "0.10.0-dev", path = "../state-db" }
sp-trie = { version = "4.0.0-dev", path = "../../primitives/trie" }
sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" }
sp-database = { version = "4.0.0-dev", path = "../../primitives/database" }
-parity-db = { version = "0.2.4", optional = true }
+parity-db = { version = "0.3.1", optional = true }
prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" }
[dev-dependencies]
@@ -42,7 +42,7 @@ sp-keyring = { version = "4.0.0-dev", path = "../../primitives/keyring" }
sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" }
substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" }
quickcheck = "1.0.3"
-kvdb-rocksdb = "0.12.0"
+kvdb-rocksdb = "0.14.0"
tempfile = "3"
[features]
diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs
index dda469f4fd..b909b52610 100644
--- a/substrate/client/db/src/lib.rs
+++ b/substrate/client/db/src/lib.rs
@@ -297,7 +297,7 @@ pub struct DatabaseSettings {
/// State pruning mode.
pub state_pruning: PruningMode,
/// Where to find the database.
- pub source: DatabaseSettingsSrc,
+ pub source: DatabaseSource,
/// Block pruning mode.
pub keep_blocks: KeepBlocks,
/// Block body/Transaction storage scheme.
@@ -325,7 +325,17 @@ pub enum TransactionStorageMode {
/// Where to find the database..
#[derive(Debug, Clone)]
-pub enum DatabaseSettingsSrc {
+pub enum DatabaseSource {
+ /// Check given path, and see if there is an existing database there. If it's either `RocksDb`
+ /// or `ParityDb`, use it. If there is none, create a new instance of `ParityDb`.
+ Auto {
+ /// Path to the paritydb database.
+ paritydb_path: PathBuf,
+ /// Path to the rocksdb database.
+ rocksdb_path: PathBuf,
+ /// Cache size in MiB. Used only by `RocksDb` variant of `DatabaseSource`.
+ cache_size: usize,
+ },
/// Load a RocksDB database from a given path. Recommended for most uses.
RocksDb {
/// Path to the database.
@@ -344,27 +354,28 @@ pub enum DatabaseSettingsSrc {
Custom(Arc>),
}
-impl DatabaseSettingsSrc {
+impl DatabaseSource {
/// Return dabase path for databases that are on the disk.
pub fn path(&self) -> Option<&Path> {
match self {
- DatabaseSettingsSrc::RocksDb { path, .. } => Some(path.as_path()),
- DatabaseSettingsSrc::ParityDb { path, .. } => Some(path.as_path()),
- DatabaseSettingsSrc::Custom(_) => None,
+ // as per https://github.com/paritytech/substrate/pull/9500#discussion_r684312550
+ //
+ // IIUC this is needed for polkadot to create its own dbs, so until it can use parity db
+ // I would think rocksdb, but later parity-db.
+ DatabaseSource::Auto { paritydb_path, .. } => Some(&paritydb_path),
+ DatabaseSource::RocksDb { path, .. } | DatabaseSource::ParityDb { path } => Some(&path),
+ DatabaseSource::Custom(..) => None,
}
}
- /// Check if database supports internal ref counting for state data.
- pub fn supports_ref_counting(&self) -> bool {
- matches!(self, DatabaseSettingsSrc::ParityDb { .. })
- }
}
-impl std::fmt::Display for DatabaseSettingsSrc {
+impl std::fmt::Display for DatabaseSource {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let name = match self {
- DatabaseSettingsSrc::RocksDb { .. } => "RocksDb",
- DatabaseSettingsSrc::ParityDb { .. } => "ParityDb",
- DatabaseSettingsSrc::Custom(_) => "Custom",
+ DatabaseSource::Auto { .. } => "Auto",
+ DatabaseSource::RocksDb { .. } => "RocksDb",
+ DatabaseSource::ParityDb { .. } => "ParityDb",
+ DatabaseSource::Custom(_) => "Custom",
};
write!(f, "{}", name)
}
@@ -1106,7 +1117,7 @@ impl Backend {
state_cache_size: 16777216,
state_cache_child_ratio: Some((50, 100)),
state_pruning: PruningMode::keep_blocks(keep_blocks),
- source: DatabaseSettingsSrc::Custom(db),
+ source: DatabaseSource::Custom(db),
keep_blocks: KeepBlocks::Some(keep_blocks),
transaction_storage,
};
@@ -1125,15 +1136,12 @@ impl Backend {
let map_e = |e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e);
let state_db: StateDb<_, _> = StateDb::new(
config.state_pruning.clone(),
- !config.source.supports_ref_counting(),
+ !db.supports_ref_counting(),
&StateMetaDb(&*db),
)
.map_err(map_e)?;
- let storage_db = StorageDb {
- db: db.clone(),
- state_db,
- prefix_keys: !config.source.supports_ref_counting(),
- };
+ let storage_db =
+ StorageDb { db: db.clone(), state_db, prefix_keys: !db.supports_ref_counting() };
let offchain_storage = offchain::LocalStorage::new(db.clone());
let changes_tries_storage = DbChangesTrieStorage::new(
db,
@@ -2516,7 +2524,7 @@ pub(crate) mod tests {
state_cache_size: 16777216,
state_cache_child_ratio: Some((50, 100)),
state_pruning: PruningMode::keep_blocks(1),
- source: DatabaseSettingsSrc::Custom(backing),
+ source: DatabaseSource::Custom(backing),
keep_blocks: KeepBlocks::All,
transaction_storage: TransactionStorageMode::BlockBody,
},
diff --git a/substrate/client/db/src/parity_db.rs b/substrate/client/db/src/parity_db.rs
index 07f58baf01..1b645ca9fb 100644
--- a/substrate/client/db/src/parity_db.rs
+++ b/substrate/client/db/src/parity_db.rs
@@ -16,7 +16,7 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see .
use crate::{
- columns,
+ columns, light,
utils::{DatabaseType, NUM_COLUMNS},
};
/// A `Database` adapter for parity-db.
@@ -37,16 +37,42 @@ fn handle_err(result: parity_db::Result) -> T {
pub fn open>(
path: &std::path::Path,
db_type: DatabaseType,
+ create: bool,
) -> parity_db::Result>> {
let mut config = parity_db::Options::with_columns(path, NUM_COLUMNS as u8);
- config.sync = true; // Flush each commit
- if db_type == DatabaseType::Full {
- let mut state_col = &mut config.columns[columns::STATE as usize];
- state_col.ref_counted = true;
- state_col.preimage = true;
- state_col.uniform = true;
+
+ match db_type {
+ DatabaseType::Full => {
+ let indexes = [
+ columns::STATE,
+ columns::HEADER,
+ columns::BODY,
+ columns::TRANSACTION,
+ columns::JUSTIFICATIONS,
+ ];
+
+ for i in indexes {
+ let mut column = &mut config.columns[i as usize];
+ column.compression = parity_db::CompressionType::Lz4;
+ }
+
+ let mut state_col = &mut config.columns[columns::STATE as usize];
+ state_col.ref_counted = true;
+ state_col.preimage = true;
+ state_col.uniform = true;
+ },
+ DatabaseType::Light => {
+ config.columns[light::columns::HEADER as usize].compression =
+ parity_db::CompressionType::Lz4;
+ },
}
- let db = parity_db::Db::open(&config)?;
+
+ let db = if create {
+ parity_db::Db::open_or_create(&config)?
+ } else {
+ parity_db::Db::open(&config)?
+ };
+
Ok(std::sync::Arc::new(DbAdapter(db)))
}
@@ -72,4 +98,8 @@ impl> Database for DbAdapter {
fn value_size(&self, col: ColumnId, key: &[u8]) -> Option {
handle_err(self.0.get_size(col as u8, key)).map(|s| s as usize)
}
+
+ fn supports_ref_counting(&self) -> bool {
+ true
+ }
}
diff --git a/substrate/client/db/src/upgrade.rs b/substrate/client/db/src/upgrade.rs
index fe0abaed1b..0358086690 100644
--- a/substrate/client/db/src/upgrade.rs
+++ b/substrate/client/db/src/upgrade.rs
@@ -19,8 +19,8 @@
//! Database upgrade logic.
use std::{
- fs,
- io::{ErrorKind, Read, Write},
+ fmt, fs,
+ io::{self, ErrorKind, Read, Write},
path::{Path, PathBuf},
};
@@ -39,61 +39,79 @@ const CURRENT_VERSION: u32 = 3;
const V1_NUM_COLUMNS: u32 = 11;
const V2_NUM_COLUMNS: u32 = 12;
-/// Upgrade database to current version.
-pub fn upgrade_db(
- db_path: &Path,
- db_type: DatabaseType,
-) -> sp_blockchain::Result<()> {
- let is_empty = db_path.read_dir().map_or(true, |mut d| d.next().is_none());
- if !is_empty {
- let db_version = current_version(db_path)?;
- match db_version {
- 0 => Err(sp_blockchain::Error::Backend(format!(
- "Unsupported database version: {}",
- db_version
- )))?,
- 1 => {
- migrate_1_to_2::(db_path, db_type)?;
- migrate_2_to_3::(db_path, db_type)?
- },
- 2 => migrate_2_to_3::(db_path, db_type)?,
- CURRENT_VERSION => (),
- _ => Err(sp_blockchain::Error::Backend(format!(
- "Future database version: {}",
- db_version
- )))?,
+/// Database upgrade errors.
+#[derive(Debug)]
+pub enum UpgradeError {
+ /// Database version cannot be read from existing db_version file.
+ UnknownDatabaseVersion,
+ /// Missing database version file.
+ MissingDatabaseVersionFile,
+ /// Database version no longer supported.
+ UnsupportedVersion(u32),
+ /// Database version comes from future version of the client.
+ FutureDatabaseVersion(u32),
+ /// Invalid justification block.
+ DecodingJustificationBlock,
+ /// Common io error.
+ Io(io::Error),
+}
+
+pub type UpgradeResult<T> = Result<T, UpgradeError>;
+
+impl From<io::Error> for UpgradeError {
+ fn from(err: io::Error) -> Self {
+ UpgradeError::Io(err)
+ }
+}
+
+impl fmt::Display for UpgradeError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ UpgradeError::UnknownDatabaseVersion =>
+ write!(f, "Database version cannot be read from existing db_version file"),
+ UpgradeError::MissingDatabaseVersionFile => write!(f, "Missing database version file"),
+ UpgradeError::UnsupportedVersion(version) =>
+ write!(f, "Database version no longer supported: {}", version),
+ UpgradeError::FutureDatabaseVersion(version) =>
+ write!(f, "Database version comes from future version of the client: {}", version),
+ UpgradeError::DecodingJustificationBlock =>
+ write!(f, "Decoding justification block failed"),
+ UpgradeError::Io(err) => write!(f, "Io error: {}", err),
}
}
+}
- update_version(db_path)
+/// Upgrade database to current version.
+pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> UpgradeResult<()> {
+ let db_version = current_version(db_path)?;
+ match db_version {
+ 0 => return Err(UpgradeError::UnsupportedVersion(db_version)),
+ 1 => {
+ migrate_1_to_2::(db_path, db_type)?;
+ migrate_2_to_3::(db_path, db_type)?
+ },
+ 2 => migrate_2_to_3::(db_path, db_type)?,
+ CURRENT_VERSION => (),
+ _ => return Err(UpgradeError::FutureDatabaseVersion(db_version)),
+ }
+ update_version(db_path)?;
+ Ok(())
}
/// Migration from version1 to version2:
/// 1) the number of columns has changed from 11 to 12;
/// 2) transactions column is added;
-fn migrate_1_to_2(
- db_path: &Path,
- _db_type: DatabaseType,
-) -> sp_blockchain::Result<()> {
- let db_path = db_path
- .to_str()
- .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?;
+fn migrate_1_to_2(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> {
let db_cfg = DatabaseConfig::with_columns(V1_NUM_COLUMNS);
- let db = Database::open(&db_cfg, db_path).map_err(db_err)?;
- db.add_column().map_err(db_err)
+ let db = Database::open(&db_cfg, db_path)?;
+ db.add_column().map_err(Into::into)
}
/// Migration from version2 to version3:
/// - The format of the stored Justification changed to support multiple Justifications.
-fn migrate_2_to_3(
- db_path: &Path,
- _db_type: DatabaseType,
-) -> sp_blockchain::Result<()> {
- let db_path = db_path
- .to_str()
- .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?;
+fn migrate_2_to_3(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> {
let db_cfg = DatabaseConfig::with_columns(V2_NUM_COLUMNS);
- let db = Database::open(&db_cfg, db_path).map_err(db_err)?;
+ let db = Database::open(&db_cfg, db_path)?;
// Get all the keys we need to update
let keys: Vec<_> = db.iter(columns::JUSTIFICATIONS).map(|entry| entry.0).collect();
@@ -101,49 +119,43 @@ fn migrate_2_to_3(
// Read and update each entry
let mut transaction = db.transaction();
for key in keys {
- if let Some(justification) = db.get(columns::JUSTIFICATIONS, &key).map_err(db_err)? {
+ if let Some(justification) = db.get(columns::JUSTIFICATIONS, &key)? {
// Tag each justification with the hardcoded ID for GRANDPA to avoid the dependency on
// the GRANDPA crate.
// NOTE: when storing justifications the previous API would get a `Vec` and still
// call encode on it.
let justification = Vec::::decode(&mut &justification[..])
- .map_err(|_| sp_blockchain::Error::Backend("Invalid justification blob".into()))?;
+ .map_err(|_| UpgradeError::DecodingJustificationBlock)?;
let justifications = sp_runtime::Justifications::from((*b"FRNK", justification));
transaction.put_vec(columns::JUSTIFICATIONS, &key, justifications.encode());
}
}
- db.write(transaction).map_err(db_err)?;
+ db.write(transaction)?;
Ok(())
}
/// Reads current database version from the file at given path.
/// If the file does not exist returns 0.
-fn current_version(path: &Path) -> sp_blockchain::Result {
- let unknown_version_err = || sp_blockchain::Error::Backend("Unknown database version".into());
-
+fn current_version(path: &Path) -> UpgradeResult {
match fs::File::open(version_file_path(path)) {
- Err(ref err) if err.kind() == ErrorKind::NotFound => Ok(0),
- Err(_) => Err(unknown_version_err()),
+ Err(ref err) if err.kind() == ErrorKind::NotFound =>
+ Err(UpgradeError::MissingDatabaseVersionFile),
+ Err(_) => Err(UpgradeError::UnknownDatabaseVersion),
Ok(mut file) => {
let mut s = String::new();
- file.read_to_string(&mut s).map_err(|_| unknown_version_err())?;
- u32::from_str_radix(&s, 10).map_err(|_| unknown_version_err())
+ file.read_to_string(&mut s).map_err(|_| UpgradeError::UnknownDatabaseVersion)?;
+ u32::from_str_radix(&s, 10).map_err(|_| UpgradeError::UnknownDatabaseVersion)
},
}
}
-/// Maps database error to client error
-fn db_err(err: std::io::Error) -> sp_blockchain::Error {
- sp_blockchain::Error::Backend(format!("{}", err))
-}
-
/// Writes current database version to the file.
/// Creates a new file if the version file does not exist yet.
-fn update_version(path: &Path) -> sp_blockchain::Result<()> {
- fs::create_dir_all(path).map_err(db_err)?;
- let mut file = fs::File::create(version_file_path(path)).map_err(db_err)?;
- file.write_all(format!("{}", CURRENT_VERSION).as_bytes()).map_err(db_err)?;
+pub fn update_version(path: &Path) -> io::Result<()> {
+ fs::create_dir_all(path)?;
+ let mut file = fs::File::create(version_file_path(path))?;
+ file.write_all(format!("{}", CURRENT_VERSION).as_bytes())?;
Ok(())
}
@@ -158,7 +170,7 @@ fn version_file_path(path: &Path) -> PathBuf {
mod tests {
use super::*;
use crate::{
- tests::Block, DatabaseSettings, DatabaseSettingsSrc, KeepBlocks, TransactionStorageMode,
+ tests::Block, DatabaseSettings, DatabaseSource, KeepBlocks, TransactionStorageMode,
};
use sc_state_db::PruningMode;
@@ -176,7 +188,7 @@ mod tests {
state_cache_size: 0,
state_cache_child_ratio: None,
state_pruning: PruningMode::ArchiveAll,
- source: DatabaseSettingsSrc::RocksDb { path: db_path.to_owned(), cache_size: 128 },
+ source: DatabaseSource::RocksDb { path: db_path.to_owned(), cache_size: 128 },
keep_blocks: KeepBlocks::All,
transaction_storage: TransactionStorageMode::BlockBody,
},
diff --git a/substrate/client/db/src/utils.rs b/substrate/client/db/src/utils.rs
index fc2324f35a..95cf698c24 100644
--- a/substrate/client/db/src/utils.rs
+++ b/substrate/client/db/src/utils.rs
@@ -19,11 +19,11 @@
//! Db-based backend utility structures and functions, used by both
//! full and light storages.
-use std::{convert::TryInto, sync::Arc};
+use std::{convert::TryInto, fmt, io, path::Path, sync::Arc};
use log::debug;
-use crate::{Database, DatabaseSettings, DatabaseSettingsSrc, DbHash};
+use crate::{Database, DatabaseSettings, DatabaseSource, DbHash};
use codec::Decode;
use sp_database::Transaction;
use sp_runtime::{
@@ -204,88 +204,170 @@ where
})
}
+fn backend_err(feat: &'static str) -> sp_blockchain::Error {
+ sp_blockchain::Error::Backend(feat.to_string())
+}
+
/// Opens the configured database.
pub fn open_database(
config: &DatabaseSettings,
db_type: DatabaseType,
) -> sp_blockchain::Result>> {
- #[allow(unused)]
- fn db_open_error(feat: &'static str) -> sp_blockchain::Error {
- sp_blockchain::Error::Backend(format!(
- "`{}` feature not enabled, database can not be opened",
- feat
- ))
- }
-
let db: Arc> = match &config.source {
- #[cfg(any(feature = "with-kvdb-rocksdb", test))]
- DatabaseSettingsSrc::RocksDb { path, cache_size } => {
- // first upgrade database to required version
- crate::upgrade::upgrade_db::(&path, db_type)?;
-
- // and now open database assuming that it has the latest version
- let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(NUM_COLUMNS);
- let path = path
- .to_str()
- .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?;
-
- let mut memory_budget = std::collections::HashMap::new();
- match db_type {
- DatabaseType::Full => {
- let state_col_budget = (*cache_size as f64 * 0.9) as usize;
- let other_col_budget =
- (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1);
-
- for i in 0..NUM_COLUMNS {
- if i == crate::columns::STATE {
- memory_budget.insert(i, state_col_budget);
- } else {
- memory_budget.insert(i, other_col_budget);
- }
- }
- log::trace!(
- target: "db",
- "Open RocksDB database at {}, state column budget: {} MiB, others({}) column cache: {} MiB",
- path,
- state_col_budget,
- NUM_COLUMNS,
- other_col_budget,
- );
- },
- DatabaseType::Light => {
- let col_budget = cache_size / (NUM_COLUMNS as usize);
- for i in 0..NUM_COLUMNS {
- memory_budget.insert(i, col_budget);
- }
- log::trace!(
- target: "db",
- "Open RocksDB light database at {}, column cache: {} MiB",
- path,
- col_budget,
- );
- },
+ DatabaseSource::ParityDb { path } => open_parity_db::(&path, db_type, true)?,
+ DatabaseSource::RocksDb { path, cache_size } =>
+ open_kvdb_rocksdb::(&path, db_type, true, *cache_size)?,
+ DatabaseSource::Custom(db) => db.clone(),
+ DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size } => {
+ // check if rocksdb exists first, if not, open paritydb
+ match open_kvdb_rocksdb::(&rocksdb_path, db_type, false, *cache_size) {
+ Ok(db) => db,
+ Err(OpenDbError::NotEnabled(_)) | Err(OpenDbError::DoesNotExist) =>
+ open_parity_db::(&paritydb_path, db_type, true)?,
+ Err(_) => return Err(backend_err("cannot open rocksdb. corrupted database")),
}
- db_config.memory_budget = memory_budget;
-
- let db = kvdb_rocksdb::Database::open(&db_config, &path)
- .map_err(|err| sp_blockchain::Error::Backend(format!("{}", err)))?;
- sp_database::as_database(db)
},
- #[cfg(not(any(feature = "with-kvdb-rocksdb", test)))]
- DatabaseSettingsSrc::RocksDb { .. } => return Err(db_open_error("with-kvdb-rocksdb")),
- #[cfg(feature = "with-parity-db")]
- DatabaseSettingsSrc::ParityDb { path } => crate::parity_db::open(&path, db_type)
- .map_err(|e| sp_blockchain::Error::Backend(format!("{}", e)))?,
- #[cfg(not(feature = "with-parity-db"))]
- DatabaseSettingsSrc::ParityDb { .. } => return Err(db_open_error("with-parity-db")),
- DatabaseSettingsSrc::Custom(db) => db.clone(),
};
check_database_type(&*db, db_type)?;
-
Ok(db)
}
+#[derive(Debug)]
+enum OpenDbError {
+ // constructed only when rocksdb and paritydb are disabled
+ #[allow(dead_code)]
+ NotEnabled(&'static str),
+ DoesNotExist,
+ Internal(String),
+}
+
+type OpenDbResult = Result>, OpenDbError>;
+
+impl fmt::Display for OpenDbError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ OpenDbError::Internal(e) => write!(f, "{}", e.to_string()),
+ OpenDbError::DoesNotExist => write!(f, "Database does not exist at given location"),
+ OpenDbError::NotEnabled(feat) =>
+ write!(f, "`{}` feature not enabled, database can not be opened", feat),
+ }
+ }
+}
+
+impl From for sp_blockchain::Error {
+ fn from(err: OpenDbError) -> Self {
+ sp_blockchain::Error::Backend(err.to_string())
+ }
+}
+
+#[cfg(feature = "with-parity-db")]
+impl From for OpenDbError {
+ fn from(err: parity_db::Error) -> Self {
+ if err.to_string().contains("use open_or_create") {
+ OpenDbError::DoesNotExist
+ } else {
+ OpenDbError::Internal(err.to_string())
+ }
+ }
+}
+
+impl From for OpenDbError {
+ fn from(err: io::Error) -> Self {
+ if err.to_string().contains("create_if_missing is false") {
+ OpenDbError::DoesNotExist
+ } else {
+ OpenDbError::Internal(err.to_string())
+ }
+ }
+}
+
+#[cfg(feature = "with-parity-db")]
+fn open_parity_db(path: &Path, db_type: DatabaseType, create: bool) -> OpenDbResult {
+ let db = crate::parity_db::open(path, db_type, create)?;
+ Ok(db)
+}
+
+#[cfg(not(feature = "with-parity-db"))]
+fn open_parity_db(
+ _path: &Path,
+ _db_type: DatabaseType,
+ _create: bool,
+) -> OpenDbResult {
+ Err(OpenDbError::NotEnabled("with-parity-db"))
+}
+
+#[cfg(any(feature = "with-kvdb-rocksdb", test))]
+fn open_kvdb_rocksdb(
+ path: &Path,
+ db_type: DatabaseType,
+ create: bool,
+ cache_size: usize,
+) -> OpenDbResult {
+ // first upgrade database to required version
+ match crate::upgrade::upgrade_db::(&path, db_type) {
+ // in case of missing version file, assume that database simply does not exist at given location
+ Ok(_) | Err(crate::upgrade::UpgradeError::MissingDatabaseVersionFile) => (),
+ Err(err) => return Err(io::Error::new(io::ErrorKind::Other, err.to_string()).into()),
+ }
+
+ // and now open database assuming that it has the latest version
+ let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(NUM_COLUMNS);
+ db_config.create_if_missing = create;
+
+ let mut memory_budget = std::collections::HashMap::new();
+ match db_type {
+ DatabaseType::Full => {
+ let state_col_budget = (cache_size as f64 * 0.9) as usize;
+ let other_col_budget = (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1);
+
+ for i in 0..NUM_COLUMNS {
+ if i == crate::columns::STATE {
+ memory_budget.insert(i, state_col_budget);
+ } else {
+ memory_budget.insert(i, other_col_budget);
+ }
+ }
+ log::trace!(
+ target: "db",
+ "Open RocksDB database at {:?}, state column budget: {} MiB, others({}) column cache: {} MiB",
+ path,
+ state_col_budget,
+ NUM_COLUMNS,
+ other_col_budget,
+ );
+ },
+ DatabaseType::Light => {
+ let col_budget = cache_size / (NUM_COLUMNS as usize);
+ for i in 0..NUM_COLUMNS {
+ memory_budget.insert(i, col_budget);
+ }
+ log::trace!(
+ target: "db",
+ "Open RocksDB light database at {:?}, column cache: {} MiB",
+ path,
+ col_budget,
+ );
+ },
+ }
+ db_config.memory_budget = memory_budget;
+
+ let db = kvdb_rocksdb::Database::open(&db_config, path)?;
+ // write database version only after the database is successfully opened
+ crate::upgrade::update_version(path)?;
+ Ok(sp_database::as_database(db))
+}
+
+#[cfg(not(any(feature = "with-kvdb-rocksdb", test)))]
+fn open_kvdb_rocksdb(
+ _path: &Path,
+ _db_type: DatabaseType,
+ _create: bool,
+ _cache_size: usize,
+) -> OpenDbResult {
+ Err(OpenDbError::NotEnabled("with-kvdb-rocksdb"))
+}
+
/// Check database type.
pub fn check_database_type(
db: &dyn Database,
@@ -482,7 +564,9 @@ impl<'a, 'b> codec::Input for JoinInput<'a, 'b> {
#[cfg(test)]
mod tests {
use super::*;
+ use crate::{KeepBlocks, TransactionStorageMode};
use codec::Input;
+ use sc_state_db::PruningMode;
use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper};
type Block = RawBlock>;
@@ -521,4 +605,141 @@ mod tests {
assert_eq!(test, [7, 8, 6]);
assert_eq!(joined.remaining_len().unwrap(), Some(0));
}
+
+ fn db_settings(source: DatabaseSource) -> DatabaseSettings {
+ DatabaseSettings {
+ state_cache_size: 0,
+ state_cache_child_ratio: None,
+ state_pruning: PruningMode::ArchiveAll,
+ source,
+ keep_blocks: KeepBlocks::All,
+ transaction_storage: TransactionStorageMode::BlockBody,
+ }
+ }
+
+ #[cfg(feature = "with-parity-db")]
+ #[cfg(any(feature = "with-kvdb-rocksdb", test))]
+ #[test]
+ fn test_open_database_auto_new() {
+ let db_dir = tempfile::TempDir::new().unwrap();
+ let db_path = db_dir.path().to_owned();
+ let paritydb_path = db_path.join("paritydb");
+ let rocksdb_path = db_path.join("rocksdb_path");
+ let source = DatabaseSource::Auto {
+ paritydb_path: paritydb_path.clone(),
+ rocksdb_path: rocksdb_path.clone(),
+ cache_size: 128,
+ };
+ let mut settings = db_settings(source);
+
+ // it should create new auto (paritydb) database
+ {
+ let db_res = open_database::(&settings, DatabaseType::Full);
+ assert!(db_res.is_ok(), "New database should be created.");
+ }
+
+ // it should reopen existing auto (paritydb) database
+ {
+ let db_res = open_database::(&settings, DatabaseType::Full);
+ assert!(db_res.is_ok(), "Existing parity database should be reopened");
+ }
+
+ // it should create a new rocksdb database when pointed directly at the rocksdb path
+ {
+ settings.source = DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 };
+ let db_res = open_database::(&settings, DatabaseType::Full);
+ assert!(db_res.is_ok(), "New database should be opened.");
+ }
+
+ // it should reopen the existing paritydb database
+ {
+ settings.source = DatabaseSource::ParityDb { path: paritydb_path };
+ let db_res = open_database::(&settings, DatabaseType::Full);
+ assert!(db_res.is_ok(), "Existing parity database should be reopened");
+ }
+ }
+
+ #[cfg(feature = "with-parity-db")]
+ #[cfg(any(feature = "with-kvdb-rocksdb", test))]
+ #[test]
+ fn test_open_database_rocksdb_new() {
+ let db_dir = tempfile::TempDir::new().unwrap();
+ let db_path = db_dir.path().to_owned();
+ let paritydb_path = db_path.join("paritydb");
+ let rocksdb_path = db_path.join("rocksdb_path");
+
+ let source = DatabaseSource::RocksDb { path: rocksdb_path.clone(), cache_size: 128 };
+ let mut settings = db_settings(source);
+
+ // it should create new rocksdb database
+ {
+ let db_res = open_database::(&settings, DatabaseType::Full);
+ assert!(db_res.is_ok(), "New rocksdb database should be created");
+ }
+
+ // it should reopen existing auto (rocksdb) database
+ {
+ settings.source = DatabaseSource::Auto {
+ paritydb_path: paritydb_path.clone(),
+ rocksdb_path: rocksdb_path.clone(),
+ cache_size: 128,
+ };
+ let db_res = open_database::(&settings, DatabaseType::Full);
+ assert!(db_res.is_ok(), "Existing rocksdb database should be reopened");
+ }
+
+ // it should create a new paritydb database at the paritydb path
+ {
+ settings.source = DatabaseSource::ParityDb { path: paritydb_path };
+ let db_res = open_database::(&settings, DatabaseType::Full);
+ assert!(db_res.is_ok(), "New paritydb database should be created");
+ }
+
+ // it should reopen the existing rocksdb database
+ {
+ settings.source = DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 };
+ let db_res = open_database::(&settings, DatabaseType::Full);
+ assert!(db_res.is_ok(), "Existing rocksdb database should be reopened");
+ }
+ }
+
+ #[cfg(feature = "with-parity-db")]
+ #[cfg(any(feature = "with-kvdb-rocksdb", test))]
+ #[test]
+ fn test_open_database_paritydb_new() {
+ let db_dir = tempfile::TempDir::new().unwrap();
+ let db_path = db_dir.path().to_owned();
+ let paritydb_path = db_path.join("paritydb");
+ let rocksdb_path = db_path.join("rocksdb_path");
+
+ let source = DatabaseSource::ParityDb { path: paritydb_path.clone() };
+ let mut settings = db_settings(source);
+
+ // it should create new paritydb database
+ {
+ let db_res = open_database::(&settings, DatabaseType::Full);
+ assert!(db_res.is_ok(), "New database should be created.");
+ }
+
+ // it should reopen existing paritydb database
+ {
+ let db_res = open_database::(&settings, DatabaseType::Full);
+ assert!(db_res.is_ok(), "Existing parity database should be reopened");
+ }
+
+ // it should create a new rocksdb database at the rocksdb path
+ {
+ settings.source =
+ DatabaseSource::RocksDb { path: rocksdb_path.clone(), cache_size: 128 };
+ let db_res = open_database::(&settings, DatabaseType::Full);
+ assert!(db_res.is_ok(), "New rocksdb database should be created");
+ }
+
+ // it should reopen existing auto (paritydb) database
+ {
+ settings.source = DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size: 128 };
+ let db_res = open_database::(&settings, DatabaseType::Full);
+ assert!(db_res.is_ok(), "Existing parity database should be reopened");
+ }
+ }
}
diff --git a/substrate/client/service/src/config.rs b/substrate/client/service/src/config.rs
index c915978f53..6b10545886 100644
--- a/substrate/client/service/src/config.rs
+++ b/substrate/client/service/src/config.rs
@@ -19,10 +19,7 @@
//! Service configuration.
pub use sc_client_api::execution_extensions::{ExecutionStrategies, ExecutionStrategy};
-pub use sc_client_db::{
- Database, DatabaseSettingsSrc as DatabaseConfig, KeepBlocks, PruningMode,
- TransactionStorageMode,
-};
+pub use sc_client_db::{Database, DatabaseSource, KeepBlocks, PruningMode, TransactionStorageMode};
pub use sc_executor::WasmExecutionMethod;
pub use sc_network::{
config::{
@@ -69,7 +66,7 @@ pub struct Configuration {
/// Remote URI to connect to for async keystore support
pub keystore_remote: Option,
/// Configuration for the database.
- pub database: DatabaseConfig,
+ pub database: DatabaseSource,
/// Size of internal state cache in Bytes
pub state_cache_size: usize,
/// Size in percent of cache size dedicated to child tries
diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs
index 5791165e53..b1dcc615a4 100644
--- a/substrate/client/service/src/lib.rs
+++ b/substrate/client/service/src/lib.rs
@@ -59,7 +59,7 @@ pub use self::{
error::Error,
};
pub use config::{
- BasePath, Configuration, DatabaseConfig, KeepBlocks, PruningMode, Role, RpcMethods,
+ BasePath, Configuration, DatabaseSource, KeepBlocks, PruningMode, Role, RpcMethods,
TaskExecutor, TaskType, TransactionStorageMode,
};
pub use sc_chain_spec::{
diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs
index 6ac149677b..01d46c9678 100644
--- a/substrate/client/service/test/src/client/mod.rs
+++ b/substrate/client/service/test/src/client/mod.rs
@@ -22,7 +22,7 @@ use parity_scale_codec::{Decode, Encode, Joiner};
use sc_block_builder::BlockBuilderProvider;
use sc_client_api::{in_mem, BlockBackend, BlockchainEvents, StorageProvider};
use sc_client_db::{
- Backend, DatabaseSettings, DatabaseSettingsSrc, KeepBlocks, PruningMode, TransactionStorageMode,
+ Backend, DatabaseSettings, DatabaseSource, KeepBlocks, PruningMode, TransactionStorageMode,
};
use sc_consensus::{
BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult,
@@ -1433,7 +1433,7 @@ fn doesnt_import_blocks_that_revert_finality() {
state_pruning: PruningMode::ArchiveAll,
keep_blocks: KeepBlocks::All,
transaction_storage: TransactionStorageMode::BlockBody,
- source: DatabaseSettingsSrc::RocksDb { path: tmp.path().into(), cache_size: 1024 },
+ source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 },
},
u64::MAX,
)
@@ -1648,7 +1648,7 @@ fn returns_status_for_pruned_blocks() {
state_pruning: PruningMode::keep_blocks(1),
keep_blocks: KeepBlocks::All,
transaction_storage: TransactionStorageMode::BlockBody,
- source: DatabaseSettingsSrc::RocksDb { path: tmp.path().into(), cache_size: 1024 },
+ source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 },
},
u64::MAX,
)
diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs
index 9433ed0bde..87153c2736 100644
--- a/substrate/client/service/test/src/lib.rs
+++ b/substrate/client/service/test/src/lib.rs
@@ -29,7 +29,7 @@ use sc_network::{
};
use sc_service::{
client::Client,
- config::{BasePath, DatabaseConfig, KeystoreConfig},
+ config::{BasePath, DatabaseSource, KeystoreConfig},
ChainSpecExtension, Configuration, Error, GenericChainSpec, KeepBlocks, Role, RuntimeGenesis,
SpawnTaskHandle, TaskExecutor, TaskManager, TransactionStorageMode,
};
@@ -236,7 +236,7 @@ fn node_config<
network: network_config,
keystore_remote: Default::default(),
keystore: KeystoreConfig::Path { path: root.join("key"), password: None },
- database: DatabaseConfig::RocksDb { path: root.join("db"), cache_size: 128 },
+ database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 },
state_cache_size: 16777216,
state_cache_child_ratio: None,
state_pruning: Default::default(),
diff --git a/substrate/primitives/database/src/lib.rs b/substrate/primitives/database/src/lib.rs
index ed5d93ed5b..d30c7eb332 100644
--- a/substrate/primitives/database/src/lib.rs
+++ b/substrate/primitives/database/src/lib.rs
@@ -103,6 +103,13 @@ pub trait Database>: Send + Sync {
fn with_get(&self, col: ColumnId, key: &[u8], f: &mut dyn FnMut(&[u8])) {
self.get(col, key).map(|v| f(&v));
}
+
+ /// Check if database supports internal ref counting for state data.
+ ///
+ /// For backwards compatibility returns `false` by default.
+ fn supports_ref_counting(&self) -> bool {
+ false
+ }
}
impl std::fmt::Debug for dyn Database {
diff --git a/substrate/test-utils/test-runner/src/lib.rs b/substrate/test-utils/test-runner/src/lib.rs
index 9f0a8d5d6c..ed0cc222bf 100644
--- a/substrate/test-utils/test-runner/src/lib.rs
+++ b/substrate/test-utils/test-runner/src/lib.rs
@@ -42,7 +42,7 @@
//! use sc_finality_grandpa::GrandpaBlockImport;
//! use sc_service::{
//! TFullBackend, TFullClient, Configuration, TaskManager, new_full_parts, BasePath,
-//! DatabaseConfig, KeepBlocks, TransactionStorageMode, ChainSpec, Role,
+//! DatabaseSource, KeepBlocks, TransactionStorageMode, ChainSpec, Role,
//! config::{NetworkConfiguration, KeystoreConfig},
//! };
//! use std::sync::Arc;
diff --git a/substrate/test-utils/test-runner/src/utils.rs b/substrate/test-utils/test-runner/src/utils.rs
index e0176fcb6c..2fe3a98d44 100644
--- a/substrate/test-utils/test-runner/src/utils.rs
+++ b/substrate/test-utils/test-runner/src/utils.rs
@@ -25,7 +25,7 @@ use sc_network::{
multiaddr,
};
use sc_service::{
- config::KeystoreConfig, BasePath, ChainSpec, Configuration, DatabaseConfig, KeepBlocks,
+ config::KeystoreConfig, BasePath, ChainSpec, Configuration, DatabaseSource, KeepBlocks,
TaskExecutor, TaskType, TransactionStorageMode,
};
use sp_keyring::sr25519::Keyring::Alice;
@@ -79,7 +79,7 @@ pub fn default_config(
transaction_pool: Default::default(),
network: network_config,
keystore: KeystoreConfig::Path { path: root_path.join("key"), password: None },
- database: DatabaseConfig::RocksDb { path: root_path.join("db"), cache_size: 128 },
+ database: DatabaseSource::RocksDb { path: root_path.join("db"), cache_size: 128 },
state_cache_size: 16777216,
state_cache_child_ratio: None,
chain_spec,
diff --git a/substrate/utils/browser/src/lib.rs b/substrate/utils/browser/src/lib.rs
index 0870ea8429..6cd35f22bf 100644
--- a/substrate/utils/browser/src/lib.rs
+++ b/substrate/utils/browser/src/lib.rs
@@ -27,7 +27,7 @@ use log::{debug, info};
use sc_chain_spec::Extension;
use sc_network::config::TransportConfig;
use sc_service::{
- config::{DatabaseConfig, KeystoreConfig, NetworkConfiguration},
+ config::{DatabaseSource, KeystoreConfig, NetworkConfiguration},
Configuration, GenericChainSpec, KeepBlocks, Role, RpcHandlers, RpcSession, RuntimeGenesis,
TaskManager, TransactionStorageMode,
};
@@ -83,7 +83,7 @@ where
info!("Opening Indexed DB database '{}'...", name);
let db = kvdb_memorydb::create(10);
- DatabaseConfig::Custom(sp_database::as_database(db))
+ DatabaseSource::Custom(sp_database::as_database(db))
},
keystore_remote: Default::default(),
keystore: KeystoreConfig::InMemory,