Remove the --unsafe-pruning CLI-argument (step 1) (#10995)

* sc-client-db: utils::open_database(...) — return OpenDbError so that the caller could tell the `OpenDbError::DoesNotExist` clearly

* sc-client-db: utils::open_database(..) — accept the `create: bool` argument

* sc-client-db: pruning — optional argument in the DatabaseSettings

* sc-state-db: Split `Error<E>` into separate `Error<E>` and `StateDbError`

* StateDb::open: choose the pruning-mode depending on the requested and stored values

* sc-state-db: test for different combinations of stored and requested pruning-modes

* CLI-argument: mark the unsafe-pruning as deprecated

* Fix tests

* tests: do not specify --pruning when running Substrate over existing storage

* fix types for benches

* cargo fmt

* Check whether the pruning-mode and sync-mode are compatible

* cargo fmt

* parity-db: 0.3.11 -> 0.3.12

* sc-state-db: MetaDb::set_meta — a better doc-test

* cargo fmt

* make MetaDb read-only again!

* Remove the stray newline (and run the CI once again please)

* Last nitpicks

* A more comprehensive error message
This commit is contained in:
Roman Gafiyatullin
2022-05-06 13:07:44 +03:00
committed by GitHub
parent 994f8076b1
commit 729cba9d9e
23 changed files with 546 additions and 338 deletions
+94 -74
View File
@@ -23,7 +23,7 @@ use std::{fmt, fs, io, path::Path, sync::Arc};
use log::{debug, info};
use crate::{Database, DatabaseSettings, DatabaseSource, DbHash};
use crate::{Database, DatabaseSource, DbHash};
use codec::Decode;
use sp_database::Transaction;
use sp_runtime::{
@@ -177,41 +177,42 @@ where
})
}
fn backend_err(feat: &'static str) -> sp_blockchain::Error {
sp_blockchain::Error::Backend(feat.to_string())
}
/// Opens the configured database.
pub fn open_database<Block: BlockT>(
config: &DatabaseSettings,
db_source: &DatabaseSource,
db_type: DatabaseType,
) -> sp_blockchain::Result<Arc<dyn Database<DbHash>>> {
create: bool,
) -> OpenDbResult {
// Maybe migrate (copy) the database to a type specific subdirectory to make it
// possible that light and full databases coexist
// NOTE: This function can be removed in a few releases
maybe_migrate_to_type_subdir::<Block>(&config.source, db_type).map_err(|e| {
sp_blockchain::Error::Backend(format!("Error in migration to role subdirectory: {}", e))
})?;
maybe_migrate_to_type_subdir::<Block>(db_source, db_type)?;
open_database_at::<Block>(&config.source, db_type)
open_database_at::<Block>(db_source, db_type, create)
}
fn open_database_at<Block: BlockT>(
source: &DatabaseSource,
db_source: &DatabaseSource,
db_type: DatabaseType,
) -> sp_blockchain::Result<Arc<dyn Database<DbHash>>> {
let db: Arc<dyn Database<DbHash>> = match &source {
DatabaseSource::ParityDb { path } => open_parity_db::<Block>(path, db_type, true)?,
create: bool,
) -> OpenDbResult {
let db: Arc<dyn Database<DbHash>> = match &db_source {
DatabaseSource::ParityDb { path } => open_parity_db::<Block>(path, db_type, create)?,
DatabaseSource::RocksDb { path, cache_size } =>
open_kvdb_rocksdb::<Block>(path, db_type, true, *cache_size)?,
DatabaseSource::Custom(db) => db.clone(),
open_kvdb_rocksdb::<Block>(path, db_type, create, *cache_size)?,
DatabaseSource::Custom { db, require_create_flag } => {
if *require_create_flag && !create {
return Err(OpenDbError::DoesNotExist)
}
db.clone()
},
DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size } => {
// check if rocksdb exists first, if not, open paritydb
match open_kvdb_rocksdb::<Block>(rocksdb_path, db_type, false, *cache_size) {
Ok(db) => db,
Err(OpenDbError::NotEnabled(_)) | Err(OpenDbError::DoesNotExist) =>
open_parity_db::<Block>(paritydb_path, db_type, true)?,
Err(_) => return Err(backend_err("cannot open rocksdb. corrupted database")),
open_parity_db::<Block>(paritydb_path, db_type, create)?,
Err(as_is) => return Err(as_is),
}
},
};
@@ -221,12 +222,17 @@ fn open_database_at<Block: BlockT>(
}
#[derive(Debug)]
enum OpenDbError {
pub enum OpenDbError {
// constructed only when rocksdb and paritydb are disabled
#[allow(dead_code)]
NotEnabled(&'static str),
DoesNotExist,
Internal(String),
DatabaseError(sp_database::error::DatabaseError),
UnexpectedDbType {
expected: DatabaseType,
found: Vec<u8>,
},
}
type OpenDbResult = Result<Arc<dyn Database<DbHash>>, OpenDbError>;
@@ -239,6 +245,17 @@ impl fmt::Display for OpenDbError {
OpenDbError::NotEnabled(feat) => {
write!(f, "`{}` feature not enabled, database can not be opened", feat)
},
OpenDbError::DatabaseError(db_error) => {
write!(f, "Database Error: {}", db_error)
},
OpenDbError::UnexpectedDbType { expected, found } => {
write!(
f,
"Unexpected DB-Type. Expected: {:?}, Found: {:?}",
expected.as_str().as_bytes(),
found
)
},
}
}
}
@@ -356,19 +373,19 @@ fn open_kvdb_rocksdb<Block: BlockT>(
pub fn check_database_type(
db: &dyn Database<DbHash>,
db_type: DatabaseType,
) -> sp_blockchain::Result<()> {
) -> Result<(), OpenDbError> {
match db.get(COLUMN_META, meta_keys::TYPE) {
Some(stored_type) =>
if db_type.as_str().as_bytes() != &*stored_type {
return Err(sp_blockchain::Error::Backend(format!(
"Unexpected database type. Expected: {}",
db_type.as_str()
)))
return Err(OpenDbError::UnexpectedDbType {
expected: db_type,
found: stored_type.to_owned(),
})
},
None => {
let mut transaction = Transaction::new();
transaction.set(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes());
db.commit(transaction)?;
db.commit(transaction).map_err(OpenDbError::DatabaseError)?;
},
}
@@ -378,7 +395,7 @@ pub fn check_database_type(
fn maybe_migrate_to_type_subdir<Block: BlockT>(
source: &DatabaseSource,
db_type: DatabaseType,
) -> io::Result<()> {
) -> Result<(), OpenDbError> {
if let Some(p) = source.path() {
let mut basedir = p.to_path_buf();
basedir.pop();
@@ -393,14 +410,14 @@ fn maybe_migrate_to_type_subdir<Block: BlockT>(
// database stored in the target directory and close the database on success.
let mut old_source = source.clone();
old_source.set_path(&basedir);
open_database_at::<Block>(&old_source, db_type)
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
open_database_at::<Block>(&old_source, db_type, false)?;
info!(
"Migrating database to a database-type-based subdirectory: '{:?}' -> '{:?}'",
basedir,
basedir.join(db_type.as_str())
);
let mut tmp_dir = basedir.clone();
tmp_dir.pop();
tmp_dir.push("tmp");
@@ -580,9 +597,7 @@ impl<'a, 'b> codec::Input for JoinInput<'a, 'b> {
#[cfg(test)]
mod tests {
use super::*;
use crate::KeepBlocks;
use codec::Input;
use sc_state_db::PruningMode;
use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper};
use std::path::PathBuf;
type Block = RawBlock<ExtrinsicWrapper<u32>>;
@@ -601,18 +616,17 @@ mod tests {
let old_db_path = base_path.path().join("chains/dev/db");
source.set_path(&old_db_path);
let settings = db_settings(source.clone());
{
let db_res = open_database::<Block>(&settings, db_type);
let db_res = open_database::<Block>(&source, db_type, true);
assert!(db_res.is_ok(), "New database should be created.");
assert!(old_db_path.join(db_check_file).exists());
assert!(!old_db_path.join(db_type.as_str()).join("db_version").exists());
}
source.set_path(&old_db_path.join(db_type.as_str()));
let settings = db_settings(source);
let db_res = open_database::<Block>(&settings, db_type);
let db_res = open_database::<Block>(&source, db_type, true);
assert!(db_res.is_ok(), "Reopening the db with the same role should work");
// check if the database dir had been migrated
assert!(!old_db_path.join(db_check_file).exists());
@@ -638,9 +652,8 @@ mod tests {
let old_db_path = base_path.path().join("chains/dev/db");
let source = DatabaseSource::RocksDb { path: old_db_path.clone(), cache_size: 128 };
let settings = db_settings(source);
{
let db_res = open_database::<Block>(&settings, DatabaseType::Full);
let db_res = open_database::<Block>(&source, DatabaseType::Full, true);
assert!(db_res.is_ok(), "New database should be created.");
// check if the database dir had been migrated
@@ -689,16 +702,6 @@ mod tests {
assert_eq!(joined.remaining_len().unwrap(), Some(0));
}
fn db_settings(source: DatabaseSource) -> DatabaseSettings {
DatabaseSettings {
state_cache_size: 0,
state_cache_child_ratio: None,
state_pruning: PruningMode::ArchiveAll,
source,
keep_blocks: KeepBlocks::All,
}
}
#[cfg(feature = "with-parity-db")]
#[cfg(any(feature = "with-kvdb-rocksdb", test))]
#[test]
@@ -712,31 +715,36 @@ mod tests {
rocksdb_path: rocksdb_path.clone(),
cache_size: 128,
};
let mut settings = db_settings(source);
// it should create new auto (paritydb) database
{
let db_res = open_database::<Block>(&settings, DatabaseType::Full);
let db_res = open_database::<Block>(&source, DatabaseType::Full, true);
assert!(db_res.is_ok(), "New database should be created.");
}
// it should reopen existing auto (paritydb) database
{
let db_res = open_database::<Block>(&settings, DatabaseType::Full);
let db_res = open_database::<Block>(&source, DatabaseType::Full, true);
assert!(db_res.is_ok(), "Existing parity database should be reopened");
}
// it should fail to open existing auto (paritydb) database
{
settings.source = DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 };
let db_res = open_database::<Block>(&settings, DatabaseType::Full);
let db_res = open_database::<Block>(
&DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 },
DatabaseType::Full,
true,
);
assert!(db_res.is_ok(), "New database should be opened.");
}
// it should reopen existing auto (paritydb) database
{
settings.source = DatabaseSource::ParityDb { path: paritydb_path };
let db_res = open_database::<Block>(&settings, DatabaseType::Full);
let db_res = open_database::<Block>(
&DatabaseSource::ParityDb { path: paritydb_path },
DatabaseType::Full,
true,
);
assert!(db_res.is_ok(), "Existing parity database should be reopened");
}
}
@@ -751,36 +759,44 @@ mod tests {
let rocksdb_path = db_path.join("rocksdb_path");
let source = DatabaseSource::RocksDb { path: rocksdb_path.clone(), cache_size: 128 };
let mut settings = db_settings(source);
// it should create new rocksdb database
{
let db_res = open_database::<Block>(&settings, DatabaseType::Full);
let db_res = open_database::<Block>(&source, DatabaseType::Full, true);
assert!(db_res.is_ok(), "New rocksdb database should be created");
}
// it should reopen existing auto (rocksdb) database
{
settings.source = DatabaseSource::Auto {
paritydb_path: paritydb_path.clone(),
rocksdb_path: rocksdb_path.clone(),
cache_size: 128,
};
let db_res = open_database::<Block>(&settings, DatabaseType::Full);
let db_res = open_database::<Block>(
&DatabaseSource::Auto {
paritydb_path: paritydb_path.clone(),
rocksdb_path: rocksdb_path.clone(),
cache_size: 128,
},
DatabaseType::Full,
true,
);
assert!(db_res.is_ok(), "Existing rocksdb database should be reopened");
}
// it should fail to open existing auto (rocksdb) database
{
settings.source = DatabaseSource::ParityDb { path: paritydb_path };
let db_res = open_database::<Block>(&settings, DatabaseType::Full);
let db_res = open_database::<Block>(
&DatabaseSource::ParityDb { path: paritydb_path },
DatabaseType::Full,
true,
);
assert!(db_res.is_ok(), "New paritydb database should be created");
}
// it should reopen existing auto (paritydb) database
{
settings.source = DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 };
let db_res = open_database::<Block>(&settings, DatabaseType::Full);
let db_res = open_database::<Block>(
&DatabaseSource::RocksDb { path: rocksdb_path, cache_size: 128 },
DatabaseType::Full,
true,
);
assert!(db_res.is_ok(), "Existing rocksdb database should be reopened");
}
}
@@ -795,32 +811,36 @@ mod tests {
let rocksdb_path = db_path.join("rocksdb_path");
let source = DatabaseSource::ParityDb { path: paritydb_path.clone() };
let mut settings = db_settings(source);
// it should create new paritydb database
{
let db_res = open_database::<Block>(&settings, DatabaseType::Full);
let db_res = open_database::<Block>(&source, DatabaseType::Full, true);
assert!(db_res.is_ok(), "New database should be created.");
}
// it should reopen existing paritydb database
{
let db_res = open_database::<Block>(&settings, DatabaseType::Full);
let db_res = open_database::<Block>(&source, DatabaseType::Full, true);
assert!(db_res.is_ok(), "Existing parity database should be reopened");
}
// it should fail to open existing paritydb database
{
settings.source =
DatabaseSource::RocksDb { path: rocksdb_path.clone(), cache_size: 128 };
let db_res = open_database::<Block>(&settings, DatabaseType::Full);
let db_res = open_database::<Block>(
&DatabaseSource::RocksDb { path: rocksdb_path.clone(), cache_size: 128 },
DatabaseType::Full,
true,
);
assert!(db_res.is_ok(), "New rocksdb database should be created");
}
// it should reopen existing auto (paritydb) database
{
settings.source = DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size: 128 };
let db_res = open_database::<Block>(&settings, DatabaseType::Full);
let db_res = open_database::<Block>(
&DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size: 128 },
DatabaseType::Full,
true,
);
assert!(db_res.is_ok(), "Existing parity database should be reopened");
}
}