diff --git a/CHANGELOG.md b/CHANGELOG.md index 78b42de7..9f79f241 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,21 @@ file is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [Unreleased] + +### Changed + +- Enhanced to also support PostgreSQL for backup `create`/`list`/`restore` + /`recover`. + +### Added + +- Introduced a new module `postgresql`. This module was added to allow the + existing backup `create`/`list`/`restore`/`recover` function to also support + PostgreSQL DBs. +- Added `backup::purge_old_backups` to apply immediately after + `num_backups_to_keep` is changed. + ## [0.15.1] - 2023-06-26 ### Added diff --git a/Cargo.toml b/Cargo.toml index e7727949..9cf875dc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,6 +26,7 @@ ipnet = { version = "2", features = ["serde"] } num-derive = "0.3" num-traits = "0.2" postgres-protocol = "0.6" +regex = "1" rand = "0.8" ring = { version = "0.16", features = ["std"] } rocksdb = "0.21" @@ -39,6 +40,7 @@ serde_json = "1" structured = "0.13" strum = "0.24" strum_macros = "0.24" +tar = "0.4" thiserror = "1" tokio = { version = "1", features = ["macros"] } tokio-postgres-rustls = "0.10" diff --git a/src/backup.rs b/src/backup.rs index 09cc4b38..61a89714 100644 --- a/src/backup.rs +++ b/src/backup.rs @@ -1,13 +1,23 @@ //! Database backup utilities. 
+mod postgresql; -use crate::Store; -use anyhow::Result; +#[allow(clippy::module_name_repetitions)] +pub use self::postgresql::BackupConfig; +use self::postgresql::{create_backup_path, purge_old_postgres_backups}; +use crate::{ + backup::postgresql::{postgres_backup, postgres_backup_list, postgres_restore}, + Store, +}; +use anyhow::{anyhow, Result}; use chrono::{DateTime, TimeZone, Utc}; use rocksdb::backup::BackupEngineInfo; -use std::{sync::Arc, time::Duration}; +use std::{path::Path, sync::Arc, time::Duration}; use tokio::sync::{Notify, RwLock}; -use tracing::{info, warn}; +use tracing::info; + +const BACKUP_TIMESTAMP_CONV_UNIT: i64 = 1_000_000_000; +#[derive(Debug, Clone)] #[allow(clippy::module_name_repetitions)] pub struct BackupInfo { pub id: u32, @@ -19,7 +29,7 @@ impl From for BackupInfo { fn from(backup: BackupEngineInfo) -> Self { Self { id: backup.backup_id, - timestamp: Utc.timestamp_nanos(backup.timestamp), + timestamp: Utc.timestamp_nanos(backup.timestamp * BACKUP_TIMESTAMP_CONV_UNIT), size: backup.size, } } @@ -29,8 +39,8 @@ impl From for BackupInfo { #[allow(clippy::module_name_repetitions)] pub async fn schedule_periodic( store: Arc>, + backup_cfg: Arc>, schedule: (Duration, Duration), - backups_to_keep: u32, stop: Arc, ) { use tokio::time::{sleep, Instant}; @@ -43,11 +53,13 @@ pub async fn schedule_periodic( tokio::select! { () = &mut sleep => { sleep.as_mut().reset(Instant::now() + duration); - let _res = create(&store, false, backups_to_keep); + let backup_cfg= backup_cfg.read().await.clone(); + let _res = create(&store, false, &backup_cfg); } _ = stop.notified() => { info!("creating a database backup before shutdown"); - let _res = create(&store, false, backups_to_keep); + let backup_cfg= backup_cfg.read().await.clone(); + let _res = create(&store, false, &backup_cfg); stop.notify_one(); return; } @@ -61,23 +73,26 @@ pub async fn schedule_periodic( /// # Errors /// /// Returns an error if backup fails. 
-pub async fn create(store: &Arc>, flush: bool, backups_to_keep: u32) -> Result<()> { - // TODO: This function should be expanded to support PostgreSQL backups as well. +pub async fn create( + store: &Arc>, + flush: bool, + backup_cfg: &BackupConfig, +) -> Result<()> { info!("backing up database..."); - let res = { - let mut store = store.write().await; - store.backup(flush, backups_to_keep) - }; - match res { - Ok(_) => { - info!("backing up database completed"); - Ok(()) - } - Err(e) => { - warn!("database backup failed: {:?}", e); - Err(e) + { + let mut backup_store = store.write().await; + if let Err(e) = backup_store.backup(flush, backup_cfg.num_of_backups) { + return Err(anyhow!("failed to create key-value database backup: {e:?}")); } } + + let backup_id_list = backup_id_list(store).await?; + if let Err(e) = postgres_backup(backup_cfg, backup_id_list) { + return Err(anyhow!( + "failed to create relational database backup: {e:?}" + )); + } + Ok(()) } /// Lists the backup information of the database. @@ -85,56 +100,98 @@ pub async fn create(store: &Arc>, flush: bool, backups_to_keep: u3 /// # Errors /// /// Returns an error if backup list fails to create -pub async fn list(store: &Arc>) -> Result> { - // TODO: This function should be expanded to support PostgreSQL backups as well. 
- let res = { - let store = store.read().await; - store.get_backup_info() - }; - match res { - Ok(backup_list) => { - info!("generate database backup list"); - Ok(backup_list - .into_iter() - .map(std::convert::Into::into) - .collect()) - } +pub async fn list(store: &Arc>, backup_path: &Path) -> Result> { + let store = store.read().await; + let backup_list = match store.get_backup_info() { + Ok(backup) => backup, Err(e) => { - warn!("failed to generate backup list: {:?}", e); - Err(e) + return Err(anyhow!("failed to generate key-value backup list: {e:?}")); } + }; + let mut backup_list: Vec = backup_list + .into_iter() + .map(std::convert::Into::into) + .collect(); + if let Err(e) = postgres_backup_list(backup_path, &mut backup_list) { + return Err(anyhow!( + "failed to add list information from a relational database: {e:?}" + )); } + Ok(backup_list) } -/// Restores the database from a backup with the specified ID. +/// Restores the database from a backup. If a backup file ID is not provided, +/// restore based on the latest backup. /// /// # Errors /// /// Returns an error if the restore operation fails. -pub async fn restore(store: &Arc>, backup_id: Option) -> Result<()> { - // TODO: This function should be expanded to support PostgreSQL backups as well. 
- info!("restoring database from {:?}", backup_id); - let res = { - let mut store = store.write().await; - match &backup_id { - Some(id) => store.restore_from_backup(*id), - None => store.restore_from_latest_backup(), +pub async fn restore( + store: &Arc>, + cfg: &BackupConfig, + backup_id: Option, +) -> Result { + let backup_id_list = backup_id_list(store).await?; + let backup_id = if let Some(id) = backup_id { + if !backup_id_list.contains(&id) { + return Err(anyhow!("backup {id} is not exist")); } + info!("start database restore {}", id); + id + } else { + let Some(id) = backup_id_list.last() else { + return Err(anyhow!("backup is not exist")); + }; + info!("start database restore from latest backup"); + *id }; - match res { - Ok(_) => { - info!("database restored from backup {:?}", backup_id); - Ok(()) - } - Err(e) => { - warn!( - "failed to restore database from backup {:?}: {:?}", - backup_id, e - ); - Err(e) + let mut store = store.write().await; + if let Err(e) = store.restore_from_backup(backup_id) { + return Err(anyhow!( + "failed to restore key-value database from {backup_id}: {e:?}" + )); + } + if let Err(e) = postgres_restore(cfg, backup_id) { + return Err(anyhow!( + "failed to restore relational database from {backup_id}: {e:?}" + )); + } + Ok(backup_id) +} + +/// Returns the number of backups in the backup list. +/// +/// # Errors +/// +/// Returns an error if getting the number of backup lists fails. +pub async fn count(store: &Arc>) -> Result { + let store = store.write().await; + Ok(store.get_backup_info()?.len()) +} + +/// Remove older backups based on the number of backups retained. +/// +/// # Errors +/// +/// Returns an error if removing old backup fails. 
+pub async fn purge_old_backups( + store: &Arc>, + backup_cfg: &BackupConfig, +) -> Result<()> { + { + let mut backup_store = store.write().await; + if let Err(e) = backup_store.purge_old_backups(backup_cfg.num_of_backups) { + return Err(anyhow!("failed to purge key-value database: {e:?}")); } } + + let backup_id_list = backup_id_list(store).await?; + let data_backup_path = create_backup_path(backup_cfg)?; + if let Err(e) = purge_old_postgres_backups(&data_backup_path, backup_id_list) { + return Err(anyhow!("failed to purge relational database: {e:?}")); + } + Ok(()) } /// Restores the database from a backup with the specified ID. @@ -142,23 +199,38 @@ pub async fn restore(store: &Arc>, backup_id: Option) -> Resu /// # Errors /// /// Returns an error if the restore operation fails. -pub async fn recover(store: &Arc>) -> Result<()> { - // TODO: This function should be expanded to support PostgreSQL backups as well. +pub async fn recover(store: &Arc>, cfg: &BackupConfig) -> Result { info!("recovering database from latest valid backup"); - let res = { - let mut store = store.write().await; - store.recover() - }; - match res { - Ok(_) => { - info!("database recovered from backup"); - Ok(()) - } + let mut store = store.write().await; + let recovery_id = match store.recover() { + Ok(id) => id, Err(e) => { - warn!("failed to recover database from backup: {e:?}"); - Err(e) + return Err(anyhow!( + "failed to recover key-value database from backup: {e:?}" + )); } + }; + + if let Err(e) = postgres_restore(cfg, recovery_id) { + return Err(anyhow!( + "failed to recover relational database from backup {e:?}" + )); + } + Ok(recovery_id) +} + +/// Lists the backup id. 
+/// +/// # Errors +/// +/// Returns an error if backup id list fails to create +#[allow(clippy::module_name_repetitions)] +pub async fn backup_id_list(store: &Arc>) -> Result> { + let store = store.read().await; + match store.get_backup_info() { + Ok(backup) => Ok(backup.into_iter().map(|b| b.backup_id).collect()), + Err(e) => Err(anyhow!("failed to generate backup id list: {e:?}")), } } @@ -250,7 +322,9 @@ mod tests { } // get backup list - let backup_list = list(&store).await.unwrap(); + let backup_list = list(&store, &backup_dir.path().to_path_buf()) + .await + .unwrap(); assert_eq!(backup_list.len(), 3); assert_eq!(backup_list.get(0).unwrap().id, 1); assert_eq!(backup_list.get(1).unwrap().id, 2); diff --git a/src/backup/postgresql.rs b/src/backup/postgresql.rs new file mode 100644 index 00000000..7f60a53c --- /dev/null +++ b/src/backup/postgresql.rs @@ -0,0 +1,471 @@ +use super::BackupInfo; +use anyhow::{anyhow, Context, Result}; +use flate2::{read::GzDecoder, write::GzEncoder, Compression}; +use regex::Regex; +use std::collections::HashSet; +use std::fs::{self, File}; +use std::path::{Path, PathBuf}; +use std::process::{Command, Stdio}; + +const DEFAULT_PATH_ENV: &str = "/usr/bin:/bin"; +const DEFAULT_POSTGRES_DB: &str = "database.db"; +const DEFAULT_POSTGRES_DUMP_FILE: &str = "postgres.dump"; +const DEFAULT_POSTGRES_TEMP_DIR: &str = "/tmp"; +const DEFAULT_HOST_TEMP_DIR: &str = "/data/review/tmp"; // "/data/review" folder should always be pre-created. 
+const DEFAULT_ZIP_DIR: &str = "data"; + +#[derive(Clone, Debug)] +pub struct BackupConfig { + pub backup_path: PathBuf, + pub postgres_db_path: String, + pub postgres_db_dirname: String, + pub container: String, + pub host: String, + pub port: String, + pub user: String, + pub name: String, + pub num_of_backups: u32, +} + +impl BackupConfig { + #[must_use] + pub fn builder() -> BackupConfigBuilder { + BackupConfigBuilder::default() + } +} + +#[derive(Default)] +pub struct BackupConfigBuilder { + backup_path: PathBuf, + postgres_db_path: String, + postgres_db_dirname: String, + container: String, + host: String, + port: String, + user: String, + name: String, + num_of_backups: u32, +} + +impl BackupConfigBuilder { + pub fn backup_path(mut self, backup_path: &Path) -> Self { + self.backup_path = backup_path.to_path_buf(); + self + } + + pub fn container(mut self, container: &str) -> Self { + self.container = container.to_string(); + self + } + + pub fn num_of_backup(mut self, num_of_backups: u32) -> Self { + self.num_of_backups = num_of_backups; + self + } + + pub fn database_dir(mut self, postgres_path: &Path) -> Result { + let Some(postgres_db_path) = postgres_path.to_str() else { + return Err(anyhow!("Failed to parse databse dir path")); + }; + self.postgres_db_path = postgres_db_path.to_string(); + let split_path = postgres_db_path.split('/').collect::>(); + let Some(postgres_db_dirname) = split_path.last() else{ + return Err(anyhow!("Failed to parse databse dir name")); + }; + self.postgres_db_dirname = (*postgres_db_dirname).to_string(); + Ok(self) + } + + pub fn database_url(mut self, database_url: &str) -> Result { + let Ok(reg) = Regex::new(r"postgres://(\w+):(\w+)@([\w\.-]+):(\d+)/(\w+)") else { + return Err(anyhow!("Failed to generate Regex")); + }; + let Some(caps) = reg.captures(database_url) else { + return Err(anyhow!("Failed to capture url")); + }; + self.user = caps[1].to_string(); + self.host = caps[3].to_string(); + self.port = 
caps[4].to_string(); + self.name = caps[5].to_string(); + Ok(self) + } + + pub fn build(self) -> BackupConfig { + BackupConfig { + backup_path: self.backup_path, + postgres_db_path: self.postgres_db_path, + postgres_db_dirname: self.postgres_db_dirname, + container: self.container, + host: self.host, + port: self.port, + user: self.user, + name: self.name, + num_of_backups: self.num_of_backups, + } + } +} + +/// Restore postgres database from the backup with `backup_id` on file +/// +/// # Errors +/// +/// Returns an error when postgres's restoration fails. +pub(super) fn postgres_restore(cfg: &BackupConfig, backup_id: u32) -> Result<()> { + let file_name = format!("{backup_id}.bck"); + let restore_path = cfg.backup_path.join(DEFAULT_POSTGRES_DB).join(file_name); + if !restore_path.exists() { + return Err(anyhow!("backup file not found")); + } + if !Path::new(&cfg.postgres_db_path).exists() { + return Err(anyhow!("Running database not found!")); + } + restore_data(cfg, &restore_path)?; + Ok(()) +} + +/// Return the integrated backup information by adding the postgres backup +/// information to the backup information in rocksdb where the `backup_id` matches. +/// +/// # Errors +/// +/// Returns an error when postgres fails to create a list. +pub(super) fn postgres_backup_list( + backup_path: &Path, + backup_list: &mut [BackupInfo], +) -> Result<()> { + let data_backup_path = format!("{}/{}", backup_path.to_string_lossy(), DEFAULT_POSTGRES_DB); + detail_files(&data_backup_path, backup_list)?; + Ok(()) +} + +/// Backup postgres database and purge the backup by referring the backup +/// list maintained by rocksdb. +/// +/// # Errors +/// +/// Returns an error when postgres's backup fails. 
+pub(super) fn postgres_backup(cfg: &BackupConfig, backup_id_list: Vec) -> Result<()> { + let Some(new_backup_id) = backup_id_list.last() else { + return Err(anyhow!("backup is not exist")); + }; + + //create backup + let data_backup_path = create_postgres_backup(cfg, *new_backup_id)?; + + //purge old backups + purge_old_postgres_backups(&data_backup_path, backup_id_list)?; + Ok(()) +} + +/// purge the backup by referring the backup list maintained by rocksdb. +/// +/// # Errors +/// +/// Returns an error when postgres's purge fails. +pub(super) fn purge_old_postgres_backups( + data_backup_path: &str, + backup_id_list: Vec, +) -> Result<()> { + let files = fs::read_dir(Path::new(&data_backup_path))?; + + let file_list: HashSet = files + .into_iter() + .filter_map(|file| { + file.ok().and_then(|dir_entry| { + let file_name = dir_entry.file_name(); + file_name.to_str().map(std::string::ToString::to_string) + }) + }) + .collect(); + + let backuped_id_list: HashSet = backup_id_list + .into_iter() + .map(|id| format!("{id}.bck")) + .collect(); + + let diff_list = file_list.difference(&backuped_id_list); + for diff in diff_list { + let file_path = format!("/{data_backup_path}/{diff}"); + fs::remove_file(&file_path)?; + } + Ok(()) +} + +/// # Errors +/// * dump command not found +/// * fail to dump +fn create_postgres_backup(cfg: &BackupConfig, new_backup_id: u32) -> Result { + // check database path + if !Path::new(&cfg.postgres_db_path).exists() { + return Err(anyhow!("No database found")); + } + + //create backup folder + let Ok(data_backup_path) = create_backup_path(cfg) else { + return Err(anyhow!("Backup folder creation failed")); + }; + + // mkdir default temporary folder + if !Path::new(DEFAULT_HOST_TEMP_DIR).exists() && fs::create_dir(DEFAULT_HOST_TEMP_DIR).is_err() + { + return Err(anyhow!("Host backup temporary folder creation failed")); + } + + // mkdir temporary folder + let tmpdir = format!("{DEFAULT_HOST_TEMP_DIR}/{new_backup_id}"); + if 
fs::create_dir(&tmpdir).is_err() { + return Err(anyhow!("Backup temporary folder creation failed")); + } + + // backup postgres db + let dump = format!("{tmpdir}/{DEFAULT_POSTGRES_DUMP_FILE}"); + + // dump docker's postgres db + if let Err(e) = postgres_dump_docker(cfg, &dump) { + remove_tmpdir_all(&tmpdir)?; + return Err(e); + } + + // make new backup zip file + let zip_path = format!("/{data_backup_path}/{new_backup_id}.bck"); + if let Err(e) = tar_gz(&tmpdir, &zip_path) { + remove_tmpdir_all(&tmpdir)?; + return Err(e); + } + + //remove temporary files in /tmp + remove_tmpdir_all(&tmpdir)?; + + Ok(data_backup_path) +} + +/// # Errors +/// * dump command not found +/// * fail to dump +fn postgres_dump_docker(cfg: &BackupConfig, to: &str) -> Result<()> { + let dump_file = format!("{DEFAULT_POSTGRES_TEMP_DIR}/{DEFAULT_POSTGRES_DUMP_FILE}"); + let args = vec!["exec", "-i", &cfg.container, "/bin/rm", "-f", &dump_file]; + + run_command("docker", None, &args) + .with_context(|| anyhow!("failed to remove old dump file"))?; + + let args = vec![ + "exec", + "-i", + &cfg.container, + "pg_dump", + "-w", + "-h", + &cfg.host, + "-p", + &cfg.port, + "-U", + &cfg.user, + "-d", + &cfg.name, + "-Fc", + "-f", + &dump_file, + ]; + run_command("docker", None, &args) + .with_context(|| anyhow!("failed to make backup for relational database"))?; + + let from = format!("{}:{dump_file}", &cfg.container); + run_command("docker", None, &["cp", &from, to]) + .with_context(|| anyhow!("failed to copy dump file"))?; + + Ok(()) +} + +/// # Errors +/// * restore command not found +/// * fail to restore +fn postgres_restore_docker(cfg: &BackupConfig, dump: &str) -> Result<()> { + // copy current database folder (For manual restore) + if let Err(e) = run_command( + "cp", + None, + &["-r", &cfg.postgres_db_path, DEFAULT_HOST_TEMP_DIR], + ) { + return Err(anyhow!("fail to copy old database. 
{:?}", e)); + } + + // copy backup file into postgres docker container + let dump_in_docker = format!("{DEFAULT_POSTGRES_TEMP_DIR}/{DEFAULT_POSTGRES_DUMP_FILE}"); + let to = format!("{}:{dump_in_docker}", cfg.container); + run_command("docker", None, &["cp", dump, &to])?; + + // restore dump file + let args = vec![ + "exec", + "-i", + &cfg.container, + "pg_restore", + "-c", + "-h", + &cfg.host, + "-p", + &cfg.port, + "-U", + &cfg.user, + "-d", + &cfg.name, + &dump_in_docker, + ]; + run_command("docker", None, &args)?; + Ok(()) +} + +/// remove temporary files. +/// +/// # Errors +/// * failed to remove dir/files +fn remove_tmpdir_all(path: &str) -> Result<()> { + if path.starts_with(DEFAULT_HOST_TEMP_DIR) && fs::remove_dir_all(path).is_err() { + return Err(anyhow!("Backup temporary folder deletion failed")); + } + Ok(()) +} + +/// # Errors +/// * failed to create backup directory +pub(super) fn create_backup_path(cfg: &BackupConfig) -> Result { + let data_backup_path = format!( + "{}/{}", + cfg.backup_path.to_string_lossy(), + DEFAULT_POSTGRES_DB + ); + if !Path::new(&data_backup_path).exists() { + fs::create_dir(&data_backup_path)?; + } + Ok(data_backup_path) +} + +/// # Errors +/// * get error code from executed command +fn run_command(cmd: &str, path: Option<&[&str]>, args: &[&str]) -> Result<()> { + let mut cmd = Command::new(cmd); + let val = if let Some(path) = path { + let mut temp = DEFAULT_PATH_ENV.to_string(); + for p in path { + temp.push(':'); + temp.push_str(p); + } + temp + } else { + DEFAULT_PATH_ENV.to_string() + }; + cmd.env("PATH", &val); + for arg in args { + if !arg.is_empty() { + cmd.arg(arg); + } + } + + let child = cmd + .stderr(Stdio::inherit()) + .stdout(Stdio::inherit()) + .spawn()?; + + match child.wait_with_output() { + Ok(status) => { + let output_err = String::from_utf8_lossy(&status.stderr); + if !output_err.is_empty() { + return Err(anyhow!("{output_err}")); + } + Ok(()) + } + Err(e) => Err(anyhow::anyhow!("{}", e)), + } +} + +/// 
tar and gzip files +/// # Errors +/// * fail to create backup file in backup path +/// * fail to save the files in tmp folder into backup file +/// * fail to finish for the tar and gzipped backup file +fn tar_gz(from: &str, to: &str) -> Result<(), anyhow::Error> { + let tgz = match File::create(to) { + Ok(ret) => ret, + Err(e) => return Err(anyhow!("failed to create new backup file. {}", e)), + }; + let encoder = GzEncoder::new(tgz, Compression::default()); + let mut tar = tar::Builder::new(encoder); + tar.append_dir_all(DEFAULT_ZIP_DIR, from) + .with_context(|| anyhow!("failed to write data into backup file"))?; + tar.finish() + .with_context(|| anyhow!("failed to finish backup"))?; + Ok(()) +} + +/// unzip and untar backup file +/// # Errors +/// * fail to open tar.gz file +/// * fail to untar or unzip +fn untar_unzip(from: &PathBuf, to: &str) -> Result<(), anyhow::Error> { + let tgz = File::open(from)?; + let decoder = GzDecoder::new(tgz); + let mut archive = tar::Archive::new(decoder); + archive.unpack(to)?; + Ok(()) +} + +/// # Errors +/// * fail to read path & metadata +fn detail_files(dir: &str, backup_list: &mut [BackupInfo]) -> Result<()> { + if let Ok(paths) = fs::read_dir(dir) { + for path in paths.flatten() { + let filepath = path.path(); + let metadata = fs::metadata(filepath)?; + if let Some(filename) = path.path().file_name() { + if let Some(filename) = filename.to_str() { + if let Some((id, _)) = filename.split_once('.') { + if let Ok(id) = id.parse::<u32>() { + for backup in backup_list.iter_mut() { + if backup.id == id { + backup.size += metadata.len(); + } + } + } + } + } + } + } + } else { + fs::create_dir(dir)?; + } + Ok(()) +} + +/// # Errors +/// * fail to extract +fn restore_data(cfg: &BackupConfig, from: &PathBuf) -> Result<()> { + if !Path::new(DEFAULT_HOST_TEMP_DIR).exists() && fs::create_dir(DEFAULT_HOST_TEMP_DIR).is_err() + { + return Err(anyhow!("Host backup temporary folder creation failed")); + } + + let tmp_path = 
format!("{DEFAULT_HOST_TEMP_DIR}/{DEFAULT_ZIP_DIR}"); + if Path::new(&tmp_path).exists() { + remove_tmpdir_all(&tmp_path)?; + } + + if extract_to(from, DEFAULT_HOST_TEMP_DIR).is_err() { + return Err(anyhow!("backup file extraction failed")); + } + + let postgres_dump = format!("{tmp_path}/{DEFAULT_POSTGRES_DUMP_FILE}"); + if Path::new(&postgres_dump).exists() { + postgres_restore_docker(cfg, &postgres_dump)?; + } + remove_tmpdir_all(&tmp_path)?; + Ok(()) +} + +/// # Errors +/// * fail to untar or unzip +fn extract_to(from: &PathBuf, to: &str) -> Result<()> { + // extract backup file in "/tmp" + untar_unzip(from, to)?; + Ok(()) +} diff --git a/src/lib.rs b/src/lib.rs index 3cf5cf95..600e01eb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -345,7 +345,7 @@ impl Store { /// /// Returns an error when all the available backups are not valid /// for restoration. - pub fn recover(&mut self) -> Result<()> { + pub fn recover(&mut self) -> Result { self.states.recover() } diff --git a/src/tables.rs b/src/tables.rs index 068dec76..4549c7e1 100644 --- a/src/tables.rs +++ b/src/tables.rs @@ -80,7 +80,10 @@ impl StateDb { tracing::warn!("fail to open db {e:?}"); tracing::warn!("recovering from latest backup available"); - Self::recover_db(&path, &backup)? + match Self::recover_db(&path, &backup) { + Ok((db, _)) => db, + Err(e) => return Err(e), + } } else { return Err(e); } @@ -191,12 +194,12 @@ impl StateDb { Ok(()) } - pub fn recover(&mut self) -> Result<()> { + pub fn recover(&mut self) -> Result { self.close(); - let db = Self::recover_db(&self.db, &self.backup)?; + let (db, backup_id) = Self::recover_db(&self.db, &self.backup)?; self.inner = Some(db); - Ok(()) + Ok(backup_id) } fn close(&mut self) { @@ -226,7 +229,7 @@ impl StateDb { )?) 
} - fn recover_db(path: &Path, backup: &Path) -> Result { + fn recover_db(path: &Path, backup: &Path) -> Result<(rocksdb::OptimisticTransactionDB, u32)> { let mut engine = Self::open_backup_engine(backup)?; let available = engine.get_backup_info(); let restore_opts = rocksdb::backup::RestoreOptions::default(); @@ -235,7 +238,7 @@ impl StateDb { Ok(_) => match Self::open_db(path) { Ok(db) => { tracing::info!("restored from backup (id: {backup_id})"); - return Ok(db); + return Ok((db, backup_id)); } Err(e) => { tracing::warn!("opening restored backup (id: {backup_id}) failed {e:?}");