From f171e2aa10920f92d58c584ca1b7ee02c0558dc5 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Thu, 19 Mar 2026 17:11:52 +0100 Subject: [PATCH 1/8] feat: scheduled database snapshots via pg_dump (#23) Add opt-in daily pg_dump snapshots to protect against data loss. Runs as a background tokio task with configurable time, retention, and output directory. Includes integration test for dump/restore round-trip. --- .env.example | 7 + backend/Dockerfile | 2 +- backend/crates/atlas-server/Cargo.toml | 1 + backend/crates/atlas-server/src/config.rs | 145 ++++++++++++++ backend/crates/atlas-server/src/main.rs | 15 ++ backend/crates/atlas-server/src/snapshot.rs | 179 ++++++++++++++++++ .../atlas-server/tests/integration/common.rs | 10 +- .../atlas-server/tests/integration/main.rs | 1 + .../tests/integration/snapshots.rs | 90 +++++++++ docker-compose.yml | 6 + 10 files changed, 453 insertions(+), 3 deletions(-) create mode 100644 backend/crates/atlas-server/src/snapshot.rs create mode 100644 backend/crates/atlas-server/tests/integration/snapshots.rs diff --git a/.env.example b/.env.example index d554b30..aab2171 100644 --- a/.env.example +++ b/.env.example @@ -55,3 +55,10 @@ ENABLE_DA_TRACKING=false # FAUCET_PRIVATE_KEY=0x... 
# FAUCET_AMOUNT=0.01 # FAUCET_COOLDOWN_MINUTES=30 + +# Optional snapshot feature (daily pg_dump backups) +# SNAPSHOT_ENABLED=false +# SNAPSHOT_TIME=03:00 # UTC time (HH:MM) to run daily pg_dump +# SNAPSHOT_RETENTION=7 # Number of snapshot files to keep +# SNAPSHOT_DIR=/snapshots # Container path for snapshots +# SNAPSHOT_HOST_DIR=./snapshots # Host path mounted to SNAPSHOT_DIR diff --git a/backend/Dockerfile b/backend/Dockerfile index a912b69..99e9cfd 100644 --- a/backend/Dockerfile +++ b/backend/Dockerfile @@ -11,7 +11,7 @@ RUN cargo build --release # Server image FROM alpine:3.21 AS server -RUN apk add --no-cache ca-certificates +RUN apk add --no-cache ca-certificates postgresql16-client COPY --from=builder /app/target/release/atlas-server /usr/local/bin/ diff --git a/backend/crates/atlas-server/Cargo.toml b/backend/crates/atlas-server/Cargo.toml index 08c8e82..0d594e6 100644 --- a/backend/crates/atlas-server/Cargo.toml +++ b/backend/crates/atlas-server/Cargo.toml @@ -46,3 +46,4 @@ tokio = { workspace = true } tower = { workspace = true, features = ["util"] } serde_json = { workspace = true } sqlx = { workspace = true } +tempfile = "3" diff --git a/backend/crates/atlas-server/src/config.rs b/backend/crates/atlas-server/src/config.rs index 1114654..74a4fc3 100644 --- a/backend/crates/atlas-server/src/config.rs +++ b/backend/crates/atlas-server/src/config.rs @@ -1,6 +1,7 @@ use alloy::primitives::U256; use alloy::signers::local::PrivateKeySigner; use anyhow::{bail, Context, Result}; +use chrono::NaiveTime; use std::{env, str::FromStr}; #[cfg(test)] @@ -379,6 +380,72 @@ impl FaucetConfig { } } +#[derive(Clone)] +pub struct SnapshotConfig { + pub enabled: bool, + pub time: NaiveTime, + pub retention: u32, + pub dir: String, + pub database_url: String, +} + +impl std::fmt::Debug for SnapshotConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("SnapshotConfig") + .field("enabled", &self.enabled) + .field("time", &self.time) + 
.field("retention", &self.retention)
+            .field("dir", &self.dir)
+            .field("database_url", &"[redacted]")
+            .finish()
+    }
+}
+
+impl SnapshotConfig {
+    pub fn from_env(database_url: &str) -> Result<Self> {
+        let enabled = env::var("SNAPSHOT_ENABLED")
+            .unwrap_or_else(|_| "false".to_string())
+            .parse::<bool>()
+            .context("Invalid SNAPSHOT_ENABLED")?;
+
+        if !enabled {
+            return Ok(Self {
+                enabled,
+                time: NaiveTime::from_hms_opt(3, 0, 0).unwrap(),
+                retention: 7,
+                dir: "/snapshots".to_string(),
+                database_url: database_url.to_string(),
+            });
+        }
+
+        let time_str = env::var("SNAPSHOT_TIME").unwrap_or_else(|_| "03:00".to_string());
+        let time = NaiveTime::parse_from_str(&time_str, "%H:%M")
+            .context("Invalid SNAPSHOT_TIME (expected HH:MM)")?;
+
+        let retention = env::var("SNAPSHOT_RETENTION")
+            .unwrap_or_else(|_| "7".to_string())
+            .parse::<u32>()
+            .context("Invalid SNAPSHOT_RETENTION")?;
+        if retention == 0 {
+            bail!("SNAPSHOT_RETENTION must be greater than 0");
+        }
+
+        let dir = env::var("SNAPSHOT_DIR").unwrap_or_else(|_| "/snapshots".to_string());
+        let dir = dir.trim().to_string();
+        if dir.is_empty() {
+            bail!("SNAPSHOT_DIR must not be empty");
+        }
+
+        Ok(Self {
+            enabled,
+            time,
+            retention,
+            dir,
+            database_url: database_url.to_string(),
+        })
+    }
+}
+
 fn parse_optional_env(val: Option<String>) -> Option<String> {
     val.map(|s| s.trim().to_string()).filter(|s| !s.is_empty())
 }
@@ -835,6 +902,84 @@ mod tests {
         );
     }
 
+    fn clear_snapshot_env() {
+        env::remove_var("SNAPSHOT_ENABLED");
+        env::remove_var("SNAPSHOT_TIME");
+        env::remove_var("SNAPSHOT_RETENTION");
+        env::remove_var("SNAPSHOT_DIR");
+    }
+
+    #[test]
+    fn snapshot_config_defaults_disabled() {
+        let _lock = ENV_LOCK.lock().unwrap();
+        clear_snapshot_env();
+
+        let config = SnapshotConfig::from_env("postgres://test@localhost/test").unwrap();
+        assert!(!config.enabled);
+        assert_eq!(config.time, NaiveTime::from_hms_opt(3, 0, 0).unwrap());
+        assert_eq!(config.retention, 7);
+        assert_eq!(config.dir, "/snapshots");
+    }
+
+    #[test]
+    fn 
snapshot_config_parses_valid_time() { + let _lock = ENV_LOCK.lock().unwrap(); + clear_snapshot_env(); + env::set_var("SNAPSHOT_ENABLED", "true"); + + for (input, hour, minute) in [("00:00", 0, 0), ("03:00", 3, 0), ("23:59", 23, 59)] { + env::set_var("SNAPSHOT_TIME", input); + let config = SnapshotConfig::from_env("postgres://test@localhost/test").unwrap(); + assert_eq!( + config.time, + NaiveTime::from_hms_opt(hour, minute, 0).unwrap(), + "failed for input {input}" + ); + } + clear_snapshot_env(); + } + + #[test] + fn snapshot_config_rejects_invalid_time() { + let _lock = ENV_LOCK.lock().unwrap(); + clear_snapshot_env(); + env::set_var("SNAPSHOT_ENABLED", "true"); + + for val in ["25:00", "abc", "12:60"] { + env::set_var("SNAPSHOT_TIME", val); + let err = SnapshotConfig::from_env("postgres://test@localhost/test").unwrap_err(); + assert!( + err.to_string().contains("Invalid SNAPSHOT_TIME"), + "expected error for {val}, got: {err}" + ); + } + clear_snapshot_env(); + } + + #[test] + fn snapshot_config_rejects_zero_retention() { + let _lock = ENV_LOCK.lock().unwrap(); + clear_snapshot_env(); + env::set_var("SNAPSHOT_ENABLED", "true"); + env::set_var("SNAPSHOT_RETENTION", "0"); + + let err = SnapshotConfig::from_env("postgres://test@localhost/test").unwrap_err(); + assert!(err.to_string().contains("must be greater than 0")); + clear_snapshot_env(); + } + + #[test] + fn snapshot_config_custom_dir() { + let _lock = ENV_LOCK.lock().unwrap(); + clear_snapshot_env(); + env::set_var("SNAPSHOT_ENABLED", "true"); + env::set_var("SNAPSHOT_DIR", "/data/backups"); + + let config = SnapshotConfig::from_env("postgres://test@localhost/test").unwrap(); + assert_eq!(config.dir, "/data/backups"); + clear_snapshot_env(); + } + #[test] fn faucet_config_rejects_bad_inputs() { let _lock = ENV_LOCK.lock().unwrap(); diff --git a/backend/crates/atlas-server/src/main.rs b/backend/crates/atlas-server/src/main.rs index 14d932c..109a926 100644 --- a/backend/crates/atlas-server/src/main.rs +++ 
b/backend/crates/atlas-server/src/main.rs @@ -14,6 +14,7 @@ mod config; mod faucet; mod head; mod indexer; +mod snapshot; /// Retry delays for exponential backoff (in seconds) const RETRY_DELAYS: &[u64] = &[5, 10, 20, 30, 60]; @@ -224,6 +225,7 @@ async fn run(args: cli::RunArgs) -> Result<()> { let config = config::Config::from_run_args(args.clone())?; let faucet_config = config::FaucetConfig::from_faucet_args(&args.faucet)?; + let snapshot_config = config::SnapshotConfig::from_env(&config.database_url)?; let faucet = if faucet_config.enabled { tracing::info!("Faucet enabled"); @@ -338,6 +340,19 @@ async fn run(args: cli::RunArgs) -> Result<()> { } }); + // Spawn snapshot scheduler if enabled + if snapshot_config.enabled { + tracing::info!("Snapshot scheduler enabled"); + tokio::spawn(async move { + if let Err(e) = + run_with_retry(|| snapshot::run_snapshot_loop(snapshot_config.clone())).await + { + tracing::error!("Snapshot scheduler terminated with error: {}", e); + } + }); + } + + let app = api::build_router(state, config.cors_origin.clone()); let addr = format!("{}:{}", config.api_host, config.api_port); tracing::info!("API listening on {}", addr); diff --git a/backend/crates/atlas-server/src/snapshot.rs b/backend/crates/atlas-server/src/snapshot.rs new file mode 100644 index 0000000..4aa5cef --- /dev/null +++ b/backend/crates/atlas-server/src/snapshot.rs @@ -0,0 +1,179 @@ +use anyhow::{bail, Result}; +use chrono::{DateTime, NaiveTime, Utc}; +use std::time::Duration; + +use crate::config::SnapshotConfig; + +/// Calculate duration from `now` until the next occurrence of `target` time (UTC). +/// If `target` has already passed today, returns the duration until tomorrow's `target`. 
+fn duration_until_next(target: NaiveTime, now: DateTime<Utc>) -> Duration {
+    let today_target = now.date_naive().and_time(target).and_utc();
+    let next = if today_target > now {
+        today_target
+    } else {
+        today_target + chrono::Duration::days(1)
+    };
+    (next - now).to_std().expect("positive duration")
+}
+
+/// Run the snapshot scheduler loop. Returns Err on failure (caller retries with backoff).
+pub async fn run_snapshot_loop(config: SnapshotConfig) -> Result<()> {
+    tracing::info!(
+        time = %config.time.format("%H:%M"),
+        retention = config.retention,
+        dir = %config.dir,
+        "Snapshot scheduler started"
+    );
+
+    tokio::fs::create_dir_all(&config.dir).await?;
+
+    loop {
+        let sleep_dur = duration_until_next(config.time, Utc::now());
+        tracing::info!(
+            seconds = sleep_dur.as_secs(),
+            "Sleeping until next snapshot"
+        );
+        tokio::time::sleep(sleep_dur).await;
+
+        let timestamp = Utc::now().format("%Y-%m-%dT%H-%M-%S");
+        let filename = format!("atlas_snapshot_{timestamp}.dump");
+        let tmp_path = format!("{}/{filename}.tmp", config.dir);
+        let final_path = format!("{}/{filename}", config.dir);
+
+        tracing::info!(%filename, "Starting database snapshot");
+
+        let status = tokio::process::Command::new("pg_dump")
+            .arg("--dbname")
+            .arg(&config.database_url)
+            .arg("-Fc")
+            .arg("-f")
+            .arg(&tmp_path)
+            .status()
+            .await?;
+
+        if status.success() {
+            tokio::fs::rename(&tmp_path, &final_path).await?;
+            tracing::info!(%filename, "Snapshot complete");
+            cleanup_old_snapshots(&config.dir, config.retention).await;
+        } else {
+            let _ = tokio::fs::remove_file(&tmp_path).await;
+            bail!("pg_dump failed with status: {status}");
+        }
+    }
+}
+
+/// Remove old snapshot files, keeping only the newest `retention` count.
+async fn cleanup_old_snapshots(dir: &str, retention: u32) { + let mut files = Vec::new(); + let Ok(mut entries) = tokio::fs::read_dir(dir).await else { + return; + }; + + while let Ok(Some(entry)) = entries.next_entry().await { + let name = entry.file_name(); + let name = name.to_string_lossy(); + if name.starts_with("atlas_snapshot_") && name.ends_with(".dump") && !name.ends_with(".tmp") + { + files.push(entry.path()); + } + } + + // Sort descending (newest first) — timestamp in filename gives lexicographic order + files.sort(); + files.reverse(); + + for old in files.into_iter().skip(retention as usize) { + tracing::info!(path = %old.display(), "Removing old snapshot"); + if let Err(e) = tokio::fs::remove_file(&old).await { + tracing::warn!(path = %old.display(), error = %e, "Failed to remove old snapshot"); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use chrono::TimeZone; + + #[test] + fn duration_until_next_target_in_future_today() { + let now = Utc.with_ymd_and_hms(2026, 3, 19, 10, 0, 0).unwrap(); + let target = NaiveTime::from_hms_opt(15, 0, 0).unwrap(); + let dur = duration_until_next(target, now); + assert_eq!(dur, Duration::from_secs(5 * 3600)); // 5 hours + } + + #[test] + fn duration_until_next_target_already_passed() { + let now = Utc.with_ymd_and_hms(2026, 3, 19, 16, 0, 0).unwrap(); + let target = NaiveTime::from_hms_opt(3, 0, 0).unwrap(); + let dur = duration_until_next(target, now); + assert_eq!(dur, Duration::from_secs(11 * 3600)); // 11 hours until 03:00 next day + } + + #[test] + fn duration_until_next_target_exactly_now_wraps_to_tomorrow() { + let now = Utc.with_ymd_and_hms(2026, 3, 19, 3, 0, 0).unwrap(); + let target = NaiveTime::from_hms_opt(3, 0, 0).unwrap(); + let dur = duration_until_next(target, now); + assert_eq!(dur, Duration::from_secs(24 * 3600)); // full 24 hours + } + + #[tokio::test] + async fn cleanup_keeps_only_retention_count() { + let dir = tempfile::tempdir().unwrap(); + let dir_path = 
dir.path().to_str().unwrap(); + + // Create 5 snapshot files with different timestamps + for i in 1..=5 { + let path = dir + .path() + .join(format!("atlas_snapshot_2026-03-{i:02}T03-00-00.dump")); + tokio::fs::write(&path, b"test").await.unwrap(); + } + + // Also create a .tmp file that should be ignored + let tmp = dir + .path() + .join("atlas_snapshot_2026-03-06T03-00-00.dump.tmp"); + tokio::fs::write(&tmp, b"tmp").await.unwrap(); + + cleanup_old_snapshots(dir_path, 3).await; + + let mut remaining = Vec::new(); + let mut entries = tokio::fs::read_dir(dir_path).await.unwrap(); + while let Some(entry) = entries.next_entry().await.unwrap() { + remaining.push(entry.file_name().to_string_lossy().to_string()); + } + remaining.sort(); + + // Should keep 3 newest + the .tmp file + assert_eq!(remaining.len(), 4); + assert!(remaining.contains(&"atlas_snapshot_2026-03-03T03-00-00.dump".to_string())); + assert!(remaining.contains(&"atlas_snapshot_2026-03-04T03-00-00.dump".to_string())); + assert!(remaining.contains(&"atlas_snapshot_2026-03-05T03-00-00.dump".to_string())); + assert!(remaining.contains(&"atlas_snapshot_2026-03-06T03-00-00.dump.tmp".to_string())); + } + + #[tokio::test] + async fn cleanup_noop_when_under_retention() { + let dir = tempfile::tempdir().unwrap(); + let dir_path = dir.path().to_str().unwrap(); + + for i in 1..=2 { + let path = dir + .path() + .join(format!("atlas_snapshot_2026-03-{i:02}T03-00-00.dump")); + tokio::fs::write(&path, b"test").await.unwrap(); + } + + cleanup_old_snapshots(dir_path, 5).await; + + let mut count = 0; + let mut entries = tokio::fs::read_dir(dir_path).await.unwrap(); + while entries.next_entry().await.unwrap().is_some() { + count += 1; + } + assert_eq!(count, 2); + } +} diff --git a/backend/crates/atlas-server/tests/integration/common.rs b/backend/crates/atlas-server/tests/integration/common.rs index da128f6..ad8b4a0 100644 --- a/backend/crates/atlas-server/tests/integration/common.rs +++ 
b/backend/crates/atlas-server/tests/integration/common.rs @@ -13,6 +13,7 @@ use atlas_server::head::HeadTracker; struct TestEnv { runtime: tokio::runtime::Runtime, pool: PgPool, + database_url: String, _container: ContainerAsync, } @@ -20,7 +21,7 @@ struct TestEnv { static ENV: LazyLock = LazyLock::new(|| { let runtime = tokio::runtime::Runtime::new().expect("create test runtime"); - let (pool, container) = runtime.block_on(async { + let (pool, container, database_url) = runtime.block_on(async { let container = Postgres::default() .start() .await @@ -42,12 +43,13 @@ static ENV: LazyLock = LazyLock::new(|| { .await .expect("Failed to run migrations"); - (pool, container) + (pool, container, database_url) }); TestEnv { runtime, pool, + database_url, _container: container, } }); @@ -56,6 +58,10 @@ pub fn pool() -> &'static PgPool { &ENV.pool } +pub fn database_url() -> &'static str { + &ENV.database_url +} + pub fn test_router() -> Router { let pool = pool().clone(); let head_tracker = Arc::new(HeadTracker::empty(10)); diff --git a/backend/crates/atlas-server/tests/integration/main.rs b/backend/crates/atlas-server/tests/integration/main.rs index 92e9a0d..085f736 100644 --- a/backend/crates/atlas-server/tests/integration/main.rs +++ b/backend/crates/atlas-server/tests/integration/main.rs @@ -5,6 +5,7 @@ mod blocks; mod nfts; mod schema; mod search; +mod snapshots; mod status; mod tokens; mod transactions; diff --git a/backend/crates/atlas-server/tests/integration/snapshots.rs b/backend/crates/atlas-server/tests/integration/snapshots.rs new file mode 100644 index 0000000..1558368 --- /dev/null +++ b/backend/crates/atlas-server/tests/integration/snapshots.rs @@ -0,0 +1,90 @@ +use crate::common; + +/// Check if a command is available on PATH. 
+fn has_command(cmd: &str) -> bool { + std::process::Command::new("which") + .arg(cmd) + .output() + .map(|o| o.status.success()) + .unwrap_or(false) +} + +#[test] +#[ignore] // Requires pg_dump and pg_restore on PATH +fn snapshot_dump_and_restore_round_trip() { + if !has_command("pg_dump") || !has_command("pg_restore") { + eprintln!("Skipping: pg_dump/pg_restore not found on PATH"); + return; + } + + common::run(async { + let pool = common::pool(); + let db_url = common::database_url(); + + // Insert test data + sqlx::query("INSERT INTO indexer_state (key, value) VALUES ('snapshot_test', 'hello') ON CONFLICT (key) DO UPDATE SET value = 'hello'") + .execute(pool) + .await + .expect("insert test data"); + + // pg_dump to temp file + let dir = tempfile::tempdir().expect("create temp dir"); + let dump_path = dir.path().join("test_snapshot.dump"); + + let dump_status = tokio::process::Command::new("pg_dump") + .arg("--dbname") + .arg(db_url) + .arg("-Fc") + .arg("-f") + .arg(&dump_path) + .status() + .await + .expect("spawn pg_dump"); + + assert!(dump_status.success(), "pg_dump failed: {dump_status}"); + + let metadata = tokio::fs::metadata(&dump_path).await.expect("stat dump file"); + assert!(metadata.len() > 0, "dump file is empty"); + + // Create a separate database for restore + sqlx::query("CREATE DATABASE test_restore") + .execute(pool) + .await + .expect("create test_restore database"); + + let restore_url = db_url.replace("/postgres", "/test_restore"); + + // pg_restore into the new database + let restore_status = tokio::process::Command::new("pg_restore") + .arg("--dbname") + .arg(&restore_url) + .arg(&dump_path) + .status() + .await + .expect("spawn pg_restore"); + + assert!(restore_status.success(), "pg_restore failed: {restore_status}"); + + // Verify data in restored database + let restore_pool = sqlx::postgres::PgPoolOptions::new() + .max_connections(1) + .connect(&restore_url) + .await + .expect("connect to restored database"); + + let row: (String,) = + 
sqlx::query_as("SELECT value FROM indexer_state WHERE key = 'snapshot_test'") + .fetch_one(&restore_pool) + .await + .expect("query restored data"); + + assert_eq!(row.0, "hello"); + + // Cleanup + restore_pool.close().await; + sqlx::query("DROP DATABASE test_restore") + .execute(pool) + .await + .expect("drop test_restore database"); + }); +} diff --git a/docker-compose.yml b/docker-compose.yml index 4ce3d37..a5c0d2e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -43,9 +43,15 @@ services: BACKGROUND_COLOR_LIGHT: ${BACKGROUND_COLOR_LIGHT:-} SUCCESS_COLOR: ${SUCCESS_COLOR:-} ERROR_COLOR: ${ERROR_COLOR:-} + SNAPSHOT_ENABLED: ${SNAPSHOT_ENABLED:-false} + SNAPSHOT_TIME: ${SNAPSHOT_TIME:-03:00} + SNAPSHOT_RETENTION: ${SNAPSHOT_RETENTION:-7} + SNAPSHOT_DIR: ${SNAPSHOT_DIR:-/snapshots} API_HOST: 0.0.0.0 API_PORT: 3000 RUST_LOG: atlas_server=info,tower_http=info + volumes: + - ${SNAPSHOT_HOST_DIR:-./snapshots}:/snapshots depends_on: postgres: condition: service_healthy From fa1565f33dbc5b37a4859b0c9038cb36929104e4 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Thu, 19 Mar 2026 17:31:37 +0100 Subject: [PATCH 2/8] fix snapshot retries and CI formatting --- backend/Dockerfile | 4 +- backend/crates/atlas-server/src/snapshot.rs | 122 +++++++++++++----- .../tests/integration/snapshots.rs | 9 +- docker-compose.yml | 2 +- 4 files changed, 103 insertions(+), 34 deletions(-) diff --git a/backend/Dockerfile b/backend/Dockerfile index 99e9cfd..da3a4d2 100644 --- a/backend/Dockerfile +++ b/backend/Dockerfile @@ -15,7 +15,9 @@ RUN apk add --no-cache ca-certificates postgresql16-client COPY --from=builder /app/target/release/atlas-server /usr/local/bin/ -RUN addgroup -S atlas && adduser -S atlas -G atlas +RUN addgroup -S atlas && adduser -S atlas -G atlas \ + && mkdir -p /snapshots \ + && chown atlas:atlas /snapshots USER atlas EXPOSE 3000 diff --git a/backend/crates/atlas-server/src/snapshot.rs 
b/backend/crates/atlas-server/src/snapshot.rs
index 4aa5cef..c145edc 100644
--- a/backend/crates/atlas-server/src/snapshot.rs
+++ b/backend/crates/atlas-server/src/snapshot.rs
@@ -4,6 +4,9 @@ use std::time::Duration;
 
 use crate::config::SnapshotConfig;
 
+const SNAPSHOT_RETRY_DELAYS: &[u64] = &[5, 10, 20, 30, 60];
+const SNAPSHOT_MAX_RETRY_DELAY: u64 = 60;
+
 /// Calculate duration from `now` until the next occurrence of `target` time (UTC).
 /// If `target` has already passed today, returns the duration until tomorrow's `target`.
 fn duration_until_next(target: NaiveTime, now: DateTime<Utc>) -> Duration {
@@ -16,7 +19,54 @@ fn duration_until_next(target: NaiveTime, now: DateTime<Utc>) -> Duration {
     (next - now).to_std().expect("positive duration")
 }
 
-/// Run the snapshot scheduler loop. Returns Err on failure (caller retries with backoff).
+fn retry_delay(attempt: usize) -> Duration {
+    Duration::from_secs(
+        SNAPSHOT_RETRY_DELAYS
+            .get(attempt)
+            .copied()
+            .unwrap_or(SNAPSHOT_MAX_RETRY_DELAY),
+    )
+}
+
+fn sleep_duration(target: NaiveTime, now: DateTime<Utc>, retry_attempt: Option<usize>) -> Duration {
+    retry_attempt
+        .map(retry_delay)
+        .unwrap_or_else(|| duration_until_next(target, now))
+}
+
+async fn attempt_snapshot(config: &SnapshotConfig) -> Result<()> {
+    tokio::fs::create_dir_all(&config.dir).await?;
+
+    let timestamp = Utc::now().format("%Y-%m-%dT%H-%M-%S");
+    let filename = format!("atlas_snapshot_{timestamp}.dump");
+    let tmp_path = format!("{}/{filename}.tmp", config.dir);
+    let final_path = format!("{}/{filename}", config.dir);
+
+    tracing::info!(%filename, "Starting database snapshot");
+
+    let status = tokio::process::Command::new("pg_dump")
+        .arg("--dbname")
+        .arg(&config.database_url)
+        .arg("-Fc")
+        .arg("-f")
+        .arg(&tmp_path)
+        .status()
+        .await?;
+
+    if status.success() {
+        tokio::fs::rename(&tmp_path, &final_path).await?;
+        tracing::info!(%filename, "Snapshot complete");
+        cleanup_old_snapshots(&config.dir, config.retention).await;
+        Ok(())
+    } else {
+        let _
= tokio::fs::remove_file(&tmp_path).await;
+        bail!("pg_dump failed with status: {status}");
+    }
+}
+
+/// Run the snapshot scheduler loop.
+/// Snapshot attempts retry with backoff within the same scheduled run so a
+/// transient failure does not skip the day entirely.
 pub async fn run_snapshot_loop(config: SnapshotConfig) -> Result<()> {
     tracing::info!(
         time = %config.time.format("%H:%M"),
@@ -25,39 +75,35 @@ pub async fn run_snapshot_loop(config: SnapshotConfig) -> Result<()> {
         "Snapshot scheduler started"
     );
 
-    tokio::fs::create_dir_all(&config.dir).await?;
+    let mut retry_attempt = None;
 
     loop {
-        let sleep_dur = duration_until_next(config.time, Utc::now());
-        tracing::info!(
-            seconds = sleep_dur.as_secs(),
-            "Sleeping until next snapshot"
-        );
+        let sleep_dur = sleep_duration(config.time, Utc::now(), retry_attempt);
+        if let Some(attempt) = retry_attempt {
+            tracing::warn!(
+                attempt = attempt + 1,
+                seconds = sleep_dur.as_secs(),
+                "Retrying failed snapshot after backoff"
+            );
+        } else {
+            tracing::info!(
+                seconds = sleep_dur.as_secs(),
+                "Sleeping until next snapshot"
+            );
+        }
         tokio::time::sleep(sleep_dur).await;
 
-        let timestamp = Utc::now().format("%Y-%m-%dT%H-%M-%S");
-        let filename = format!("atlas_snapshot_{timestamp}.dump");
-        let tmp_path = format!("{}/{filename}.tmp", config.dir);
-        let final_path = format!("{}/{filename}", config.dir);
-
-        tracing::info!(%filename, "Starting database snapshot");
-
-        let status = tokio::process::Command::new("pg_dump")
-            .arg("--dbname")
-            .arg(&config.database_url)
-            .arg("-Fc")
-            .arg("-f")
-            .arg(&tmp_path)
-            .status()
-            .await?;
-
-        if status.success() {
-            tokio::fs::rename(&tmp_path, &final_path).await?;
-            tracing::info!(%filename, "Snapshot complete");
-            cleanup_old_snapshots(&config.dir, config.retention).await;
-        } else {
-            let _ = tokio::fs::remove_file(&tmp_path).await;
-            bail!("pg_dump failed with status: {status}");
+        match attempt_snapshot(&config).await {
+            Ok(()) => retry_attempt = None,
+            Err(err) =>
{ + let next_attempt = retry_attempt.map(|attempt| attempt + 1).unwrap_or(0); + tracing::error!( + error = %err, + attempt = next_attempt + 1, + "Snapshot attempt failed" + ); + retry_attempt = Some(next_attempt); + } } } } @@ -119,6 +165,22 @@ mod tests { assert_eq!(dur, Duration::from_secs(24 * 3600)); // full 24 hours } + #[test] + fn sleep_duration_uses_schedule_when_not_retrying() { + let now = Utc.with_ymd_and_hms(2026, 3, 19, 16, 0, 0).unwrap(); + let target = NaiveTime::from_hms_opt(3, 0, 0).unwrap(); + let dur = sleep_duration(target, now, None); + assert_eq!(dur, Duration::from_secs(11 * 3600)); + } + + #[test] + fn sleep_duration_uses_retry_backoff_after_failure() { + let now = Utc.with_ymd_and_hms(2026, 3, 19, 16, 0, 0).unwrap(); + let target = NaiveTime::from_hms_opt(3, 0, 0).unwrap(); + let dur = sleep_duration(target, now, Some(0)); + assert_eq!(dur, Duration::from_secs(5)); + } + #[tokio::test] async fn cleanup_keeps_only_retention_count() { let dir = tempfile::tempdir().unwrap(); diff --git a/backend/crates/atlas-server/tests/integration/snapshots.rs b/backend/crates/atlas-server/tests/integration/snapshots.rs index 1558368..8304bfa 100644 --- a/backend/crates/atlas-server/tests/integration/snapshots.rs +++ b/backend/crates/atlas-server/tests/integration/snapshots.rs @@ -43,7 +43,9 @@ fn snapshot_dump_and_restore_round_trip() { assert!(dump_status.success(), "pg_dump failed: {dump_status}"); - let metadata = tokio::fs::metadata(&dump_path).await.expect("stat dump file"); + let metadata = tokio::fs::metadata(&dump_path) + .await + .expect("stat dump file"); assert!(metadata.len() > 0, "dump file is empty"); // Create a separate database for restore @@ -63,7 +65,10 @@ fn snapshot_dump_and_restore_round_trip() { .await .expect("spawn pg_restore"); - assert!(restore_status.success(), "pg_restore failed: {restore_status}"); + assert!( + restore_status.success(), + "pg_restore failed: {restore_status}" + ); // Verify data in restored database let 
restore_pool = sqlx::postgres::PgPoolOptions::new() diff --git a/docker-compose.yml b/docker-compose.yml index a5c0d2e..d9f8dbc 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -51,7 +51,7 @@ services: API_PORT: 3000 RUST_LOG: atlas_server=info,tower_http=info volumes: - - ${SNAPSHOT_HOST_DIR:-./snapshots}:/snapshots + - ${SNAPSHOT_HOST_DIR:-./snapshots}:${SNAPSHOT_DIR:-/snapshots} depends_on: postgres: condition: service_healthy From 8417d8564358ada4aee1f60b93512cdbac18db53 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Fri, 20 Mar 2026 09:46:43 +0100 Subject: [PATCH 3/8] harden snapshot config handling --- .env.example | 2 ++ backend/crates/atlas-server/src/config.rs | 27 +++++++++++++++++++++++ docker-compose.yml | 1 + 3 files changed, 30 insertions(+) diff --git a/.env.example b/.env.example index aab2171..100e153 100644 --- a/.env.example +++ b/.env.example @@ -62,3 +62,5 @@ ENABLE_DA_TRACKING=false # SNAPSHOT_RETENTION=7 # Number of snapshot files to keep # SNAPSHOT_DIR=/snapshots # Container path for snapshots # SNAPSHOT_HOST_DIR=./snapshots # Host path mounted to SNAPSHOT_DIR +# UID=1000 # Optional: host UID for writable snapshot bind mounts +# GID=1000 # Optional: host GID for writable snapshot bind mounts diff --git a/backend/crates/atlas-server/src/config.rs b/backend/crates/atlas-server/src/config.rs index 74a4fc3..16d41da 100644 --- a/backend/crates/atlas-server/src/config.rs +++ b/backend/crates/atlas-server/src/config.rs @@ -980,6 +980,33 @@ mod tests { clear_snapshot_env(); } + #[test] + fn snapshot_config_rejects_empty_dir() { + let _lock = ENV_LOCK.lock().unwrap(); + clear_snapshot_env(); + env::set_var("SNAPSHOT_ENABLED", "true"); + env::set_var("SNAPSHOT_DIR", " "); + + let err = SnapshotConfig::from_env("postgres://test@localhost/test").unwrap_err(); + assert!(err.to_string().contains("SNAPSHOT_DIR must not be empty")); + clear_snapshot_env(); + } + + #[test] + fn 
snapshot_config_debug_redacts_database_url() { + let config = SnapshotConfig { + enabled: true, + time: NaiveTime::from_hms_opt(3, 0, 0).unwrap(), + retention: 7, + dir: "/snapshots".to_string(), + database_url: "postgres://atlas:secret@db/atlas".to_string(), + }; + + let debug = format!("{config:?}"); + assert!(debug.contains("[redacted]")); + assert!(!debug.contains("secret")); + } + #[test] fn faucet_config_rejects_bad_inputs() { let _lock = ENV_LOCK.lock().unwrap(); diff --git a/docker-compose.yml b/docker-compose.yml index d9f8dbc..98159b3 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -50,6 +50,7 @@ services: API_HOST: 0.0.0.0 API_PORT: 3000 RUST_LOG: atlas_server=info,tower_http=info + user: "${UID:-1000}:${GID:-1000}" volumes: - ${SNAPSHOT_HOST_DIR:-./snapshots}:${SNAPSHOT_DIR:-/snapshots} depends_on: From 487d20e9370764dfa7e5f9ad758afd8d56676456 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Mon, 23 Mar 2026 16:27:01 +0100 Subject: [PATCH 4/8] ignore local snapshot artifacts --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 9bc3dd2..7125d61 100644 --- a/.gitignore +++ b/.gitignore @@ -24,6 +24,9 @@ Thumbs.db *.log logs/ +# Local snapshot test artifacts +snapshots/ + # Node (frontend) frontend/node_modules/ frontend/dist/ From 1aec6194495ea696b07ad590be2f6c1ffda01283 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Tue, 31 Mar 2026 11:47:46 +0200 Subject: [PATCH 5/8] fix: pin atlas user to uid/gid 1000 to match compose default --- backend/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/Dockerfile b/backend/Dockerfile index da3a4d2..6b80c2f 100644 --- a/backend/Dockerfile +++ b/backend/Dockerfile @@ -15,7 +15,7 @@ RUN apk add --no-cache ca-certificates postgresql16-client COPY --from=builder /app/target/release/atlas-server /usr/local/bin/ -RUN addgroup -S atlas && adduser -S atlas -G 
atlas \ +RUN addgroup -S -g 1000 atlas && adduser -S -u 1000 atlas -G atlas \ && mkdir -p /snapshots \ && chown atlas:atlas /snapshots USER atlas From 93dc63c98925f6903d5f16bdae0e2de8d89ce716 Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Tue, 31 Mar 2026 11:51:55 +0200 Subject: [PATCH 6/8] refactor: share postgres_connection_config between snapshot and db CLI commands --- backend/crates/atlas-server/src/main.rs | 58 +++++++++++++-------- backend/crates/atlas-server/src/snapshot.rs | 9 ++-- 2 files changed, 40 insertions(+), 27 deletions(-) diff --git a/backend/crates/atlas-server/src/main.rs b/backend/crates/atlas-server/src/main.rs index 109a926..61d16db 100644 --- a/backend/crates/atlas-server/src/main.rs +++ b/backend/crates/atlas-server/src/main.rs @@ -35,11 +35,29 @@ fn required_db_url(db_url: &str) -> Result<&str> { Ok(db_url) } -struct PostgresConnectionConfig { - database_name: String, - env_vars: Vec<(&'static str, String)>, +pub(crate) struct PostgresConnectionConfig { + pub(crate) database_name: String, + pub(crate) env_vars: Vec<(&'static str, String)>, } +const PG_ENV_VARS: &[&str] = &[ + "PGHOST", + "PGHOSTADDR", + "PGPORT", + "PGUSER", + "PGPASSWORD", + "PGDATABASE", + "PGSERVICE", + "PGSSLMODE", + "PGSSLCERT", + "PGSSLKEY", + "PGSSLROOTCERT", + "PGSSLCRL", + "PGAPPNAME", + "PGOPTIONS", + "PGCONNECT_TIMEOUT", +]; + fn set_pg_env(env_vars: &mut Vec<(&'static str, String)>, key: &'static str, value: &str) { if value.is_empty() { return; @@ -54,7 +72,7 @@ fn set_pg_env(env_vars: &mut Vec<(&'static str, String)>, key: &'static str, val } } -fn postgres_connection_config(db_url: &str) -> Result { +pub(crate) fn postgres_connection_config(db_url: &str) -> Result { let url = reqwest::Url::parse(required_db_url(db_url)?).context("Invalid DATABASE_URL")?; match url.scheme() { "postgres" | "postgresql" => {} @@ -142,23 +160,21 @@ fn postgres_connection_config(db_url: &str) -> Result fn postgres_command(program: &str, 
config: &PostgresConnectionConfig) -> std::process::Command { let mut command = std::process::Command::new(program); - for env_var in [ - "PGHOST", - "PGHOSTADDR", - "PGPORT", - "PGUSER", - "PGPASSWORD", - "PGDATABASE", - "PGSERVICE", - "PGSSLMODE", - "PGSSLCERT", - "PGSSLKEY", - "PGSSLROOTCERT", - "PGSSLCRL", - "PGAPPNAME", - "PGOPTIONS", - "PGCONNECT_TIMEOUT", - ] { + for env_var in PG_ENV_VARS { + command.env_remove(env_var); + } + for (key, value) in &config.env_vars { + command.env(key, value); + } + command +} + +pub(crate) fn postgres_command_async( + program: &str, + config: &PostgresConnectionConfig, +) -> tokio::process::Command { + let mut command = tokio::process::Command::new(program); + for env_var in PG_ENV_VARS { command.env_remove(env_var); } for (key, value) in &config.env_vars { diff --git a/backend/crates/atlas-server/src/snapshot.rs b/backend/crates/atlas-server/src/snapshot.rs index c145edc..346faff 100644 --- a/backend/crates/atlas-server/src/snapshot.rs +++ b/backend/crates/atlas-server/src/snapshot.rs @@ -44,12 +44,9 @@ async fn attempt_snapshot(config: &SnapshotConfig) -> Result<()> { tracing::info!(%filename, "Starting database snapshot"); - let status = tokio::process::Command::new("pg_dump") - .arg("--dbname") - .arg(&config.database_url) - .arg("-Fc") - .arg("-f") - .arg(&tmp_path) + let pg_config = crate::postgres_connection_config(&config.database_url)?; + let status = crate::postgres_command_async("pg_dump", &pg_config) + .args(["-Fc", "-f", tmp_path.as_str()]) .status() .await?; From 302692d7bb2e7126e62a4ed66525823d7309a61a Mon Sep 17 00:00:00 2001 From: pthmas <9058370+pthmas@users.noreply.github.com> Date: Tue, 31 Mar 2026 13:07:36 +0200 Subject: [PATCH 7/8] Fix snapshot dump portability --- backend/crates/atlas-server/src/main.rs | 32 +++++++++++++++++++-- backend/crates/atlas-server/src/snapshot.rs | 4 +-- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/backend/crates/atlas-server/src/main.rs 
b/backend/crates/atlas-server/src/main.rs
index 61d16db..a796f2a 100644
--- a/backend/crates/atlas-server/src/main.rs
+++ b/backend/crates/atlas-server/src/main.rs
@@ -19,6 +19,7 @@ mod snapshot;
 /// Retry delays for exponential backoff (in seconds)
 const RETRY_DELAYS: &[u64] = &[5, 10, 20, 30, 60];
 const MAX_RETRY_DELAY: u64 = 60;
+const PORTABLE_PG_DUMP_FLAGS: &[&str] = &["--format=custom", "--no-owner", "--no-acl"];

 fn init_tracing(filter: &str) {
     tracing_subscriber::registry()
@@ -183,6 +184,24 @@ pub(crate) fn postgres_command_async(
     command
 }

+fn portable_pg_dump_command(
+    program: &str,
+    config: &PostgresConnectionConfig,
+) -> std::process::Command {
+    let mut command = postgres_command(program, config);
+    command.args(PORTABLE_PG_DUMP_FLAGS);
+    command
+}
+
+pub(crate) fn portable_pg_dump_command_async(
+    program: &str,
+    config: &PostgresConnectionConfig,
+) -> tokio::process::Command {
+    let mut command = postgres_command_async(program, config);
+    command.args(PORTABLE_PG_DUMP_FLAGS);
+    command
+}
+
 fn parse_chain_id(hex: &str) -> Option<u64> {
     u64::from_str_radix(hex.trim_start_matches("0x"), 16).ok()
 }
@@ -368,7 +387,6 @@ async fn run(args: cli::RunArgs) -> Result<()> {
         });
     }

-
     let app = api::build_router(state, config.cors_origin.clone());
     let addr = format!("{}:{}", config.api_host, config.api_port);
     tracing::info!("API listening on {}", addr);
@@ -404,8 +422,8 @@ async fn check(args: cli::RunArgs) -> Result<()> {
 fn cmd_db_dump(db_url: &str, output: &str) -> Result<()> {
     let config = postgres_connection_config(db_url)?;
-    let status = postgres_command("pg_dump", &config)
-        .args(["--format=custom", "--file", output])
+    let status = portable_pg_dump_command("pg_dump", &config)
+        .args(["--file", output])
         .status()
         .map_err(|e| anyhow::anyhow!("Failed to run pg_dump (is it installed?): {e}"))?;
@@ -656,4 +674,12 @@ mod tests {
     assert_eq!(env_value(&config, "PGPASSWORD"), Some("query-pass"));
     assert_eq!(env_value(&config, "PGDATABASE"), Some("query_db"));
    }
+
+    #[test]
+    fn portable_pg_dump_flags_omit_source_ownership_and_acls() {
+        assert_eq!(
+            PORTABLE_PG_DUMP_FLAGS,
+            ["--format=custom", "--no-owner", "--no-acl"]
+        );
+    }
 }

diff --git a/backend/crates/atlas-server/src/snapshot.rs b/backend/crates/atlas-server/src/snapshot.rs
index 346faff..0a7f2cf 100644
--- a/backend/crates/atlas-server/src/snapshot.rs
+++ b/backend/crates/atlas-server/src/snapshot.rs
@@ -45,8 +45,8 @@
     tracing::info!(%filename, "Starting database snapshot");

     let pg_config = crate::postgres_connection_config(&config.database_url)?;
-    let status = crate::postgres_command_async("pg_dump", &pg_config)
-        .args(["-Fc", "-f", tmp_path.as_str()])
+    let status = crate::portable_pg_dump_command_async("pg_dump", &pg_config)
+        .args(["--file", tmp_path.as_str()])
         .status()
         .await?;

From a076445de54c1a824587bfcf528f9b4c5e128543 Mon Sep 17 00:00:00 2001
From: pthmas <9058370+pthmas@users.noreply.github.com>
Date: Tue, 31 Mar 2026 14:44:29 +0200
Subject: [PATCH 8/8] Fix CLI env parsing and restore flow

---
 backend/crates/atlas-server/src/cli.rs    | 30 +++++++++++++++-
 backend/crates/atlas-server/src/config.rs | 25 ++++++++++++-
 backend/crates/atlas-server/src/main.rs   | 43 ++++++++++++++++++++++-
 3 files changed, 95 insertions(+), 3 deletions(-)

diff --git a/backend/crates/atlas-server/src/cli.rs b/backend/crates/atlas-server/src/cli.rs
index 24bdf52..7f26e59 100644
--- a/backend/crates/atlas-server/src/cli.rs
+++ b/backend/crates/atlas-server/src/cli.rs
@@ -310,7 +310,7 @@ pub struct FaucetArgs {
         value_name = "MINS",
         help = "Cooldown period in minutes between faucet requests per address"
     )]
-    pub cooldown_minutes: Option<u32>,
+    pub cooldown_minutes: Option<String>,

     // FAUCET_PRIVATE_KEY is intentionally env-only (security: never pass secrets as CLI flags)
 }
@@ -371,6 +371,34 @@ pub struct LogArgs {
     pub level: String,
 }

+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn
empty_faucet_cooldown_env_is_ignored_when_faucet_disabled() {
+        unsafe {
+            std::env::set_var("DATABASE_URL", "postgres://atlas:atlas@localhost/atlas");
+            std::env::set_var("RPC_URL", "http://localhost:8545");
+            std::env::set_var("FAUCET_ENABLED", "false");
+            std::env::set_var("FAUCET_COOLDOWN_MINUTES", "");
+        }
+
+        let cli = Cli::try_parse_from(["atlas-server", "run"]).expect("parse cli");
+        match cli.command {
+            Command::Run(args) => assert_eq!(args.faucet.cooldown_minutes, Some(String::new())),
+            _ => panic!("expected run command"),
+        }
+
+        unsafe {
+            std::env::remove_var("DATABASE_URL");
+            std::env::remove_var("RPC_URL");
+            std::env::remove_var("FAUCET_ENABLED");
+            std::env::remove_var("FAUCET_COOLDOWN_MINUTES");
+        }
+    }
+}
+
 // ── db subcommand ─────────────────────────────────────────────────────────────

 #[derive(Args)]

diff --git a/backend/crates/atlas-server/src/config.rs b/backend/crates/atlas-server/src/config.rs
index 16d41da..1f17dbd 100644
--- a/backend/crates/atlas-server/src/config.rs
+++ b/backend/crates/atlas-server/src/config.rs
@@ -359,11 +359,14 @@ impl FaucetConfig {
             bail!("faucet amount must be greater than 0");
         }

-        let cooldown_minutes = args.cooldown_minutes.ok_or_else(|| {
+        let cooldown_minutes = parse_optional_env(args.cooldown_minutes.clone()).ok_or_else(|| {
             anyhow::anyhow!(
                 "--atlas.faucet.cooldown-minutes (or FAUCET_COOLDOWN_MINUTES) must be set when faucet is enabled"
             )
         })?;
+        let cooldown_minutes = cooldown_minutes
+            .parse::<u32>()
+            .context("Invalid --atlas.faucet.cooldown-minutes / FAUCET_COOLDOWN_MINUTES")?;
         if cooldown_minutes == 0 {
             bail!("faucet cooldown must be greater than 0");
         }
@@ -645,6 +648,26 @@ mod tests_from_run_args {
         assert!(config.accent_color.is_none());
         assert_eq!(config.success_color.as_deref(), Some("#00ff00"));
     }
+
+    #[test]
+    fn faucet_blank_cooldown_is_treated_as_missing() {
+        let mut args = minimal_run_args();
+        args.faucet.enabled = true;
+        args.faucet.amount = Some("0.1".to_string());
+
args.faucet.cooldown_minutes = Some(" ".to_string()); + + unsafe { + env::set_var( + "FAUCET_PRIVATE_KEY", + "0x59c6995e998f97a5a0044966f0945382dbd8c5df5440d8d6d0d0f66f6d7d6a0d", + ); + } + let err = FaucetConfig::from_faucet_args(&args.faucet).unwrap_err(); + assert!(err.to_string().contains("cooldown-minutes")); + unsafe { + env::remove_var("FAUCET_PRIVATE_KEY"); + } + } } #[cfg(test)] diff --git a/backend/crates/atlas-server/src/main.rs b/backend/crates/atlas-server/src/main.rs index a796f2a..0a4e0d4 100644 --- a/backend/crates/atlas-server/src/main.rs +++ b/backend/crates/atlas-server/src/main.rs @@ -20,6 +20,13 @@ mod snapshot; const RETRY_DELAYS: &[u64] = &[5, 10, 20, 30, 60]; const MAX_RETRY_DELAY: u64 = 60; const PORTABLE_PG_DUMP_FLAGS: &[&str] = &["--format=custom", "--no-owner", "--no-acl"]; +const PORTABLE_PG_RESTORE_FLAGS: &[&str] = &[ + "--format=custom", + "--no-owner", + "--no-acl", + "--exit-on-error", +]; +const RESET_DB_FOR_RESTORE_SQL: &str = "DROP SCHEMA public CASCADE; CREATE SCHEMA public;"; fn init_tracing(filter: &str) { tracing_subscriber::registry() @@ -436,10 +443,27 @@ fn cmd_db_dump(db_url: &str, output: &str) -> Result<()> { fn cmd_db_restore(db_url: &str, input: &str) -> Result<()> { let config = postgres_connection_config(db_url)?; + + let reset_status = postgres_command("psql", &config) + .arg("--dbname") + .arg(&config.database_name) + .arg("-v") + .arg("ON_ERROR_STOP=1") + .arg("-c") + .arg(RESET_DB_FOR_RESTORE_SQL) + .status() + .map_err(|e| { + anyhow::anyhow!("Failed to run psql before restore (is it installed?): {e}") + })?; + if !reset_status.success() { + anyhow::bail!("psql exited with status {reset_status}"); + } + let status = postgres_command("pg_restore", &config) .arg("--dbname") .arg(&config.database_name) - .args(["--format=custom", "--clean", "--if-exists", input]) + .args(PORTABLE_PG_RESTORE_FLAGS) + .arg(input) .status() .map_err(|e| anyhow::anyhow!("Failed to run pg_restore (is it installed?): {e}"))?; @@ -682,4 
+706,21 @@ mod tests { ["--format=custom", "--no-owner", "--no-acl"] ); } + + #[test] + fn portable_pg_restore_prepares_clean_schema_and_exits_on_first_error() { + assert_eq!( + PORTABLE_PG_RESTORE_FLAGS, + [ + "--format=custom", + "--no-owner", + "--no-acl", + "--exit-on-error" + ] + ); + assert_eq!( + RESET_DB_FOR_RESTORE_SQL, + "DROP SCHEMA public CASCADE; CREATE SCHEMA public;" + ); + } }