From 3528ff3c98b343cb552330a9104da257eaef905f Mon Sep 17 00:00:00 2001 From: Jan Michael Auer Date: Fri, 20 Mar 2026 09:14:55 +0100 Subject: [PATCH 1/5] test(service): Restructure bigtable.rs tests into labelled sections Reorganize 32 flat tests into 20 grouped tests across 5 sections. Merges paired happy/mismatch CAS tests, fills coverage gaps (put_non_tombstone scenarios, get_object TTI bump path, CAS-create object), and removes tests made redundant by the merged groups. --- objectstore-service/src/backend/bigtable.rs | 1182 ++++++++++--------- 1 file changed, 610 insertions(+), 572 deletions(-) diff --git a/objectstore-service/src/backend/bigtable.rs b/objectstore-service/src/backend/bigtable.rs index 1c33d09d..2137b26b 100644 --- a/objectstore-service/src/backend/bigtable.rs +++ b/objectstore-service/src/backend/bigtable.rs @@ -1128,7 +1128,7 @@ mod tests { use crate::id::ObjectContext; use crate::stream; - // NB: Not run most of these tests, you need to have a BigTable emulator running. This is done + // NB: Most of these tests require a BigTable emulator running. This is done // automatically in CI. // // Refer to the readme for how to set up the emulator. @@ -1151,6 +1151,78 @@ mod tests { }) } + /// Writes a legacy-format tombstone row directly into Bigtable. 
+ async fn write_legacy_tombstone( + backend: &BigTableBackend, + id: &ObjectId, + expiration_policy: ExpirationPolicy, + time_expires: Option, + ) -> Result<()> { + let meta = if expiration_policy.is_manual() { + r#"{"is_redirect_tombstone":true}"#.to_owned() + } else { + let policy_json = serde_json::to_string(&expiration_policy).unwrap(); + format!(r#"{{"is_redirect_tombstone":true,"expiration_policy":{policy_json}}}"#) + }; + + let (family, timestamp_micros) = if expiration_policy.is_manual() { + (FAMILY_MANUAL, -1) + } else { + let t = + time_expires.unwrap_or(SystemTime::now() + expiration_policy.expires_in().unwrap()); + let timestamp = t + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_millis(); + (FAMILY_GC, timestamp as i64 * 1000) + }; + + backend + .mutate( + id.as_storage_path().to_string().into_bytes(), + [mutation::Mutation::SetCell(mutation::SetCell { + family_name: family.to_owned(), + column_qualifier: COLUMN_METADATA.to_owned(), + timestamp_micros, + value: meta.into_bytes(), + })], + "test-setup", + ) + .await?; + + Ok(()) + } + + /// Writes a new-format tombstone row with an empty `r` value directly, + /// simulating rows written by code before this change. 
+ async fn write_empty_redirect_tombstone( + backend: &BigTableBackend, + id: &ObjectId, + ) -> Result<()> { + let path = id.as_storage_path().to_string().into_bytes(); + let mutations = [ + mutation::Mutation::SetCell(mutation::SetCell { + family_name: FAMILY_MANUAL.to_owned(), + column_qualifier: COLUMN_REDIRECT.to_owned(), + timestamp_micros: -1, + value: b"".to_vec(), // empty — legacy format + }), + mutation::Mutation::SetCell(mutation::SetCell { + family_name: FAMILY_MANUAL.to_owned(), + column_qualifier: COLUMN_TOMBSTONE_META.to_owned(), + timestamp_micros: -1, + value: b"{}".to_vec(), + }), + ]; + + backend.mutate(path, mutations, "test-setup").await?; + + Ok(()) + } + + // --- Section 1: Object Operations --- + + /// Verifies the full roundtrip: put → get_object (payload + metadata) → get_metadata (metadata). #[tokio::test] async fn test_roundtrip() -> Result<()> { let backend = create_test_backend().await?; @@ -1167,33 +1239,27 @@ mod tests { .put_object(&id, &metadata, stream::single("hello, world")) .await?; - let (meta, stream) = backend.get_object(&id).await?.unwrap(); - + let (obj_meta, stream) = backend.get_object(&id).await?.unwrap(); let payload = stream::read_to_vec(stream).await?; - let str_payload = str::from_utf8(&payload).unwrap(); - assert_eq!(str_payload, "hello, world"); - assert_eq!(meta.content_type, metadata.content_type); - assert_eq!(meta.custom, metadata.custom); - - Ok(()) - } - - #[tokio::test] - async fn test_get_nonexistent() -> Result<()> { - let backend = create_test_backend().await?; + assert_eq!(str::from_utf8(&payload).unwrap(), "hello, world"); + assert_eq!(obj_meta.content_type, metadata.content_type); + assert_eq!(obj_meta.custom, metadata.custom); - let id = make_id(); - let result = backend.get_object(&id).await?; - assert!(result.is_none()); + let head_meta = backend.get_metadata(&id).await?.unwrap(); + assert_eq!(head_meta.content_type, metadata.content_type); + assert_eq!(head_meta.custom, metadata.custom); Ok(()) } 
+ /// Verifies that absent rows return None or succeed silently for all read/delete operations. #[tokio::test] - async fn test_delete_nonexistent() -> Result<()> { + async fn test_nonexistent() -> Result<()> { let backend = create_test_backend().await?; let id = make_id(); + assert!(backend.get_object(&id).await?.is_none()); + assert!(backend.get_metadata(&id).await?.is_none()); backend.delete_object(&id).await?; Ok(()) @@ -1204,30 +1270,26 @@ mod tests { let backend = create_test_backend().await?; let id = make_id(); - let metadata = Metadata { + let first_metadata = Metadata { custom: BTreeMap::from_iter([("invalid".into(), "invalid".into())]), ..Default::default() }; - backend - .put_object(&id, &metadata, stream::single("hello")) + .put_object(&id, &first_metadata, stream::single("hello")) .await?; - let metadata = Metadata { + let second_metadata = Metadata { custom: BTreeMap::from_iter([("hello".into(), "world".into())]), ..Default::default() }; - backend - .put_object(&id, &metadata, stream::single("world")) + .put_object(&id, &second_metadata, stream::single("world")) .await?; let (meta, stream) = backend.get_object(&id).await?.unwrap(); - let payload = stream::read_to_vec(stream).await?; - let str_payload = str::from_utf8(&payload).unwrap(); - assert_eq!(str_payload, "world"); - assert_eq!(meta.custom, metadata.custom); + assert_eq!(str::from_utf8(&payload).unwrap(), "world"); + assert_eq!(meta.custom, second_metadata.custom); Ok(()) } @@ -1237,484 +1299,424 @@ mod tests { let backend = create_test_backend().await?; let id = make_id(); - let metadata = Metadata::default(); - backend - .put_object(&id, &metadata, stream::single("hello, world")) + .put_object(&id, &Metadata::default(), stream::single("hello, world")) .await?; - backend.delete_object(&id).await?; - let result = backend.get_object(&id).await?; - assert!(result.is_none()); + assert!(backend.get_object(&id).await?.is_none()); Ok(()) } + /// Verifies TTI bump via both `get_object` (loaded=true 
path) and `get_metadata` (loaded=false path). + /// + /// The bump condition is: `expire_at < now + tti - TTI_DEBOUNCE`. We write a stale + /// timestamp just inside the bump window (still in the future, so the row is not GC'd) + /// and confirm that a subsequent read returns a later expiry. #[tokio::test] - async fn test_ttl_immediate() -> Result<()> { - // NB: We create a TTL that immediately expires in this tests. This might be optimized away - // in a future implementation, so we will have to update this test accordingly. - + async fn test_tti_bump() -> Result<()> { let backend = create_test_backend().await?; - - let id = make_id(); + // TTI must exceed TTI_DEBOUNCE (1 day) for the bump condition to be reachable. + let tti = Duration::from_secs(2 * 24 * 3600); // 2 days let metadata = Metadata { - expiration_policy: ExpirationPolicy::TimeToLive(Duration::from_secs(0)), + expiration_policy: ExpirationPolicy::TimeToIdle(tti), ..Default::default() }; + // Compute a stale timestamp just inside the bump window. + let old_deadline = SystemTime::now() + tti - TTI_DEBOUNCE - Duration::from_secs(60); + let old_micros = old_deadline + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_millis() as i64 + * 1000; + + // Sub-sequence 1: get_object triggers bump (loaded=true path). 
+ let id1 = make_id(); backend - .put_object(&id, &metadata, stream::single("hello, world")) + .put_object(&id1, &metadata, stream::single("hello, world")) .await?; + let path1 = id1.as_storage_path().to_string().into_bytes(); + backend + .mutate( + path1, + [ + mutation::Mutation::DeleteFromRow(mutation::DeleteFromRow {}), + mutation::Mutation::SetCell(mutation::SetCell { + family_name: FAMILY_GC.to_owned(), + column_qualifier: COLUMN_PAYLOAD.to_owned(), + timestamp_micros: old_micros, + value: b"hello, world".to_vec(), + }), + mutation::Mutation::SetCell(mutation::SetCell { + family_name: FAMILY_GC.to_owned(), + column_qualifier: COLUMN_METADATA.to_owned(), + timestamp_micros: old_micros, + value: serde_json::to_vec(&metadata).unwrap(), + }), + ], + "test-setup", + ) + .await?; + // get_object reads the stale row, triggers bump, and returns the pre-bump metadata. + let (pre_obj_meta, _) = backend.get_object(&id1).await?.unwrap(); + let pre_obj_expiry = pre_obj_meta.time_expires.unwrap(); + // A second get_metadata reads the freshly bumped row. + let post_obj_meta = backend.get_metadata(&id1).await?.unwrap(); + let post_obj_expiry = post_obj_meta.time_expires.unwrap(); + assert!( + post_obj_expiry > pre_obj_expiry, + "get_object bump should extend expiry: {pre_obj_expiry:?} -> {post_obj_expiry:?}" + ); - let result = backend.get_object(&id).await?; - assert!(result.is_none()); + // Sub-sequence 2: get_metadata triggers bump (loaded=false path). 
+ let id2 = make_id(); + backend + .put_object(&id2, &metadata, stream::single("hello, world")) + .await?; + let path2 = id2.as_storage_path().to_string().into_bytes(); + backend + .mutate( + path2, + [ + mutation::Mutation::DeleteFromRow(mutation::DeleteFromRow {}), + mutation::Mutation::SetCell(mutation::SetCell { + family_name: FAMILY_GC.to_owned(), + column_qualifier: COLUMN_PAYLOAD.to_owned(), + timestamp_micros: old_micros, + value: b"hello, world".to_vec(), + }), + mutation::Mutation::SetCell(mutation::SetCell { + family_name: FAMILY_GC.to_owned(), + column_qualifier: COLUMN_METADATA.to_owned(), + timestamp_micros: old_micros, + value: serde_json::to_vec(&metadata).unwrap(), + }), + ], + "test-setup", + ) + .await?; + // First get_metadata sees the stale row and triggers a bump. + let pre_meta = backend.get_metadata(&id2).await?.unwrap(); + let pre_expiry = pre_meta.time_expires.unwrap(); + // Second get_metadata reads the freshly bumped row. + let post_meta = backend.get_metadata(&id2).await?.unwrap(); + let post_expiry = post_meta.time_expires.unwrap(); + assert!( + post_expiry > pre_expiry, + "get_metadata bump should extend expiry: {pre_expiry:?} -> {post_expiry:?}" + ); + // Payload must be intact after the loaded=false bump (which re-fetches the payload). + let (_, stream) = backend.get_object(&id2).await?.unwrap(); + let payload = stream::read_to_vec(stream).await?; + assert_eq!(&payload, b"hello, world"); Ok(()) } #[tokio::test] - async fn test_tti_immediate() -> Result<()> { - // NB: We create a TTI that immediately expires in this tests. This might be optimized away - // in a future implementation, so we will have to update this test accordingly. - + async fn test_tti_no_bump_when_fresh() -> Result<()> { let backend = create_test_backend().await?; let id = make_id(); + // TTI must exceed TTI_DEBOUNCE (1 day) for the bump condition to be reachable. 
+ let tti = Duration::from_secs(2 * 24 * 3600); // 2 days let metadata = Metadata { - expiration_policy: ExpirationPolicy::TimeToIdle(Duration::from_secs(0)), + expiration_policy: ExpirationPolicy::TimeToIdle(tti), ..Default::default() }; - backend .put_object(&id, &metadata, stream::single("hello, world")) .await?; - let result = backend.get_object(&id).await?; - assert!(result.is_none()); + // A freshly written object has time_expires ≈ now + 2d, well outside the bump + // window (now + 2d - 1d = now + 1d). No bump should occur. + let first_expiry = backend + .get_metadata(&id) + .await? + .unwrap() + .time_expires + .unwrap(); + let second_expiry = backend + .get_metadata(&id) + .await? + .unwrap() + .time_expires + .unwrap(); + + assert_eq!( + first_expiry, second_expiry, + "fresh TTI object must not be bumped" + ); Ok(()) } + // --- Section 2: Expiration --- + #[tokio::test] - async fn test_get_metadata_returns_metadata() -> Result<()> { + async fn test_ttl_immediate() -> Result<()> { + // NB: We create a TTL that immediately expires in this test. This might be optimized away + // in a future implementation, so we will have to update this test accordingly. 
+ let backend = create_test_backend().await?; let id = make_id(); let metadata = Metadata { - content_type: "text/plain".into(), - custom: BTreeMap::from_iter([("hello".into(), "world".into())]), + expiration_policy: ExpirationPolicy::TimeToLive(Duration::from_secs(0)), ..Default::default() }; - backend .put_object(&id, &metadata, stream::single("hello, world")) .await?; - let meta = backend.get_metadata(&id).await?.unwrap(); - assert_eq!(meta.content_type, metadata.content_type); - assert_eq!(meta.custom, metadata.custom); + assert!(backend.get_object(&id).await?.is_none()); Ok(()) } #[tokio::test] - async fn test_get_metadata_nonexistent() -> Result<()> { - let backend = create_test_backend().await?; - - let id = make_id(); - let result = backend.get_metadata(&id).await?; - assert!(result.is_none()); - - Ok(()) - } + async fn test_tti_immediate() -> Result<()> { + // NB: We create a TTI that immediately expires in this test. This might be optimized away + // in a future implementation, so we will have to update this test accordingly. - #[tokio::test] - async fn test_get_metadata_tombstone_returns_error() -> Result<()> { let backend = create_test_backend().await?; let id = make_id(); - let write = TieredWrite::Tombstone(Tombstone { - target: id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }); - backend.compare_and_write(&id, None, write).await?; + let metadata = Metadata { + expiration_policy: ExpirationPolicy::TimeToIdle(Duration::from_secs(0)), + ..Default::default() + }; + backend + .put_object(&id, &metadata, stream::single("hello, world")) + .await?; - let result = backend.get_metadata(&id).await; - assert!( - matches!(result, Err(Error::UnexpectedTombstone)), - "expected UnexpectedTombstone, got {result:?}" - ); + assert!(backend.get_object(&id).await?.is_none()); Ok(()) } + // --- Section 3: Tiered Operations --- + + /// Covers all three row states for `get_tiered_object` and `get_tiered_metadata`. + /// + /// - **empty**: both return NotFound. 
+ /// - **object**: put_object, both return the Object variant with correct payload/metadata. + /// - **tombstone**: CAS-write with a distinct `lt_id`, both return the Tombstone variant + /// with `target == lt_id`. #[tokio::test] - async fn test_get_metadata_bumps_tti() -> Result<()> { + async fn test_tiered_get() -> Result<()> { let backend = create_test_backend().await?; + // empty let id = make_id(); - // TTI must exceed TTI_DEBOUNCE (1 day) for the bump condition to be reachable. - let tti = Duration::from_secs(2 * 24 * 3600); // 2 days - let metadata = Metadata { + assert!(matches!( + backend.get_tiered_object(&id).await?, + TieredGet::NotFound + )); + assert!(matches!( + backend.get_tiered_metadata(&id).await?, + TieredMetadata::NotFound + )); + + // object + let id = make_id(); + let put_meta = Metadata { content_type: "text/plain".into(), - expiration_policy: ExpirationPolicy::TimeToIdle(tti), + custom: BTreeMap::from_iter([("k".into(), "v".into())]), ..Default::default() }; - backend - .put_object(&id, &metadata, stream::single("hello, world")) + .put_object(&id, &put_meta, stream::single("tiered payload")) .await?; - // Manually rewrite the row with a timestamp that will trigger a bump. - // The bump condition is: expire_at < now + tti - TTI_DEBOUNCE. - // Set the expiry to just under the threshold but still in the future - // (so it doesn't get filtered as expired). - let path = id.as_storage_path().to_string().into_bytes(); - let old_deadline = SystemTime::now() + tti - TTI_DEBOUNCE - Duration::from_secs(60); - let old_micros = old_deadline - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_millis() as i64 - * 1000; + let TieredGet::Object(obj_meta, obj_stream) = backend.get_tiered_object(&id).await? 
else { + panic!("expected TieredGet::Object"); + }; + let obj_payload = stream::read_to_vec(obj_stream).await?; + assert_eq!(str::from_utf8(&obj_payload).unwrap(), "tiered payload"); + assert_eq!(obj_meta.content_type, put_meta.content_type); + assert_eq!(obj_meta.custom, put_meta.custom); - let mutations = [ - mutation::Mutation::DeleteFromRow(mutation::DeleteFromRow {}), - mutation::Mutation::SetCell(mutation::SetCell { - family_name: FAMILY_GC.to_owned(), - column_qualifier: COLUMN_PAYLOAD.to_owned(), - timestamp_micros: old_micros, - value: b"hello, world".to_vec(), - }), - mutation::Mutation::SetCell(mutation::SetCell { - family_name: FAMILY_GC.to_owned(), - column_qualifier: COLUMN_METADATA.to_owned(), - timestamp_micros: old_micros, - value: serde_json::to_vec(&metadata).unwrap(), - }), - ]; + let TieredMetadata::Object(head_meta) = backend.get_tiered_metadata(&id).await? else { + panic!("expected TieredMetadata::Object"); + }; + assert_eq!(head_meta.content_type, put_meta.content_type); + assert_eq!(head_meta.custom, put_meta.custom); + + // tombstone + let hv_id = make_id(); + let lt_id = ObjectId::random(hv_id.context().clone()); backend - .mutate(path.clone(), mutations, "test-setup") + .compare_and_write( + &hv_id, + None, + TieredWrite::Tombstone(Tombstone { + target: lt_id.clone(), + expiration_policy: ExpirationPolicy::Manual, + }), + ) .await?; - // First get_metadata sees the old timestamp and triggers a TTI bump. - let pre_meta = backend.get_metadata(&id).await?.unwrap(); - let pre_expiry = pre_meta.time_expires.unwrap(); - - // Second get_metadata sees the bumped timestamp. - let post_meta = backend.get_metadata(&id).await?.unwrap(); - let post_expiry = post_meta.time_expires.unwrap(); - assert!( - post_expiry > pre_expiry, - "TTI bump should have extended the expiry: {pre_expiry:?} -> {post_expiry:?}" + let TieredGet::Tombstone(get_t) = backend.get_tiered_object(&hv_id).await? 
else { + panic!("expected TieredGet::Tombstone"); + }; + assert_eq!( + get_t.target, lt_id, + "get_tiered_object target must match lt_id" ); - // Verify the payload is still intact after the bump. - let (_, stream) = backend.get_object(&id).await?.unwrap(); - let payload = stream::read_to_vec(stream).await?; - assert_eq!(&payload, b"hello, world"); + let TieredMetadata::Tombstone(meta_t) = backend.get_tiered_metadata(&hv_id).await? else { + panic!("expected TieredMetadata::Tombstone"); + }; + assert_eq!( + meta_t.target, lt_id, + "get_tiered_metadata target must match lt_id" + ); Ok(()) } + /// Covers all three row states for `put_non_tombstone`. + /// + /// - **empty**: returns None, object is readable. + /// - **object**: overwrites with new payload, returns None. + /// - **tombstone**: returns Some(Tombstone) with the correct target; tombstone still intact. #[tokio::test] - async fn test_get_metadata_does_not_bump_fresh_tti() -> Result<()> { + async fn test_put_non_tombstone() -> Result<()> { let backend = create_test_backend().await?; + // empty: put_non_tombstone on absent row succeeds and makes object readable. let id = make_id(); - // TTI must exceed TTI_DEBOUNCE (1 day) for the bump condition to be reachable. - let tti = Duration::from_secs(2 * 24 * 3600); // 2 days - let metadata = Metadata { - content_type: "text/plain".into(), - expiration_policy: ExpirationPolicy::TimeToIdle(tti), - ..Default::default() - }; + let result = backend + .put_non_tombstone(&id, &Metadata::default(), Bytes::from_static(b"first")) + .await?; + assert_eq!(result, None, "expected None on empty row"); + let (_, stream) = backend.get_object(&id).await?.unwrap(); + assert_eq!(&stream::read_to_vec(stream).await?, b"first"); + // object: put_non_tombstone on existing object replaces payload, returns None. 
+ let id = make_id(); backend - .put_object(&id, &metadata, stream::single("hello, world")) + .put_object(&id, &Metadata::default(), stream::single("old")) .await?; + let result = backend + .put_non_tombstone(&id, &Metadata::default(), Bytes::from_static(b"new")) + .await?; + assert_eq!(result, None, "expected None when overwriting object"); + let (_, stream) = backend.get_object(&id).await?.unwrap(); + assert_eq!(&stream::read_to_vec(stream).await?, b"new"); - // A freshly written object has time_expires ≈ now + 2d, which is well outside - // the bump window (now + 2d - 1d = now + 1d). No bump should occur. - let first = backend.get_metadata(&id).await?.unwrap(); - let first_expiry = first.time_expires.unwrap(); - - let second = backend.get_metadata(&id).await?.unwrap(); - let second_expiry = second.time_expires.unwrap(); - + // tombstone: put_non_tombstone returns Some(Tombstone) and leaves tombstone intact. + let hv_id = make_id(); + let lt_id = ObjectId::random(hv_id.context().clone()); + backend + .compare_and_write( + &hv_id, + None, + TieredWrite::Tombstone(Tombstone { + target: lt_id.clone(), + expiration_policy: ExpirationPolicy::Manual, + }), + ) + .await?; + let result = backend + .put_non_tombstone(&hv_id, &Metadata::default(), Bytes::new()) + .await?; + let returned = result.expect("expected Some(Tombstone) when row is a tombstone"); assert_eq!( - first_expiry, second_expiry, - "Fresh TTI object should not have its expiry bumped" + returned.target, lt_id, + "returned tombstone target must be lt_id" + ); + assert!( + matches!( + backend.get_tiered_metadata(&hv_id).await?, + TieredMetadata::Tombstone(_) + ), + "tombstone must still exist after put_non_tombstone" ); Ok(()) } + /// Covers all three row states for `delete_non_tombstone`. + /// + /// - **empty**: returns None. + /// - **object**: returns None, row gone. + /// - **tombstone**: returns Some(Tombstone) with correct target; tombstone still intact. 
+ /// + /// Verifies that the `r` column is correctly detected by both the `ReadRows` column + /// filter and the `CheckAndMutate` `tombstone_predicate`. #[tokio::test] - async fn test_delete_non_tombstone_real_object() -> Result<()> { + async fn test_delete_non_tombstone() -> Result<()> { let backend = create_test_backend().await?; + // empty let id = make_id(); - let metadata = Metadata::default(); + assert_eq!(backend.delete_non_tombstone(&id).await?, None); + // object + let id = make_id(); backend - .put_object(&id, &metadata, stream::single("hello, world")) + .put_object(&id, &Metadata::default(), stream::single("hello, world")) .await?; + assert_eq!(backend.delete_non_tombstone(&id).await?, None); + assert!(backend.get_object(&id).await?.is_none()); - let result = backend.delete_non_tombstone(&id).await?; - assert_eq!(result, None); - - let get_result = backend.get_object(&id).await?; - assert!(get_result.is_none()); - - Ok(()) - } - - /// Verifies that the `r` column (now holding the LT storage path, or an empty value - /// for legacy tombstones) is correctly detected by both the `ReadRows` column filter - /// and the `CheckAndMutate` `tombstone_predicate` — confirming Bigtable treats - /// non-empty-value cells as column-present in both filter types. - #[tokio::test] - async fn test_delete_non_tombstone_tombstone() -> Result<()> { - let backend = create_test_backend().await?; - - let id = make_id(); - let write = TieredWrite::Tombstone(Tombstone { - target: id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }); - backend.compare_and_write(&id, None, write).await?; - - let result = backend.delete_non_tombstone(&id).await?; - let tombstone = result.expect("Some(tombstone)"); - assert_eq!(tombstone.target, id, "tombstone target must be returned"); - - // Tombstone should still exist — delete_non_tombstone leaves it intact. 
- let get_result = backend.get_tiered_metadata(&id).await?; - assert!( - matches!(get_result, TieredMetadata::Tombstone(_)), - "tombstone should still exist after delete_non_tombstone" - ); - - Ok(()) - } - - #[tokio::test] - async fn test_delete_non_tombstone_nonexistent() -> Result<()> { - let backend = create_test_backend().await?; - - let id = make_id(); - let result = backend.delete_non_tombstone(&id).await?; - assert_eq!(result, None); - - Ok(()) - } - - #[tokio::test] - async fn test_hv_get_not_found() -> Result<()> { - let backend = create_test_backend().await?; - + // tombstone let id = make_id(); - assert!(matches!( - backend.get_tiered_object(&id).await?, - TieredGet::NotFound - )); - assert!(matches!( - backend.get_tiered_metadata(&id).await?, - TieredMetadata::NotFound - )); - - Ok(()) - } - - /// Writes a legacy-format tombstone row directly into Bigtable. - async fn write_legacy_tombstone( - backend: &BigTableBackend, - id: &ObjectId, - expiration_policy: ExpirationPolicy, - time_expires: Option, - ) -> Result<()> { - let meta = if expiration_policy.is_manual() { - r#"{"is_redirect_tombstone":true}"#.to_owned() - } else { - let policy_json = serde_json::to_string(&expiration_policy).unwrap(); - format!(r#"{{"is_redirect_tombstone":true,"expiration_policy":{policy_json}}}"#) - }; - - let (family, timestamp_micros) = if expiration_policy.is_manual() { - (FAMILY_MANUAL, -1) - } else { - let t = - time_expires.unwrap_or(SystemTime::now() + expiration_policy.expires_in().unwrap()); - let timestamp = t - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_millis(); - (FAMILY_GC, timestamp as i64 * 1000) - }; - backend - .mutate( - id.as_storage_path().to_string().into_bytes(), - [mutation::Mutation::SetCell(mutation::SetCell { - family_name: family.to_owned(), - column_qualifier: COLUMN_METADATA.to_owned(), - timestamp_micros, - value: meta.into_bytes(), - })], - "test-setup", + .compare_and_write( + &id, + None, + TieredWrite::Tombstone(Tombstone { 
+ target: id.clone(), + expiration_policy: ExpirationPolicy::Manual, + }), ) .await?; - - Ok(()) - } - - /// Write a new-format tombstone row with an empty `r` value directly, - /// simulating rows written by code before this change. - async fn write_empty_redirect_tombstone( - backend: &BigTableBackend, - id: &ObjectId, - ) -> Result<()> { - let path = id.as_storage_path().to_string().into_bytes(); - let mutations = [ - mutation::Mutation::SetCell(mutation::SetCell { - family_name: FAMILY_MANUAL.to_owned(), - column_qualifier: COLUMN_REDIRECT.to_owned(), - timestamp_micros: -1, - value: b"".to_vec(), // empty — legacy format - }), - mutation::Mutation::SetCell(mutation::SetCell { - family_name: FAMILY_MANUAL.to_owned(), - column_qualifier: COLUMN_TOMBSTONE_META.to_owned(), - timestamp_micros: -1, - value: b"{}".to_vec(), - }), - ]; - - backend.mutate(path, mutations, "test-setup").await?; - - Ok(()) - } - - /// - /// Uses `Manual` expiration so `timestamp_micros = -1` (server-assigned ≈ write time) does - /// not trigger immediate expiry. - #[tokio::test] - async fn test_legacy_tombstone_compat() -> Result<()> { - let backend = create_test_backend().await?; - let id = make_id(); - - write_legacy_tombstone(&backend, &id, ExpirationPolicy::Manual, None).await?; - - let TieredMetadata::Tombstone(t) = backend.get_tiered_metadata(&id).await? else { - panic!("expected tombstone"); - }; - assert_eq!(t.expiration_policy, ExpirationPolicy::Manual); - assert!(matches!( - backend.get_tiered_object(&id).await?, - TieredGet::Tombstone(_) - )); - - // Recreate a fresh tombstone to test the other conditional operations. - write_legacy_tombstone(&backend, &id, ExpirationPolicy::Manual, None).await?; - let t_opt = backend - .put_non_tombstone(&id, &Metadata::default(), Bytes::new()) - .await?; - // Legacy tombstones resolve to hv_id; target should match id. 
- assert_eq!(t_opt.map(|t| t.target).as_ref(), Some(&id)); - - write_legacy_tombstone(&backend, &id, ExpirationPolicy::Manual, None).await?; - let t_opt = backend.delete_non_tombstone(&id).await?; - assert_eq!(t_opt.map(|t| t.target), Some(id)); - - Ok(()) - } - - /// Legacy tombstones with a `TimeToLive` expiration policy have the policy correctly - /// deserialized from the `m` column JSON. - /// - /// A future cell timestamp (now + TTL) is required so `expires_before` does not immediately - /// filter the row: the cell timestamp doubles as the GC expiry time. - #[tokio::test] - async fn test_legacy_tombstone_expiration_policy() -> Result<()> { - let backend = create_test_backend().await?; - let id = make_id(); - - let ttl = Duration::from_secs(2 * 24 * 3600); - write_legacy_tombstone(&backend, &id, ExpirationPolicy::TimeToLive(ttl), None).await?; - - let TieredMetadata::Tombstone(t) = backend.get_tiered_metadata(&id).await? else { - panic!("expected TieredMetadata::Tombstone"); - }; - assert_eq!(t.expiration_policy, ExpirationPolicy::TimeToLive(ttl)); - - Ok(()) - } - - /// A legacy tombstone with TTI policy is upgraded to the new `r`/`t` column format on read. - /// - /// The bump path in both `get_tiered_metadata` and `get_tiered_object` calls - /// `put_tombstone_row`, which rewrites the row with `r` + `t` columns. The upgraded - /// row has a fresh cell timestamp (≈ now + TTI), so `time_expires` increases. - #[tokio::test] - async fn test_legacy_tombstone_tti_upgrade() -> Result<()> { - let backend = create_test_backend().await?; - let id = make_id(); - let path = id.as_storage_path().to_string().into_bytes(); - - let tti = Duration::from_secs(2 * 24 * 3600); // must exceed TTI_DEBOUNCE (1 day) - - // Place time_expires just inside the bump window: past `now + tti - TTI_DEBOUNCE` - // but still in the future so `expires_before(now)` does not filter the row. 
- let old_deadline = SystemTime::now() + tti - TTI_DEBOUNCE - Duration::from_secs(60); - write_legacy_tombstone( - &backend, - &id, - ExpirationPolicy::TimeToIdle(tti), - Some(old_deadline), - ) - .await?; - - // First read detects the stale TTI and triggers `put_tombstone_row`. - let TieredMetadata::Tombstone(_) = backend.get_tiered_metadata(&id).await? else { - panic!("expected tombstone"); - }; - - // After the bump, the row is rewritten with a fresh timestamp (≈ now + TTI). - // Verify the new time_expires is later than the pre-bump deadline. - let Some(RowData::Tombstone { - time_expires: Some(new_deadline), - .. - }) = backend.read_row(&path, None, "test-verify").await? - else { - panic!("expected tombstone row after bump"); - }; - + let tombstone = backend + .delete_non_tombstone(&id) + .await? + .expect("expected Some(tombstone)"); + assert_eq!(tombstone.target, id, "tombstone target must be returned"); assert!( - new_deadline > old_deadline, - "TTI bump should have extended the tombstone expiry: {old_deadline:?} -> {new_deadline:?}" + matches!( + backend.get_tiered_metadata(&id).await?, + TieredMetadata::Tombstone(_) + ), + "tombstone must still exist after delete_non_tombstone" ); Ok(()) } + // --- Section 4: Compare-and-Write --- + + /// Creating a tombstone on an empty row succeeds; a second attempt fails. + /// + /// After creation, both tiered and legacy APIs reflect the tombstone. #[tokio::test] - async fn test_swap_create_tombstone() -> Result<()> { + async fn test_cas_create_tombstone() -> Result<()> { let backend = create_test_backend().await?; let id = make_id(); let expiration_policy = ExpirationPolicy::TimeToLive(Duration::from_secs(3600)); - let write = TieredWrite::Tombstone(Tombstone { + let tombstone = Tombstone { target: id.clone(), expiration_policy, - }); - let committed = backend.compare_and_write(&id, None, write).await?; + }; + + // First create succeeds. 
+ let committed = backend + .compare_and_write(&id, None, TieredWrite::Tombstone(tombstone.clone())) + .await?; assert!(committed, "expected CAS success on empty row"); - // Both hv methods must surface the tombstone with the correct expiration_policy. + // Tiered reads must see the tombstone with correct policy. let TieredMetadata::Tombstone(t) = backend.get_tiered_metadata(&id).await? else { - panic!("expected TieredMetadataResponse::Tombstone"); + panic!("expected TieredMetadata::Tombstone"); }; assert_eq!(t.expiration_policy, expiration_policy); assert!(matches!( @@ -1722,7 +1724,7 @@ mod tests { TieredGet::Tombstone(_) )); - // Legacy get_object / get_metadata must error rather than leak tombstone data. + // Legacy reads must error rather than leak tombstone data. assert!(matches!( backend.get_object(&id).await, Err(Error::UnexpectedTombstone) @@ -1732,277 +1734,342 @@ mod tests { Err(Error::UnexpectedTombstone) )); - Ok(()) - } - - /// Attempting to create a tombstone when one already exists returns false. - #[tokio::test] - async fn test_swap_create_tombstone_conflict() -> Result<()> { - let backend = create_test_backend().await?; - - let id = make_id(); - let tombstone = Tombstone { - target: id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }; - let first = backend - .compare_and_write(&id, None, TieredWrite::Tombstone(tombstone.clone())) - .await?; - assert!(first, "first write should succeed"); - + // Second create fails: tombstone already exists. let second = backend .compare_and_write(&id, None, TieredWrite::Tombstone(tombstone)) .await?; - assert!( - !second, - "second write should fail: tombstone already exists" - ); + assert!(!second, "second create must fail: tombstone already exists"); Ok(()) } - /// CAS-swapping an existing tombstone for a new one succeeds when the expected target matches. + /// Swapping a tombstone target: wrong expected → false, correct expected → true. 
#[tokio::test] - async fn test_swap_tombstone() -> Result<()> { + async fn test_cas_swap_tombstone() -> Result<()> { let backend = create_test_backend().await?; let hv_id = make_id(); let old_lt_id = ObjectId::random(hv_id.context().clone()); + let wrong_lt_id = ObjectId::random(hv_id.context().clone()); let new_lt_id = ObjectId::random(hv_id.context().clone()); - let old_write = TieredWrite::Tombstone(Tombstone { - target: old_lt_id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }); - backend.compare_and_write(&hv_id, None, old_write).await?; + backend + .compare_and_write( + &hv_id, + None, + TieredWrite::Tombstone(Tombstone { + target: old_lt_id.clone(), + expiration_policy: ExpirationPolicy::Manual, + }), + ) + .await?; - let new_write = TieredWrite::Tombstone(Tombstone { - target: new_lt_id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }); + // Wrong target: CAS fails, tombstone unchanged. let swapped = backend - .compare_and_write(&hv_id, Some(&old_lt_id), new_write) + .compare_and_write( + &hv_id, + Some(&wrong_lt_id), + TieredWrite::Tombstone(Tombstone { + target: new_lt_id.clone(), + expiration_policy: ExpirationPolicy::Manual, + }), + ) .await?; - assert!(swapped, "expected CAS success"); - + assert!(!swapped, "expected CAS failure due to wrong target"); let TieredMetadata::Tombstone(t) = backend.get_tiered_metadata(&hv_id).await? else { panic!("expected tombstone"); }; - assert_eq!(t.target, new_lt_id, "tombstone target must be updated"); - - Ok(()) - } - - /// CAS-swapping fails when the expected target does not match the current tombstone. 
- #[tokio::test] - async fn test_swap_tombstone_mismatch() -> Result<()> { - let backend = create_test_backend().await?; - - let hv_id = make_id(); - let actual_lt_id = ObjectId::random(hv_id.context().clone()); - let wrong_lt_id = ObjectId::random(hv_id.context().clone()); - let new_lt_id = ObjectId::random(hv_id.context().clone()); - - let old_write = TieredWrite::Tombstone(Tombstone { - target: actual_lt_id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }); - backend.compare_and_write(&hv_id, None, old_write).await?; + assert_eq!( + t.target, old_lt_id, + "target must be unchanged after mismatch" + ); - let new_write = TieredWrite::Tombstone(Tombstone { - target: new_lt_id, - expiration_policy: ExpirationPolicy::Manual, - }); + // Correct target: CAS succeeds, target updated. let swapped = backend - .compare_and_write(&hv_id, Some(&wrong_lt_id), new_write) + .compare_and_write( + &hv_id, + Some(&old_lt_id), + TieredWrite::Tombstone(Tombstone { + target: new_lt_id.clone(), + expiration_policy: ExpirationPolicy::Manual, + }), + ) .await?; - assert!(!swapped, "expected CAS failure due to wrong target"); - - // Row unchanged. + assert!(swapped, "expected CAS success with correct target"); let TieredMetadata::Tombstone(t) = backend.get_tiered_metadata(&hv_id).await? else { panic!("expected tombstone"); }; - assert_eq!(t.target, actual_lt_id, "tombstone target must be unchanged"); + assert_eq!( + t.target, new_lt_id, + "target must be updated after successful swap" + ); Ok(()) } - /// CAS-swapping a tombstone for inline data succeeds when the target matches. + /// Swapping a tombstone for inline object data: wrong expected → false, correct → true. 
#[tokio::test] - async fn test_swap_inline() -> Result<()> { + async fn test_cas_swap_inline() -> Result<()> { let backend = create_test_backend().await?; let id = make_id(); let lt_id = ObjectId::random(id.context().clone()); + let wrong_id = ObjectId::random(id.context().clone()); - let old_write = TieredWrite::Tombstone(Tombstone { - target: lt_id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }); - backend.compare_and_write(&id, None, old_write).await?; + backend + .compare_and_write( + &id, + None, + TieredWrite::Tombstone(Tombstone { + target: lt_id.clone(), + expiration_policy: ExpirationPolicy::Manual, + }), + ) + .await?; - let payload = Bytes::from_static(b"hello inline"); - let new_write = TieredWrite::Object(Metadata::default(), payload.clone()); + // Wrong target: CAS fails, tombstone intact. let swapped = backend - .compare_and_write(&id, Some(<_id), new_write) + .compare_and_write( + &id, + Some(&wrong_id), + TieredWrite::Object(Metadata::default(), Bytes::new()), + ) .await?; - assert!(swapped, "expected CAS success"); + assert!(!swapped, "expected CAS failure with wrong target"); + assert!(matches!( + backend.get_tiered_metadata(&id).await?, + TieredMetadata::Tombstone(_) + )); - // Row is now an inline object. + // Correct target: CAS succeeds, row becomes an inline object. + let payload = Bytes::from_static(b"hello inline"); + let swapped = backend + .compare_and_write( + &id, + Some(<_id), + TieredWrite::Object(Metadata::default(), payload.clone()), + ) + .await?; + assert!(swapped, "expected CAS success with correct target"); let TieredGet::Object(_, stream) = backend.get_tiered_object(&id).await? else { panic!("expected inline object after swap"); }; - let body = crate::stream::read_to_vec(stream).await?; - assert_eq!(body, payload.as_ref()); + assert_eq!(&stream::read_to_vec(stream).await?, payload.as_ref()); Ok(()) } - /// Inline-swap fails when the expected target does not match. 
+ /// CAS-write an object onto an empty row (expected=None, write=Object) succeeds. #[tokio::test] - async fn test_swap_inline_mismatch() -> Result<()> { + async fn test_cas_create_object_on_empty_row() -> Result<()> { let backend = create_test_backend().await?; let id = make_id(); - let lt_id = ObjectId::random(id.context().clone()); - let wrong_id = ObjectId::random(id.context().clone()); - - let old_write = TieredWrite::Tombstone(Tombstone { - target: lt_id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }); - backend.compare_and_write(&id, None, old_write).await?; - - let new_write = TieredWrite::Object(Metadata::default(), Bytes::new()); - let swapped = backend - .compare_and_write(&id, Some(&wrong_id), new_write) + let payload = Bytes::from_static(b"cas object"); + let committed = backend + .compare_and_write( + &id, + None, + TieredWrite::Object(Metadata::default(), payload.clone()), + ) .await?; - assert!(!swapped, "expected CAS failure"); + assert!(committed, "expected CAS success on empty row"); - // Tombstone still present. - assert!(matches!( - backend.get_tiered_metadata(&id).await?, - TieredMetadata::Tombstone(_) - )); + let TieredGet::Object(_, stream) = backend.get_tiered_object(&id).await? else { + panic!("expected Object after CAS-create"); + }; + assert_eq!(&stream::read_to_vec(stream).await?, payload.as_ref()); Ok(()) } - /// CAS-delete succeeds when the expected target matches. + /// CAS-delete: wrong expected → false; correct expected → true, row gone. + /// CAS-delete with Some(target) against a regular object also returns false. 
#[tokio::test] - async fn test_swap_delete() -> Result<()> { + async fn test_cas_delete() -> Result<()> { let backend = create_test_backend().await?; let id = make_id(); let lt_id = ObjectId::random(id.context().clone()); + let wrong_id = ObjectId::random(id.context().clone()); - let write = TieredWrite::Tombstone(Tombstone { - target: lt_id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }); - backend.compare_and_write(&id, None, write).await?; + backend + .compare_and_write( + &id, + None, + TieredWrite::Tombstone(Tombstone { + target: lt_id.clone(), + expiration_policy: ExpirationPolicy::Manual, + }), + ) + .await?; + // Wrong target: fails, row preserved. + let deleted = backend + .compare_and_write(&id, Some(&wrong_id), TieredWrite::Delete) + .await?; + assert!(!deleted, "expected CAS failure with wrong target"); + assert!(matches!( + backend.get_tiered_metadata(&id).await?, + TieredMetadata::Tombstone(_) + )); + + // Correct target: succeeds, row gone. let deleted = backend .compare_and_write(&id, Some(<_id), TieredWrite::Delete) .await?; assert!(deleted, "expected CAS delete success"); - assert!(matches!( backend.get_tiered_metadata(&id).await?, TieredMetadata::NotFound )); + // Regular object: CAS-delete with Some(target) returns false, object preserved. + let id2 = make_id(); + let fake_lt_id = ObjectId::random(id2.context().clone()); + backend + .put_object(&id2, &Metadata::default(), stream::single("data")) + .await?; + let deleted = backend + .compare_and_write(&id2, Some(&fake_lt_id), TieredWrite::Delete) + .await?; + assert!(!deleted, "expected false: row is not a tombstone"); + assert!(backend.get_object(&id2).await?.is_some()); + Ok(()) } - /// CAS-delete fails when the expected target does not match. + // --- Section 5: Legacy Tombstone Compatibility --- + + /// Legacy Manual and TTL tombstones are correctly read via the tiered APIs. 
+ /// + /// Uses `Manual` expiration so `timestamp_micros = -1` (server-assigned ≈ write time) + /// does not trigger immediate expiry. #[tokio::test] - async fn test_swap_delete_mismatch() -> Result<()> { + async fn test_legacy_tombstone_reads() -> Result<()> { let backend = create_test_backend().await?; + // Manual policy: get_tiered_metadata returns Tombstone(Manual), get_tiered_object returns Tombstone. let id = make_id(); - let lt_id = ObjectId::random(id.context().clone()); - let wrong_id = ObjectId::random(id.context().clone()); - - let write = TieredWrite::Tombstone(Tombstone { - target: lt_id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }); - backend.compare_and_write(&id, None, write).await?; - - let deleted = backend - .compare_and_write(&id, Some(&wrong_id), TieredWrite::Delete) - .await?; - assert!(!deleted, "expected CAS failure"); + write_legacy_tombstone(&backend, &id, ExpirationPolicy::Manual, None).await?; - // Row preserved. + let TieredMetadata::Tombstone(t) = backend.get_tiered_metadata(&id).await? else { + panic!("expected tombstone"); + }; + assert_eq!(t.expiration_policy, ExpirationPolicy::Manual); assert!(matches!( - backend.get_tiered_metadata(&id).await?, - TieredMetadata::Tombstone(_) + backend.get_tiered_object(&id).await?, + TieredGet::Tombstone(_) )); + // TTL policy: get_tiered_metadata returns Tombstone with the correct TTL policy. + // + // A future cell timestamp (now + TTL) is required so `expires_before` does not + // immediately filter the row. + let id = make_id(); + let ttl = Duration::from_secs(2 * 24 * 3600); + write_legacy_tombstone(&backend, &id, ExpirationPolicy::TimeToLive(ttl), None).await?; + + let TieredMetadata::Tombstone(t) = backend.get_tiered_metadata(&id).await? else { + panic!("expected TieredMetadata::Tombstone"); + }; + assert_eq!(t.expiration_policy, ExpirationPolicy::TimeToLive(ttl)); + Ok(()) } - /// CAS-delete with Some(target) against a regular object returns false. 
+ /// A legacy tombstone with TTI policy is upgraded to the new `r`/`t` column format on read. + /// + /// The bump path calls `put_tombstone_row`, which rewrites the row with `r` + `t` columns. + /// The upgraded row has a fresh cell timestamp (≈ now + TTI), so `time_expires` increases. #[tokio::test] - async fn test_swap_delete_regular_object() -> Result<()> { + async fn test_legacy_tombstone_tti_upgrade() -> Result<()> { let backend = create_test_backend().await?; - let id = make_id(); - let fake_lt_id = ObjectId::random(id.context().clone()); + let path = id.as_storage_path().to_string().into_bytes(); - backend - .put_object(&id, &Metadata::default(), crate::stream::single("data")) - .await?; + let tti = Duration::from_secs(2 * 24 * 3600); // must exceed TTI_DEBOUNCE (1 day) - let deleted = backend - .compare_and_write(&id, Some(&fake_lt_id), TieredWrite::Delete) - .await?; - assert!(!deleted, "expected false: row is not a tombstone"); + // Place time_expires just inside the bump window: past `now + tti - TTI_DEBOUNCE` + // but still in the future so `expires_before(now)` does not filter the row. + let old_deadline = SystemTime::now() + tti - TTI_DEBOUNCE - Duration::from_secs(60); + write_legacy_tombstone( + &backend, + &id, + ExpirationPolicy::TimeToIdle(tti), + Some(old_deadline), + ) + .await?; + + // First read detects the stale TTI and triggers `put_tombstone_row`. + let TieredMetadata::Tombstone(_) = backend.get_tiered_metadata(&id).await? else { + panic!("expected tombstone"); + }; + + // After the bump, the row is rewritten with a fresh timestamp (≈ now + TTI). + let Some(RowData::Tombstone { + time_expires: Some(new_deadline), + .. + }) = backend.read_row(&path, None, "test-verify").await? + else { + panic!("expected tombstone row after bump"); + }; - // Object preserved. 
- assert!(backend.get_object(&id).await?.is_some()); + assert!( + new_deadline > old_deadline, + "TTI bump should extend tombstone expiry: {old_deadline:?} -> {new_deadline:?}" + ); Ok(()) } - /// Legacy empty-redirect tombstone (`r=b""`) is matched when `expected=Some(id)`. + /// Legacy tombstones are handled correctly by all conditional write operations. + /// + /// Covers: `put_non_tombstone`, `delete_non_tombstone`, CAS-delete for both the + /// legacy-metadata format and the empty-redirect format. #[tokio::test] - async fn test_swap_legacy_empty_redirect() -> Result<()> { + async fn test_legacy_tombstone_conditional_ops() -> Result<()> { let backend = create_test_backend().await?; + + // put_non_tombstone returns Some(target == id) for a legacy tombstone. let id = make_id(); + write_legacy_tombstone(&backend, &id, ExpirationPolicy::Manual, None).await?; + let t_opt = backend + .put_non_tombstone(&id, &Metadata::default(), Bytes::new()) + .await?; + assert_eq!(t_opt.map(|t| t.target).as_ref(), Some(&id)); - write_empty_redirect_tombstone(&backend, &id).await?; + // delete_non_tombstone returns Some(target == id) for a legacy tombstone. + let id = make_id(); + write_legacy_tombstone(&backend, &id, ExpirationPolicy::Manual, None).await?; + let t_opt = backend.delete_non_tombstone(&id).await?; + assert_eq!(t_opt.map(|t| t.target).as_ref(), Some(&id)); - // Expected target = id (the legacy fallback resolves to hv_id). + // CAS-delete succeeds on a legacy-metadata tombstone (target resolves to hv_id). 
+ let id = make_id(); + write_legacy_tombstone(&backend, &id, ExpirationPolicy::Manual, None).await?; let deleted = backend .compare_and_write(&id, Some(&id), TieredWrite::Delete) .await?; - assert!(deleted, "should match legacy empty-redirect tombstone"); - + assert!( + deleted, + "CAS-delete must succeed on legacy-metadata tombstone" + ); assert!(matches!( backend.get_tiered_metadata(&id).await?, TieredMetadata::NotFound )); - Ok(()) - } - - /// Legacy metadata-format tombstone is matched when `expected=Some(id)`. - #[tokio::test] - async fn test_swap_legacy_metadata_format() -> Result<()> { - let backend = create_test_backend().await?; + // CAS-delete succeeds on an empty-redirect tombstone (target resolves to hv_id). let id = make_id(); - - write_legacy_tombstone(&backend, &id, ExpirationPolicy::Manual, None).await?; - - // Legacy tombstones resolve to hv_id, so expected target = id. + write_empty_redirect_tombstone(&backend, &id).await?; let deleted = backend .compare_and_write(&id, Some(&id), TieredWrite::Delete) .await?; - assert!(deleted, "should match legacy-metadata tombstone"); - + assert!( + deleted, + "CAS-delete must succeed on empty-redirect tombstone" + ); assert!(matches!( backend.get_tiered_metadata(&id).await?, TieredMetadata::NotFound @@ -2011,36 +2078,7 @@ mod tests { Ok(()) } - /// The redirect pointer stored in the `r` column survives a Bigtable write and read: - /// `target` on write equals `target` on read, and the parsed `ObjectId` matches. - #[tokio::test] - async fn test_redirect_pointer_round_trip() -> Result<()> { - let backend = create_test_backend().await?; - - // Create different IDs for HV and LT - let hv_id = make_id(); - let lt_id = ObjectId::random(hv_id.context().clone()); - let tombstone = Tombstone { - target: lt_id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }; - - backend - .compare_and_write(&hv_id, None, TieredWrite::Tombstone(tombstone)) - .await?; - - match backend.get_tiered_metadata(&hv_id).await? 
{ - TieredMetadata::Tombstone(t) => assert_eq!(t.target, lt_id, "target must match"), - other => panic!("expected tombstone, got {other:?}"), - } - match backend.get_tiered_object(&hv_id).await? { - TieredGet::Tombstone(t) => assert_eq!(t.target, lt_id, "target must match"), - other => panic!("expected tombstone, got {other:?}"), - } - - Ok(()) - } - + /// An empty `r` value falls back to the HV id when resolving the tombstone target. #[tokio::test] async fn test_empty_redirect_falls_back_to_hv_id() -> Result<()> { let backend = create_test_backend().await?; @@ -2048,7 +2086,7 @@ mod tests { write_empty_redirect_tombstone(&backend, &id).await?; match backend.get_tiered_metadata(&id).await? { - TieredMetadata::Tombstone(t) => assert_eq!(t.target, id, "must use id"), + TieredMetadata::Tombstone(t) => assert_eq!(t.target, id, "must fall back to hv_id"), other => panic!("expected tombstone, got {other:?}"), } From f2338591b7d8657c097062e233a6122a33dd08b2 Mon Sep 17 00:00:00 2001 From: Jan Michael Auer Date: Fri, 20 Mar 2026 11:26:57 +0100 Subject: [PATCH 2/5] Improve test formatting --- objectstore-service/src/backend/bigtable.rs | 328 +++++++------------- 1 file changed, 120 insertions(+), 208 deletions(-) diff --git a/objectstore-service/src/backend/bigtable.rs b/objectstore-service/src/backend/bigtable.rs index 2137b26b..9b4125bd 100644 --- a/objectstore-service/src/backend/bigtable.rs +++ b/objectstore-service/src/backend/bigtable.rs @@ -1177,18 +1177,15 @@ mod tests { (FAMILY_GC, timestamp as i64 * 1000) }; - backend - .mutate( - id.as_storage_path().to_string().into_bytes(), - [mutation::Mutation::SetCell(mutation::SetCell { - family_name: family.to_owned(), - column_qualifier: COLUMN_METADATA.to_owned(), - timestamp_micros, - value: meta.into_bytes(), - })], - "test-setup", - ) - .await?; + let path = id.as_storage_path().to_string().into_bytes(); + let mutations = [mutation::Mutation::SetCell(mutation::SetCell { + family_name: family.to_owned(), + 
column_qualifier: COLUMN_METADATA.to_owned(), + timestamp_micros, + value: meta.into_bytes(), + })]; + + backend.mutate(path, mutations, "test-setup").await?; Ok(()) } @@ -1338,36 +1335,33 @@ mod tests { .put_object(&id1, &metadata, stream::single("hello, world")) .await?; let path1 = id1.as_storage_path().to_string().into_bytes(); - backend - .mutate( - path1, - [ - mutation::Mutation::DeleteFromRow(mutation::DeleteFromRow {}), - mutation::Mutation::SetCell(mutation::SetCell { - family_name: FAMILY_GC.to_owned(), - column_qualifier: COLUMN_PAYLOAD.to_owned(), - timestamp_micros: old_micros, - value: b"hello, world".to_vec(), - }), - mutation::Mutation::SetCell(mutation::SetCell { - family_name: FAMILY_GC.to_owned(), - column_qualifier: COLUMN_METADATA.to_owned(), - timestamp_micros: old_micros, - value: serde_json::to_vec(&metadata).unwrap(), - }), - ], - "test-setup", - ) - .await?; + let mutations = [ + mutation::Mutation::DeleteFromRow(mutation::DeleteFromRow {}), + mutation::Mutation::SetCell(mutation::SetCell { + family_name: FAMILY_GC.to_owned(), + column_qualifier: COLUMN_PAYLOAD.to_owned(), + timestamp_micros: old_micros, + value: b"hello, world".to_vec(), + }), + mutation::Mutation::SetCell(mutation::SetCell { + family_name: FAMILY_GC.to_owned(), + column_qualifier: COLUMN_METADATA.to_owned(), + timestamp_micros: old_micros, + value: serde_json::to_vec(&metadata).unwrap(), + }), + ]; + backend.mutate(path1, mutations, "test-setup").await?; + // get_object reads the stale row, triggers bump, and returns the pre-bump metadata. let (pre_obj_meta, _) = backend.get_object(&id1).await?.unwrap(); let pre_obj_expiry = pre_obj_meta.time_expires.unwrap(); + // A second get_metadata reads the freshly bumped row. 
let post_obj_meta = backend.get_metadata(&id1).await?.unwrap(); let post_obj_expiry = post_obj_meta.time_expires.unwrap(); assert!( post_obj_expiry > pre_obj_expiry, - "get_object bump should extend expiry: {pre_obj_expiry:?} -> {post_obj_expiry:?}" + "bump should extend expiry" ); // Sub-sequence 2: get_metadata triggers bump (loaded=false path). @@ -1376,37 +1370,32 @@ mod tests { .put_object(&id2, &metadata, stream::single("hello, world")) .await?; let path2 = id2.as_storage_path().to_string().into_bytes(); - backend - .mutate( - path2, - [ - mutation::Mutation::DeleteFromRow(mutation::DeleteFromRow {}), - mutation::Mutation::SetCell(mutation::SetCell { - family_name: FAMILY_GC.to_owned(), - column_qualifier: COLUMN_PAYLOAD.to_owned(), - timestamp_micros: old_micros, - value: b"hello, world".to_vec(), - }), - mutation::Mutation::SetCell(mutation::SetCell { - family_name: FAMILY_GC.to_owned(), - column_qualifier: COLUMN_METADATA.to_owned(), - timestamp_micros: old_micros, - value: serde_json::to_vec(&metadata).unwrap(), - }), - ], - "test-setup", - ) - .await?; + let mutations = [ + mutation::Mutation::DeleteFromRow(mutation::DeleteFromRow {}), + mutation::Mutation::SetCell(mutation::SetCell { + family_name: FAMILY_GC.to_owned(), + column_qualifier: COLUMN_PAYLOAD.to_owned(), + timestamp_micros: old_micros, + value: b"hello, world".to_vec(), + }), + mutation::Mutation::SetCell(mutation::SetCell { + family_name: FAMILY_GC.to_owned(), + column_qualifier: COLUMN_METADATA.to_owned(), + timestamp_micros: old_micros, + value: serde_json::to_vec(&metadata).unwrap(), + }), + ]; + backend.mutate(path2, mutations, "test-setup").await?; + // First get_metadata sees the stale row and triggers a bump. let pre_meta = backend.get_metadata(&id2).await?.unwrap(); let pre_expiry = pre_meta.time_expires.unwrap(); + // Second get_metadata reads the freshly bumped row. 
let post_meta = backend.get_metadata(&id2).await?.unwrap(); let post_expiry = post_meta.time_expires.unwrap(); - assert!( - post_expiry > pre_expiry, - "get_metadata bump should extend expiry: {pre_expiry:?} -> {post_expiry:?}" - ); + assert!(post_expiry > pre_expiry, "bump should extend expiry"); + // Payload must be intact after the loaded=false bump (which re-fetches the payload). let (_, stream) = backend.get_object(&id2).await?.unwrap(); let payload = stream::read_to_vec(stream).await?; @@ -1432,21 +1421,12 @@ mod tests { // A freshly written object has time_expires ≈ now + 2d, well outside the bump // window (now + 2d - 1d = now + 1d). No bump should occur. - let first_expiry = backend - .get_metadata(&id) - .await? - .unwrap() - .time_expires - .unwrap(); - let second_expiry = backend - .get_metadata(&id) - .await? - .unwrap() - .time_expires - .unwrap(); + let first = backend.get_metadata(&id).await?.unwrap(); + let second = backend.get_metadata(&id).await?.unwrap(); assert_eq!( - first_expiry, second_expiry, + first.time_expires.unwrap(), + second.time_expires.unwrap(), "fresh TTI object must not be bumped" ); @@ -1535,7 +1515,7 @@ mod tests { panic!("expected TieredGet::Object"); }; let obj_payload = stream::read_to_vec(obj_stream).await?; - assert_eq!(str::from_utf8(&obj_payload).unwrap(), "tiered payload"); + assert_eq!(obj_payload, b"tiered payload"); assert_eq!(obj_meta.content_type, put_meta.content_type); assert_eq!(obj_meta.custom, put_meta.custom); @@ -1548,32 +1528,20 @@ mod tests { // tombstone let hv_id = make_id(); let lt_id = ObjectId::random(hv_id.context().clone()); - backend - .compare_and_write( - &hv_id, - None, - TieredWrite::Tombstone(Tombstone { - target: lt_id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }), - ) - .await?; - - let TieredGet::Tombstone(get_t) = backend.get_tiered_object(&hv_id).await? 
else { - panic!("expected TieredGet::Tombstone"); - }; - assert_eq!( - get_t.target, lt_id, - "get_tiered_object target must match lt_id" - ); - - let TieredMetadata::Tombstone(meta_t) = backend.get_tiered_metadata(&hv_id).await? else { - panic!("expected TieredMetadata::Tombstone"); - }; - assert_eq!( - meta_t.target, lt_id, - "get_tiered_metadata target must match lt_id" - ); + let write = TieredWrite::Tombstone(Tombstone { + target: lt_id.clone(), + expiration_policy: ExpirationPolicy::Manual, + }); + backend.compare_and_write(&hv_id, None, write).await?; + + match backend.get_tiered_object(&hv_id).await? { + TieredGet::Tombstone(get_t) => assert_eq!(get_t.target, lt_id,), + other => panic!("expected TieredGet::Tombstone, got {other:?}"), + } + match backend.get_tiered_metadata(&hv_id).await? { + TieredMetadata::Tombstone(meta_t) => assert_eq!(meta_t.target, lt_id,), + other => panic!("expected TieredMetadata::Tombstone, got {other:?}"), + } Ok(()) } @@ -1611,24 +1579,16 @@ mod tests { // tombstone: put_non_tombstone returns Some(Tombstone) and leaves tombstone intact. 
let hv_id = make_id(); let lt_id = ObjectId::random(hv_id.context().clone()); - backend - .compare_and_write( - &hv_id, - None, - TieredWrite::Tombstone(Tombstone { - target: lt_id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }), - ) - .await?; + let write = TieredWrite::Tombstone(Tombstone { + target: lt_id.clone(), + expiration_policy: ExpirationPolicy::Manual, + }); + backend.compare_and_write(&hv_id, None, write).await?; let result = backend .put_non_tombstone(&hv_id, &Metadata::default(), Bytes::new()) .await?; let returned = result.expect("expected Some(Tombstone) when row is a tombstone"); - assert_eq!( - returned.target, lt_id, - "returned tombstone target must be lt_id" - ); + assert_eq!(returned.target, lt_id); assert!( matches!( backend.get_tiered_metadata(&hv_id).await?, @@ -1666,16 +1626,11 @@ mod tests { // tombstone let id = make_id(); - backend - .compare_and_write( - &id, - None, - TieredWrite::Tombstone(Tombstone { - target: id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }), - ) - .await?; + let write = TieredWrite::Tombstone(Tombstone { + target: id.clone(), + expiration_policy: ExpirationPolicy::Manual, + }); + backend.compare_and_write(&id, None, write).await?; let tombstone = backend .delete_non_tombstone(&id) .await? @@ -1753,56 +1708,39 @@ mod tests { let wrong_lt_id = ObjectId::random(hv_id.context().clone()); let new_lt_id = ObjectId::random(hv_id.context().clone()); - backend - .compare_and_write( - &hv_id, - None, - TieredWrite::Tombstone(Tombstone { - target: old_lt_id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }), - ) - .await?; + let write = TieredWrite::Tombstone(Tombstone { + target: old_lt_id.clone(), + expiration_policy: ExpirationPolicy::Manual, + }); + backend.compare_and_write(&hv_id, None, write).await?; // Wrong target: CAS fails, tombstone unchanged. 
+ let write = TieredWrite::Tombstone(Tombstone { + target: new_lt_id.clone(), + expiration_policy: ExpirationPolicy::Manual, + }); let swapped = backend - .compare_and_write( - &hv_id, - Some(&wrong_lt_id), - TieredWrite::Tombstone(Tombstone { - target: new_lt_id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }), - ) + .compare_and_write(&hv_id, Some(&wrong_lt_id), write) .await?; assert!(!swapped, "expected CAS failure due to wrong target"); - let TieredMetadata::Tombstone(t) = backend.get_tiered_metadata(&hv_id).await? else { - panic!("expected tombstone"); - }; - assert_eq!( - t.target, old_lt_id, - "target must be unchanged after mismatch" - ); + match backend.get_tiered_metadata(&hv_id).await? { + TieredMetadata::Tombstone(t) => assert_eq!(t.target, old_lt_id), + other => panic!("expected tombstone, got {other:?}"), + } // Correct target: CAS succeeds, target updated. + let write = TieredWrite::Tombstone(Tombstone { + target: new_lt_id.clone(), + expiration_policy: ExpirationPolicy::Manual, + }); let swapped = backend - .compare_and_write( - &hv_id, - Some(&old_lt_id), - TieredWrite::Tombstone(Tombstone { - target: new_lt_id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }), - ) + .compare_and_write(&hv_id, Some(&old_lt_id), write) .await?; assert!(swapped, "expected CAS success with correct target"); - let TieredMetadata::Tombstone(t) = backend.get_tiered_metadata(&hv_id).await? else { - panic!("expected tombstone"); - }; - assert_eq!( - t.target, new_lt_id, - "target must be updated after successful swap" - ); + match backend.get_tiered_metadata(&hv_id).await? 
{ + TieredMetadata::Tombstone(t) => assert_eq!(t.target, new_lt_id), + other => panic!("expected tombstone, got {other:?}"), + } Ok(()) } @@ -1816,24 +1754,16 @@ mod tests { let lt_id = ObjectId::random(id.context().clone()); let wrong_id = ObjectId::random(id.context().clone()); - backend - .compare_and_write( - &id, - None, - TieredWrite::Tombstone(Tombstone { - target: lt_id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }), - ) - .await?; + let write = TieredWrite::Tombstone(Tombstone { + target: lt_id.clone(), + expiration_policy: ExpirationPolicy::Manual, + }); + backend.compare_and_write(&id, None, write).await?; // Wrong target: CAS fails, tombstone intact. + let write = TieredWrite::Object(Metadata::default(), Bytes::new()); let swapped = backend - .compare_and_write( - &id, - Some(&wrong_id), - TieredWrite::Object(Metadata::default(), Bytes::new()), - ) + .compare_and_write(&id, Some(&wrong_id), write) .await?; assert!(!swapped, "expected CAS failure with wrong target"); assert!(matches!( @@ -1843,13 +1773,8 @@ mod tests { // Correct target: CAS succeeds, row becomes an inline object. let payload = Bytes::from_static(b"hello inline"); - let swapped = backend - .compare_and_write( - &id, - Some(<_id), - TieredWrite::Object(Metadata::default(), payload.clone()), - ) - .await?; + let write = TieredWrite::Object(Metadata::default(), payload.clone()); + let swapped = backend.compare_and_write(&id, Some(<_id), write).await?; assert!(swapped, "expected CAS success with correct target"); let TieredGet::Object(_, stream) = backend.get_tiered_object(&id).await? 
else { panic!("expected inline object after swap"); @@ -1866,13 +1791,8 @@ mod tests { let id = make_id(); let payload = Bytes::from_static(b"cas object"); - let committed = backend - .compare_and_write( - &id, - None, - TieredWrite::Object(Metadata::default(), payload.clone()), - ) - .await?; + let write = TieredWrite::Object(Metadata::default(), payload.clone()); + let committed = backend.compare_and_write(&id, None, write).await?; assert!(committed, "expected CAS success on empty row"); let TieredGet::Object(_, stream) = backend.get_tiered_object(&id).await? else { @@ -1893,16 +1813,11 @@ mod tests { let lt_id = ObjectId::random(id.context().clone()); let wrong_id = ObjectId::random(id.context().clone()); - backend - .compare_and_write( - &id, - None, - TieredWrite::Tombstone(Tombstone { - target: lt_id.clone(), - expiration_policy: ExpirationPolicy::Manual, - }), - ) - .await?; + let write = TieredWrite::Tombstone(Tombstone { + target: lt_id.clone(), + expiration_policy: ExpirationPolicy::Manual, + }); + backend.compare_and_write(&id, None, write).await?; // Wrong target: fails, row preserved. let deleted = backend @@ -2007,12 +1922,9 @@ mod tests { }; // After the bump, the row is rewritten with a fresh timestamp (≈ now + TTI). - let Some(RowData::Tombstone { - time_expires: Some(new_deadline), - .. - }) = backend.read_row(&path, None, "test-verify").await? - else { - panic!("expected tombstone row after bump"); + let new_deadline = match backend.read_row(&path, None, "test-verify").await? { + Some(RowData::Tombstone { time_expires, .. 
}) => time_expires.unwrap(),
+            _ => panic!("expected tombstone row after bump"),
         };
 
         assert!(
From a24360eafccaf2d75e0c0cb4082d25e59aefed88 Mon Sep 17 00:00:00 2001
From: Jan Michael Auer
Date: Fri, 20 Mar 2026 11:29:52 +0100
Subject: [PATCH 3/5] Simplify tti_bump tests

---
 objectstore-service/src/backend/bigtable.rs | 48 ++++-----------------
 1 file changed, 9 insertions(+), 39 deletions(-)

diff --git a/objectstore-service/src/backend/bigtable.rs b/objectstore-service/src/backend/bigtable.rs
index 9b4125bd..cd57073d 100644
--- a/objectstore-service/src/backend/bigtable.rs
+++ b/objectstore-service/src/backend/bigtable.rs
@@ -1321,13 +1321,9 @@ mod tests {
             ..Default::default()
         };
 
-        // Compute a stale timestamp just inside the bump window.
-        let old_deadline = SystemTime::now() + tti - TTI_DEBOUNCE - Duration::from_secs(60);
-        let old_micros = old_deadline
-            .duration_since(SystemTime::UNIX_EPOCH)
-            .unwrap()
-            .as_millis() as i64
-            * 1000;
+        // Compute a fake `now` such that build_write_mutations produces a stale timestamp
+        // just inside the bump window: expire_at = fake_now + tti = now + tti - TTI_DEBOUNCE - 60s.
+        let past_now = SystemTime::now() - TTI_DEBOUNCE - Duration::from_secs(60);
 
         // Sub-sequence 1: get_object triggers bump (loaded=true path). 
let id1 = make_id(); @@ -1335,22 +1331,9 @@ mod tests { .put_object(&id1, &metadata, stream::single("hello, world")) .await?; let path1 = id1.as_storage_path().to_string().into_bytes(); - let mutations = [ - mutation::Mutation::DeleteFromRow(mutation::DeleteFromRow {}), - mutation::Mutation::SetCell(mutation::SetCell { - family_name: FAMILY_GC.to_owned(), - column_qualifier: COLUMN_PAYLOAD.to_owned(), - timestamp_micros: old_micros, - value: b"hello, world".to_vec(), - }), - mutation::Mutation::SetCell(mutation::SetCell { - family_name: FAMILY_GC.to_owned(), - column_qualifier: COLUMN_METADATA.to_owned(), - timestamp_micros: old_micros, - value: serde_json::to_vec(&metadata).unwrap(), - }), - ]; - backend.mutate(path1, mutations, "test-setup").await?; + let mutations1 = + build_write_mutations(&metadata, b"hello, world".to_vec(), past_now).unwrap(); + backend.mutate(path1, mutations1, "test-setup").await?; // get_object reads the stale row, triggers bump, and returns the pre-bump metadata. 
let (pre_obj_meta, _) = backend.get_object(&id1).await?.unwrap(); @@ -1370,22 +1353,9 @@ mod tests { .put_object(&id2, &metadata, stream::single("hello, world")) .await?; let path2 = id2.as_storage_path().to_string().into_bytes(); - let mutations = [ - mutation::Mutation::DeleteFromRow(mutation::DeleteFromRow {}), - mutation::Mutation::SetCell(mutation::SetCell { - family_name: FAMILY_GC.to_owned(), - column_qualifier: COLUMN_PAYLOAD.to_owned(), - timestamp_micros: old_micros, - value: b"hello, world".to_vec(), - }), - mutation::Mutation::SetCell(mutation::SetCell { - family_name: FAMILY_GC.to_owned(), - column_qualifier: COLUMN_METADATA.to_owned(), - timestamp_micros: old_micros, - value: serde_json::to_vec(&metadata).unwrap(), - }), - ]; - backend.mutate(path2, mutations, "test-setup").await?; + let mutations2 = + build_write_mutations(&metadata, b"hello, world".to_vec(), past_now).unwrap(); + backend.mutate(path2, mutations2, "test-setup").await?; // First get_metadata sees the stale row and triggers a bump. 
let pre_meta = backend.get_metadata(&id2).await?.unwrap(); From 41d8a669b3614231eb3593abb5643a2aa225a0d0 Mon Sep 17 00:00:00 2001 From: Jan Michael Auer Date: Fri, 20 Mar 2026 11:54:36 +0100 Subject: [PATCH 4/5] Use simpler test helpers --- objectstore-service/src/backend/bigtable.rs | 135 ++++++++++---------- 1 file changed, 67 insertions(+), 68 deletions(-) diff --git a/objectstore-service/src/backend/bigtable.rs b/objectstore-service/src/backend/bigtable.rs index cd57073d..e5c23e4d 100644 --- a/objectstore-service/src/backend/bigtable.rs +++ b/objectstore-service/src/backend/bigtable.rs @@ -1151,6 +1151,31 @@ mod tests { }) } + async fn create_object( + backend: &BigTableBackend, + id: &ObjectId, + metadata: &Metadata, + payload: &[u8], + now: SystemTime, + ) -> Result<()> { + let path = id.as_storage_path().to_string().into_bytes(); + let mutations = build_write_mutations(metadata, payload.to_vec(), now)?; + backend.mutate(path, mutations, "test-setup").await?; + Ok(()) + } + + async fn create_tombstone( + backend: &BigTableBackend, + id: &ObjectId, + tombstone: &Tombstone, + now: SystemTime, + ) -> Result<()> { + let path = id.as_storage_path().to_string().into_bytes(); + let mutations = build_tombstone_mutations(tombstone, now)?; + backend.mutate(path, mutations, "test-setup").await?; + Ok(()) + } + /// Writes a legacy-format tombstone row directly into Bigtable. 
async fn write_legacy_tombstone( backend: &BigTableBackend, @@ -1238,7 +1263,7 @@ mod tests { let (obj_meta, stream) = backend.get_object(&id).await?.unwrap(); let payload = stream::read_to_vec(stream).await?; - assert_eq!(str::from_utf8(&payload).unwrap(), "hello, world"); + assert_eq!(payload, b"hello, world"); assert_eq!(obj_meta.content_type, metadata.content_type); assert_eq!(obj_meta.custom, metadata.custom); @@ -1271,9 +1296,7 @@ mod tests { custom: BTreeMap::from_iter([("invalid".into(), "invalid".into())]), ..Default::default() }; - backend - .put_object(&id, &first_metadata, stream::single("hello")) - .await?; + create_object(&backend, &id, &first_metadata, b"hello", SystemTime::now()).await?; let second_metadata = Metadata { custom: BTreeMap::from_iter([("hello".into(), "world".into())]), @@ -1285,7 +1308,7 @@ mod tests { let (meta, stream) = backend.get_object(&id).await?.unwrap(); let payload = stream::read_to_vec(stream).await?; - assert_eq!(str::from_utf8(&payload).unwrap(), "world"); + assert_eq!(payload, b"world"); assert_eq!(meta.custom, second_metadata.custom); Ok(()) @@ -1296,9 +1319,8 @@ mod tests { let backend = create_test_backend().await?; let id = make_id(); - backend - .put_object(&id, &Metadata::default(), stream::single("hello, world")) - .await?; + let metadata = Metadata::default(); + create_object(&backend, &id, &metadata, b"hello", SystemTime::now()).await?; backend.delete_object(&id).await?; assert!(backend.get_object(&id).await?.is_none()); @@ -1321,19 +1343,13 @@ mod tests { ..Default::default() }; - // Compute a fake `now` such that build_write_mutations produces a stale timestamp - // just inside the bump window: expire_at = fake_now + tti = now - TTI_DEBOUNCE - 60s. + // Pass a backdated `now` so the written expiry is inside the bump window: + // expire_at = past_now + tti = now - TTI_DEBOUNCE - 60s (stale but not yet expired). 
let past_now = SystemTime::now() - TTI_DEBOUNCE - Duration::from_secs(60); // Sub-sequence 1: get_object triggers bump (loaded=true path). let id1 = make_id(); - backend - .put_object(&id1, &metadata, stream::single("hello, world")) - .await?; - let path1 = id1.as_storage_path().to_string().into_bytes(); - let mutations1 = - build_write_mutations(&metadata, b"hello, world".to_vec(), past_now).unwrap(); - backend.mutate(path1, mutations1, "test-setup").await?; + create_object(&backend, &id1, &metadata, b"hello, world", past_now).await?; // get_object reads the stale row, triggers bump, and returns the pre-bump metadata. let (pre_obj_meta, _) = backend.get_object(&id1).await?.unwrap(); @@ -1349,13 +1365,7 @@ mod tests { // Sub-sequence 2: get_metadata triggers bump (loaded=false path). let id2 = make_id(); - backend - .put_object(&id2, &metadata, stream::single("hello, world")) - .await?; - let path2 = id2.as_storage_path().to_string().into_bytes(); - let mutations2 = - build_write_mutations(&metadata, b"hello, world".to_vec(), past_now).unwrap(); - backend.mutate(path2, mutations2, "test-setup").await?; + create_object(&backend, &id2, &metadata, b"hello, world", past_now).await?; // First get_metadata sees the stale row and triggers a bump. let pre_meta = backend.get_metadata(&id2).await?.unwrap(); @@ -1369,7 +1379,7 @@ mod tests { // Payload must be intact after the loaded=false bump (which re-fetches the payload). 
let (_, stream) = backend.get_object(&id2).await?.unwrap(); let payload = stream::read_to_vec(stream).await?; - assert_eq!(&payload, b"hello, world"); + assert_eq!(payload, b"hello, world"); Ok(()) } @@ -1385,9 +1395,7 @@ mod tests { expiration_policy: ExpirationPolicy::TimeToIdle(tti), ..Default::default() }; - backend - .put_object(&id, &metadata, stream::single("hello, world")) - .await?; + create_object(&backend, &id, &metadata, b"hello, world", SystemTime::now()).await?; // A freshly written object has time_expires ≈ now + 2d, well outside the bump // window (now + 2d - 1d = now + 1d). No bump should occur. @@ -1417,9 +1425,7 @@ mod tests { expiration_policy: ExpirationPolicy::TimeToLive(Duration::from_secs(0)), ..Default::default() }; - backend - .put_object(&id, &metadata, stream::single("hello, world")) - .await?; + create_object(&backend, &id, &metadata, b"hello, world", SystemTime::now()).await?; assert!(backend.get_object(&id).await?.is_none()); @@ -1438,9 +1444,7 @@ mod tests { expiration_policy: ExpirationPolicy::TimeToIdle(Duration::from_secs(0)), ..Default::default() }; - backend - .put_object(&id, &metadata, stream::single("hello, world")) - .await?; + create_object(&backend, &id, &metadata, b"hello, world", SystemTime::now()).await?; assert!(backend.get_object(&id).await?.is_none()); @@ -1477,15 +1481,13 @@ mod tests { custom: BTreeMap::from_iter([("k".into(), "v".into())]), ..Default::default() }; - backend - .put_object(&id, &put_meta, stream::single("tiered payload")) - .await?; + create_object(&backend, &id, &put_meta, b"payload", SystemTime::now()).await?; let TieredGet::Object(obj_meta, obj_stream) = backend.get_tiered_object(&id).await? 
else { panic!("expected TieredGet::Object"); }; let obj_payload = stream::read_to_vec(obj_stream).await?; - assert_eq!(obj_payload, b"tiered payload"); + assert_eq!(obj_payload, b"payload"); assert_eq!(obj_meta.content_type, put_meta.content_type); assert_eq!(obj_meta.custom, put_meta.custom); @@ -1498,11 +1500,11 @@ mod tests { // tombstone let hv_id = make_id(); let lt_id = ObjectId::random(hv_id.context().clone()); - let write = TieredWrite::Tombstone(Tombstone { + let tombstone = Tombstone { target: lt_id.clone(), expiration_policy: ExpirationPolicy::Manual, - }); - backend.compare_and_write(&hv_id, None, write).await?; + }; + create_tombstone(&backend, &hv_id, &tombstone, SystemTime::now()).await?; match backend.get_tiered_object(&hv_id).await? { TieredGet::Tombstone(get_t) => assert_eq!(get_t.target, lt_id,), @@ -1527,8 +1529,9 @@ mod tests { // empty: put_non_tombstone on absent row succeeds and makes object readable. let id = make_id(); + let metadata = Metadata::default(); let result = backend - .put_non_tombstone(&id, &Metadata::default(), Bytes::from_static(b"first")) + .put_non_tombstone(&id, &metadata, Bytes::from_static(b"first")) .await?; assert_eq!(result, None, "expected None on empty row"); let (_, stream) = backend.get_object(&id).await?.unwrap(); @@ -1536,11 +1539,9 @@ mod tests { // object: put_non_tombstone on existing object replaces payload, returns None. 
let id = make_id(); - backend - .put_object(&id, &Metadata::default(), stream::single("old")) - .await?; + create_object(&backend, &id, &metadata, b"old", SystemTime::now()).await?; let result = backend - .put_non_tombstone(&id, &Metadata::default(), Bytes::from_static(b"new")) + .put_non_tombstone(&id, &metadata, Bytes::from_static(b"new")) .await?; assert_eq!(result, None, "expected None when overwriting object"); let (_, stream) = backend.get_object(&id).await?.unwrap(); @@ -1549,13 +1550,13 @@ mod tests { // tombstone: put_non_tombstone returns Some(Tombstone) and leaves tombstone intact. let hv_id = make_id(); let lt_id = ObjectId::random(hv_id.context().clone()); - let write = TieredWrite::Tombstone(Tombstone { + let tombstone = Tombstone { target: lt_id.clone(), expiration_policy: ExpirationPolicy::Manual, - }); - backend.compare_and_write(&hv_id, None, write).await?; + }; + create_tombstone(&backend, &hv_id, &tombstone, SystemTime::now()).await?; let result = backend - .put_non_tombstone(&hv_id, &Metadata::default(), Bytes::new()) + .put_non_tombstone(&hv_id, &metadata, Bytes::new()) .await?; let returned = result.expect("expected Some(Tombstone) when row is a tombstone"); assert_eq!(returned.target, lt_id); @@ -1588,19 +1589,18 @@ mod tests { // object let id = make_id(); - backend - .put_object(&id, &Metadata::default(), stream::single("hello, world")) - .await?; + let metadata = Metadata::default(); + create_object(&backend, &id, &metadata, b"hello, world", SystemTime::now()).await?; assert_eq!(backend.delete_non_tombstone(&id).await?, None); assert!(backend.get_object(&id).await?.is_none()); // tombstone let id = make_id(); - let write = TieredWrite::Tombstone(Tombstone { + let tombstone = Tombstone { target: id.clone(), expiration_policy: ExpirationPolicy::Manual, - }); - backend.compare_and_write(&id, None, write).await?; + }; + create_tombstone(&backend, &id, &tombstone, SystemTime::now()).await?; let tombstone = backend .delete_non_tombstone(&id) 
.await? @@ -1678,11 +1678,11 @@ mod tests { let wrong_lt_id = ObjectId::random(hv_id.context().clone()); let new_lt_id = ObjectId::random(hv_id.context().clone()); - let write = TieredWrite::Tombstone(Tombstone { + let tombstone = Tombstone { target: old_lt_id.clone(), expiration_policy: ExpirationPolicy::Manual, - }); - backend.compare_and_write(&hv_id, None, write).await?; + }; + create_tombstone(&backend, &hv_id, &tombstone, SystemTime::now()).await?; // Wrong target: CAS fails, tombstone unchanged. let write = TieredWrite::Tombstone(Tombstone { @@ -1724,11 +1724,11 @@ mod tests { let lt_id = ObjectId::random(id.context().clone()); let wrong_id = ObjectId::random(id.context().clone()); - let write = TieredWrite::Tombstone(Tombstone { + let tombstone = Tombstone { target: lt_id.clone(), expiration_policy: ExpirationPolicy::Manual, - }); - backend.compare_and_write(&id, None, write).await?; + }; + create_tombstone(&backend, &id, &tombstone, SystemTime::now()).await?; // Wrong target: CAS fails, tombstone intact. let write = TieredWrite::Object(Metadata::default(), Bytes::new()); @@ -1783,11 +1783,11 @@ mod tests { let lt_id = ObjectId::random(id.context().clone()); let wrong_id = ObjectId::random(id.context().clone()); - let write = TieredWrite::Tombstone(Tombstone { + let tombstone = Tombstone { target: lt_id.clone(), expiration_policy: ExpirationPolicy::Manual, - }); - backend.compare_and_write(&id, None, write).await?; + }; + create_tombstone(&backend, &id, &tombstone, SystemTime::now()).await?; // Wrong target: fails, row preserved. let deleted = backend @@ -1812,9 +1812,8 @@ mod tests { // Regular object: CAS-delete with Some(target) returns false, object preserved. 
let id2 = make_id(); let fake_lt_id = ObjectId::random(id2.context().clone()); - backend - .put_object(&id2, &Metadata::default(), stream::single("data")) - .await?; + let metadata = Metadata::default(); + create_object(&backend, &id2, &metadata, b"data", SystemTime::now()).await?; let deleted = backend .compare_and_write(&id2, Some(&fake_lt_id), TieredWrite::Delete) .await?; From 1a1e683c4868cba1818cb91af1ca7e8ad1b262b9 Mon Sep 17 00:00:00 2001 From: Jan Michael Auer Date: Fri, 20 Mar 2026 12:05:06 +0100 Subject: [PATCH 5/5] test(service): Verify redirect pointer round-trip in cas_create_tombstone Use distinct hv_id and lt_id so the target assertion proves the `r` column serialises and deserialises correctly, rather than testing the degenerate case where target == hv_id (which passes even if the fallback path is taken by mistake). Also fixes a spurious trailing comma in test_tiered_get. --- objectstore-service/src/backend/bigtable.rs | 28 +++++++++++---------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/objectstore-service/src/backend/bigtable.rs b/objectstore-service/src/backend/bigtable.rs index e5c23e4d..68c0b1e6 100644 --- a/objectstore-service/src/backend/bigtable.rs +++ b/objectstore-service/src/backend/bigtable.rs @@ -1507,7 +1507,7 @@ mod tests { create_tombstone(&backend, &hv_id, &tombstone, SystemTime::now()).await?; match backend.get_tiered_object(&hv_id).await? { - TieredGet::Tombstone(get_t) => assert_eq!(get_t.target, lt_id,), + TieredGet::Tombstone(get_t) => assert_eq!(get_t.target, lt_id), other => panic!("expected TieredGet::Tombstone, got {other:?}"), } match backend.get_tiered_metadata(&hv_id).await? 
{ @@ -1626,42 +1626,44 @@ mod tests { async fn test_cas_create_tombstone() -> Result<()> { let backend = create_test_backend().await?; - let id = make_id(); + let hv_id = make_id(); + let lt_id = ObjectId::random(hv_id.context().clone()); let expiration_policy = ExpirationPolicy::TimeToLive(Duration::from_secs(3600)); let tombstone = Tombstone { - target: id.clone(), + target: lt_id.clone(), expiration_policy, }; // First create succeeds. let committed = backend - .compare_and_write(&id, None, TieredWrite::Tombstone(tombstone.clone())) + .compare_and_write(&hv_id, None, TieredWrite::Tombstone(tombstone.clone())) .await?; assert!(committed, "expected CAS success on empty row"); - // Tiered reads must see the tombstone with correct policy. - let TieredMetadata::Tombstone(t) = backend.get_tiered_metadata(&id).await? else { + // Tiered reads must see the tombstone with correct target and policy. + let TieredMetadata::Tombstone(t) = backend.get_tiered_metadata(&hv_id).await? else { panic!("expected TieredMetadata::Tombstone"); }; + assert_eq!(t.target, lt_id, "target must round-trip via r column"); assert_eq!(t.expiration_policy, expiration_policy); - assert!(matches!( - backend.get_tiered_object(&id).await?, - TieredGet::Tombstone(_) - )); + match backend.get_tiered_object(&hv_id).await? { + TieredGet::Tombstone(t) => assert_eq!(t.target, lt_id, "round-trip via r column"), + other => panic!("expected TieredGet::Tombstone, got {other:?}"), + } // Legacy reads must error rather than leak tombstone data. assert!(matches!( - backend.get_object(&id).await, + backend.get_object(&hv_id).await, Err(Error::UnexpectedTombstone) )); assert!(matches!( - backend.get_metadata(&id).await, + backend.get_metadata(&hv_id).await, Err(Error::UnexpectedTombstone) )); // Second create fails: tombstone already exists. 
let second = backend - .compare_and_write(&id, None, TieredWrite::Tombstone(tombstone)) + .compare_and_write(&hv_id, None, TieredWrite::Tombstone(tombstone)) .await?; assert!(!second, "second create must fail: tombstone already exists");