pinakes-core: add atomicity tests for database link operations

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I154fe8dc67c56fd21a734d1f984760bf6a6a6964
This commit is contained in:
raf 2026-02-09 15:38:45 +03:00
commit 95527e4bca
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
2 changed files with 260 additions and 0 deletions

View file

@ -97,3 +97,41 @@ pub fn create_test_media_item(path: PathBuf, hash: &str) -> MediaItem {
links_extracted_at: None,
}
}
/// Create a test markdown media item with a given ID
pub fn make_test_markdown_item(id: MediaId) -> MediaItem {
let now = chrono::Utc::now();
MediaItem {
id,
path: format!("/tmp/test_{}.md", id.0).into(),
file_name: format!("test_{}.md", id.0),
media_type: MediaType::Builtin(BuiltinMediaType::Markdown),
content_hash: ContentHash::new(format!("hash_{}", id.0)),
file_size: 1024,
title: Some("Test Note".to_string()),
artist: None,
album: None,
genre: None,
year: None,
duration_secs: None,
description: Some("Test markdown note".to_string()),
thumbnail_path: None,
custom_fields: HashMap::new(),
file_mtime: None,
date_taken: None,
latitude: None,
longitude: None,
camera_make: None,
camera_model: None,
rating: None,
perceptual_hash: None,
storage_mode: StorageMode::External,
original_filename: None,
uploaded_at: None,
storage_key: None,
created_at: now,
updated_at: now,
deleted_at: None,
links_extracted_at: None,
}
}

View file

@ -0,0 +1,222 @@
use pinakes_core::links::extract_links;
use pinakes_core::model::*;
use pinakes_core::storage::StorageBackend;
mod common;
/// Create test markdown content with multiple links.
///
/// Produces a note body starting with a heading, followed by `num_links`
/// wiki-style links of the form `[[note_i]]`, one per line.
fn create_test_note_content(num_links: usize) -> String {
    (0..num_links).fold(String::from("# Test Note\n\n"), |mut body, i| {
        body.push_str(&format!("Link {}: [[note_{}]]\n", i, i));
        body
    })
}
#[tokio::test]
async fn test_save_links_atomicity_success_case() {
    // Fresh in-memory database for this test.
    let storage = common::setup().await;

    // Insert a markdown note to attach links to.
    let note_id = MediaId::new();
    storage
        .insert_media(&common::make_test_markdown_item(note_id))
        .await
        .unwrap();

    // Extract and persist an initial batch of five links.
    let initial = extract_links(note_id, &create_test_note_content(5));
    assert_eq!(initial.len(), 5, "Should extract 5 links");
    storage.save_markdown_links(note_id, &initial).await.unwrap();
    assert_eq!(
        storage.get_outgoing_links(note_id).await.unwrap().len(),
        5,
        "All 5 links should be saved"
    );

    // Re-save with a smaller batch; old rows must be replaced, not appended.
    let replacement = extract_links(note_id, &create_test_note_content(3));
    storage
        .save_markdown_links(note_id, &replacement)
        .await
        .unwrap();
    assert_eq!(
        storage.get_outgoing_links(note_id).await.unwrap().len(),
        3,
        "Should have exactly 3 links after update"
    );
}
#[tokio::test]
async fn test_save_links_atomicity_with_valid_data() {
    // Run several save/replace cycles to confirm each transaction commits
    // a consistent snapshot of the link set.
    let storage = common::setup().await;
    let note_id = MediaId::new();
    storage
        .insert_media(&common::make_test_markdown_item(note_id))
        .await
        .unwrap();

    // Cycle 1: two links.
    let batch = extract_links(note_id, "[[note1]] and [[note2]]");
    storage.save_markdown_links(note_id, &batch).await.unwrap();
    assert_eq!(
        storage.get_outgoing_links(note_id).await.unwrap().len(),
        2,
        "First save: 2 links"
    );

    // Cycle 2: three fresh links replace the previous two.
    let batch = extract_links(note_id, "[[note3]] [[note4]] [[note5]]");
    storage.save_markdown_links(note_id, &batch).await.unwrap();
    assert_eq!(
        storage.get_outgoing_links(note_id).await.unwrap().len(),
        3,
        "Second save: 3 links (old ones deleted)"
    );

    // Cycle 3: an empty slice clears everything.
    storage.save_markdown_links(note_id, &[]).await.unwrap();
    assert_eq!(
        storage.get_outgoing_links(note_id).await.unwrap().len(),
        0,
        "Third save: 0 links (all deleted)"
    );

    // Cycle 4: a single link is restored and its target survives intact.
    let batch = extract_links(note_id, "[[final_note]]");
    storage.save_markdown_links(note_id, &batch).await.unwrap();
    let final_links = storage.get_outgoing_links(note_id).await.unwrap();
    assert_eq!(final_links.len(), 1, "Fourth save: 1 link");
    assert_eq!(final_links[0].target_path, "final_note", "Correct link target");
}
#[tokio::test]
async fn test_save_links_idempotency() {
    // Saving an identical link set repeatedly must not duplicate rows.
    let storage = common::setup().await;
    let note_id = MediaId::new();
    storage
        .insert_media(&common::make_test_markdown_item(note_id))
        .await
        .unwrap();

    let links = extract_links(note_id, "[[note_a]] [[note_b]]");

    // Three identical writes in a row.
    for _ in 0..3 {
        storage.save_markdown_links(note_id, &links).await.unwrap();
    }

    let saved = storage.get_outgoing_links(note_id).await.unwrap();
    assert_eq!(saved.len(), 2, "Should have exactly 2 links (no duplicates)");
}
#[tokio::test]
async fn test_save_links_concurrent_updates() {
    // Updating one note's links must leave other notes' links untouched.
    let storage = common::setup().await;

    // Two independent notes.
    let note1_id = MediaId::new();
    let note2_id = MediaId::new();
    for id in [note1_id, note2_id] {
        storage
            .insert_media(&common::make_test_markdown_item(id))
            .await
            .unwrap();
    }

    // Persist an initial link set for each note. The saves run in sequence;
    // exercising true concurrency deterministically here is impractical.
    let first = extract_links(note1_id, "[[target1]]");
    let second = extract_links(note2_id, "[[target2]] [[target3]]");
    storage.save_markdown_links(note1_id, &first).await.unwrap();
    storage.save_markdown_links(note2_id, &second).await.unwrap();

    assert_eq!(
        storage.get_outgoing_links(note1_id).await.unwrap().len(),
        1,
        "Note 1 should have 1 link"
    );
    assert_eq!(
        storage.get_outgoing_links(note2_id).await.unwrap().len(),
        2,
        "Note 2 should have 2 links"
    );

    // Rewrite note 1's links; note 2's rows must be unaffected.
    let rewritten = extract_links(note1_id, "[[target_new1]] [[target_new2]]");
    storage
        .save_markdown_links(note1_id, &rewritten)
        .await
        .unwrap();

    assert_eq!(
        storage.get_outgoing_links(note1_id).await.unwrap().len(),
        2,
        "Note 1 should have 2 links after update"
    );
    assert_eq!(
        storage.get_outgoing_links(note2_id).await.unwrap().len(),
        2,
        "Note 2 should still have 2 links"
    );
}
#[tokio::test]
async fn test_save_links_with_large_batch() {
    // A 100-link batch must land in full, and a later smaller batch must
    // fully replace it.
    let storage = common::setup().await;
    let note_id = MediaId::new();
    storage
        .insert_media(&common::make_test_markdown_item(note_id))
        .await
        .unwrap();

    // Save a large batch in one call.
    let big_batch = extract_links(note_id, &create_test_note_content(100));
    assert_eq!(big_batch.len(), 100, "Should extract 100 links");
    storage
        .save_markdown_links(note_id, &big_batch)
        .await
        .unwrap();
    assert_eq!(
        storage.get_outgoing_links(note_id).await.unwrap().len(),
        100,
        "All 100 links should be saved atomically"
    );

    // Replace it with a much smaller set.
    let small_batch = extract_links(note_id, &create_test_note_content(10));
    storage
        .save_markdown_links(note_id, &small_batch)
        .await
        .unwrap();
    assert_eq!(
        storage.get_outgoing_links(note_id).await.unwrap().len(),
        10,
        "Should have exactly 10 links after replacement"
    );
}
// XXX: Testing actual transaction rollback on error is difficult without
// mocking the database or injecting failures. The above tests verify that:
// 1. Normal operation is atomic (delete + insert works correctly)
// 2. Updates properly replace old links
// 3. Empty link sets work correctly
// 4. Large batches are handled atomically
// 5. Concurrent operations on different notes don't interfere
//
// The transaction wrapper ensures that if ANY operation fails during
// the DELETE + INSERT sequence, the entire operation rolls back,
// preventing partial states.