mirror of
https://github.com/NotAShelf/stash.git
synced 2026-04-12 22:17:41 +00:00
db: tests for determinism & async ops
Signed-off-by: NotAShelf <raf@notashelf.dev> Change-Id: I2591e607a945c0aaa28a75247fc638436a6a6964
This commit is contained in:
parent
95bf1766ce
commit
cf5b1e8205
2 changed files with 292 additions and 4 deletions
106
src/db/mod.rs
106
src/db/mod.rs
|
|
@ -2047,4 +2047,110 @@ mod tests {
|
||||||
assert_eq!(contents, data.to_vec());
|
assert_eq!(contents, data.to_vec());
|
||||||
assert_eq!(mime, Some("text/plain".to_string()));
|
assert_eq!(mime, Some("text/plain".to_string()));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
fn test_fnv1a_hasher_deterministic() {
    // Hashing the same bytes through two independent hasher instances
    // must yield the exact same value.
    let input = b"test data";

    let first = {
        let mut h = Fnv1aHasher::new();
        h.write(input);
        h.finish()
    };

    let second = {
        let mut h = Fnv1aHasher::new();
        h.write(input);
        h.finish()
    };

    assert_eq!(first, second, "FNV-1a should produce deterministic hashes");
}
|
||||||
|
|
||||||
|
#[test]
fn test_fnv1a_hasher_different_input() {
    // Distinct inputs should (with overwhelming probability) produce
    // distinct 64-bit hashes.
    let hash_of = |bytes: &[u8]| {
        let mut h = Fnv1aHasher::new();
        h.write(bytes);
        h.finish()
    };

    let hash1 = hash_of(b"test data 1");
    let hash2 = hash_of(b"test data 2");

    assert_ne!(
        hash1, hash2,
        "Different data should produce different hashes"
    );
}
|
||||||
|
|
||||||
|
#[test]
fn test_fnv1a_hasher_known_values() {
    // Check the implementation against published FNV-1a 64-bit
    // reference vectors.
    let hash_of = |bytes: &[u8]| {
        let mut h = Fnv1aHasher::new();
        h.write(bytes);
        h.finish()
    };

    // Hash of the empty input is the FNV-1a 64-bit offset basis.
    assert_eq!(
        hash_of(b""),
        0xCBF29CE484222325,
        "Empty string hash mismatch"
    );

    assert_eq!(
        hash_of(b"a"),
        0xAF63DC4C8601EC8C,
        "Single byte hash mismatch"
    );

    assert_eq!(hash_of(b"hello"), 0xA430D84680AABD0B, "Hello hash mismatch");
}
|
||||||
|
|
||||||
|
#[test]
fn test_fnv1a_hash_stored_in_db() {
    // The content hash persisted by store_entry must equal an FNV-1a
    // hash computed independently over the same bytes.
    let db = test_db();
    let data = b"test content for hashing";

    let id = db
        .store_entry(
            std::io::Cursor::new(data.to_vec()),
            100,
            1000,
            None,
            None,
            DEFAULT_MAX_ENTRY_SIZE,
        )
        .expect("Failed to store");

    // Read back the hash the database recorded for this row.
    let stored_hash: i64 = db
        .conn
        .query_row(
            "SELECT content_hash FROM clipboard WHERE id = ?1",
            [id],
            |row| row.get(0),
        )
        .expect("Failed to get hash");

    // Recompute the hash outside the storage code path. Hasher::finish
    // takes &self, so the value can be reused for both checks below.
    let mut hasher = Fnv1aHasher::new();
    hasher.write(data);
    let calculated_hash_u64 = hasher.finish();
    let calculated_hash = calculated_hash_u64 as i64;

    assert_eq!(
        stored_hash, calculated_hash,
        "Stored hash should match calculated hash"
    );

    // Round-trip the signed column value back to u64: the cast must
    // preserve the raw bit pattern.
    let stored_hash_u64 = stored_hash as u64;
    assert_eq!(
        stored_hash_u64, calculated_hash_u64,
        "Bit pattern should be preserved in i64/u64 conversion"
    );
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -5,10 +5,9 @@ use rusqlite::OptionalExtension;
|
||||||
use crate::db::{ClipboardDb, SqliteClipboardDb, StashError};
|
use crate::db::{ClipboardDb, SqliteClipboardDb, StashError};
|
||||||
|
|
||||||
/// Asynchronous facade over the clipboard database.
///
/// Blocking database work is dispatched to a thread pool so the async
/// runtime is never stalled. Because [`rusqlite::Connection`] is not
/// `Send`, only the database path is stored here and each operation
/// opens a fresh connection on the worker thread.
pub struct AsyncClipboardDb {
    db_path: PathBuf,
}
|
||||||
|
|
@ -139,3 +138,186 @@ impl Clone for AsyncClipboardDb {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use std::collections::HashSet;
|
||||||
|
|
||||||
|
use tempfile::tempdir;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
fn setup_test_db() -> (AsyncClipboardDb, tempfile::TempDir) {
|
||||||
|
let temp_dir = tempdir().expect("Failed to create temp dir");
|
||||||
|
let db_path = temp_dir.path().join("test.db");
|
||||||
|
|
||||||
|
// Create initial database
|
||||||
|
{
|
||||||
|
let conn =
|
||||||
|
rusqlite::Connection::open(&db_path).expect("Failed to open database");
|
||||||
|
crate::db::SqliteClipboardDb::new(conn, db_path.clone())
|
||||||
|
.expect("Failed to create database");
|
||||||
|
}
|
||||||
|
|
||||||
|
let async_db = AsyncClipboardDb::new(db_path);
|
||||||
|
(async_db, temp_dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
fn test_async_store_entry() {
    smol::block_on(async {
        let (async_db, _temp_dir) = setup_test_db();
        let payload = b"async test data";

        let id = async_db
            .store_entry(payload.to_vec(), 100, 1000, None, None, 5_000_000)
            .await
            .expect("Failed to store entry");
        assert!(id > 0, "Should return positive id");

        // Confirm persistence by fetching the content hash for the row.
        let stored = async_db
            .get_content_hash(id)
            .await
            .expect("Failed to get hash")
            .expect("Hash should exist");

        // Independently recompute what the hash ought to be.
        let mut hasher = crate::db::Fnv1aHasher::new();
        hasher.write(payload);
        let expected = hasher.finish() as i64;

        assert_eq!(stored, expected, "Stored hash should match");
    });
}
|
||||||
|
|
||||||
|
#[test]
fn test_async_set_expiration_and_load() {
    smol::block_on(async {
        let (async_db, _temp_dir) = setup_test_db();

        let id = async_db
            .store_entry(b"expiring entry".to_vec(), 100, 1000, None, None, 5_000_000)
            .await
            .expect("Failed to store entry");

        // Attach an expiration timestamp to the new entry.
        let expires_at = 1234567890.5;
        async_db
            .set_expiration(id, expires_at)
            .await
            .expect("Failed to set expiration");

        // The entry should appear when loading all pending expirations.
        let expirations = async_db
            .load_all_expirations()
            .await
            .expect("Failed to load expirations");
        assert_eq!(expirations.len(), 1, "Should have one expiration");

        let (when, entry_id) = expirations[0];
        // Compare floats with a small tolerance rather than exactly.
        assert!(
            (when - expires_at).abs() < 0.001,
            "Expiration time should match"
        );
        assert_eq!(entry_id, id, "Expiration id should match");
    });
}
|
||||||
|
|
||||||
|
#[test]
fn test_async_mark_expired() {
    smol::block_on(async {
        let (async_db, _temp_dir) = setup_test_db();

        let id = async_db
            .store_entry(b"entry to expire".to_vec(), 100, 1000, None, None, 5_000_000)
            .await
            .expect("Failed to store entry");

        async_db
            .mark_expired(id)
            .await
            .expect("Failed to mark as expired");

        // An entry flagged as expired must no longer show up in the
        // pending-expiration list.
        let expirations = async_db
            .load_all_expirations()
            .await
            .expect("Failed to load expirations");
        assert!(
            expirations.is_empty(),
            "Expired entries should not be loaded"
        );
    });
}
|
||||||
|
|
||||||
|
#[test]
fn test_async_get_content_hash_not_found() {
    smol::block_on(async {
        let (async_db, _temp_dir) = setup_test_db();

        // Looking up an id that was never stored yields Ok(None),
        // not an error.
        let hash = async_db
            .get_content_hash(999999)
            .await
            .expect("Should not fail on non-existent entry");
        assert!(hash.is_none(), "Hash should be None for non-existent entry");
    });
}
|
||||||
|
|
||||||
|
#[test]
fn test_async_clone() {
    let (async_db, _temp_dir) = setup_test_db();
    let cloned = async_db.clone();

    smol::block_on(async {
        // The original handle and its clone must both work against the
        // same underlying database file.
        let payload = b"clone test";

        let id1 = async_db
            .store_entry(payload.to_vec(), 100, 1000, None, None, 5_000_000)
            .await
            .expect("Failed with original");
        let id2 = cloned
            .store_entry(payload.to_vec(), 100, 1000, None, None, 5_000_000)
            .await
            .expect("Failed with clone");

        assert_ne!(id1, id2, "Should store as separate entries");
    });
}
|
||||||
|
|
||||||
|
#[test]
fn test_async_concurrent_operations() {
    smol::block_on(async {
        let (async_db, _temp_dir) = setup_test_db();

        // Kick off five inserts in parallel, each on its own task with
        // its own clone of the handle.
        let tasks: Vec<_> = (0..5)
            .map(|i| {
                let db = async_db.clone();
                let data = format!("concurrent test {}", i).into_bytes();
                smol::spawn(async move {
                    db.store_entry(data, 100, 1000, None, None, 5_000_000).await
                })
            })
            .collect();

        // Collect into Result so any single failure aborts the test.
        let results: Result<Vec<_>, _> = futures::future::join_all(tasks)
            .await
            .into_iter()
            .collect();
        let ids = results.expect("All stores should succeed");
        assert_eq!(ids.len(), 5, "Should have 5 entries");

        // Every insert must have been assigned its own row id.
        let unique_ids: HashSet<_> = ids.iter().collect();
        assert_eq!(unique_ids.len(), 5, "All IDs should be unique");
    });
}
|
||||||
|
}
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue