pinakes-core: file management improvements; in-database storage cleanup

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Ic186f9bf08683a14562bbe43743c04706a6a6964
This commit is contained in:
raf 2026-02-05 10:39:20 +03:00
commit f5371a30bb
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
8 changed files with 698 additions and 4 deletions

View file

@ -152,6 +152,14 @@ fn row_to_media_item(row: &Row) -> rusqlite::Result<MediaItem> {
created_at: parse_datetime(&created_str),
updated_at: parse_datetime(&updated_str),
// Trash support
deleted_at: row
.get::<_, Option<String>>("deleted_at")
.ok()
.flatten()
.and_then(|s| DateTime::parse_from_rfc3339(&s).ok())
.map(|dt| dt.with_timezone(&Utc)),
})
}
@ -691,8 +699,11 @@ impl StorageBackend for SqliteBackend {
let db = conn
.lock()
.map_err(|e| PinakesError::Database(e.to_string()))?;
let count: i64 =
db.query_row("SELECT COUNT(*) FROM media_items", [], |row| row.get(0))?;
let count: i64 = db.query_row(
"SELECT COUNT(*) FROM media_items WHERE deleted_at IS NULL",
[],
|row| row.get(0),
)?;
Ok(count as u64)
})
.await
@ -799,6 +810,7 @@ impl StorageBackend for SqliteBackend {
"SELECT id, path, file_name, media_type, content_hash, file_size, \
title, artist, album, genre, year, duration_secs, description, \
thumbnail_path, file_mtime, created_at, updated_at FROM media_items \
WHERE deleted_at IS NULL \
ORDER BY {order_by} LIMIT ?1 OFFSET ?2"
);
let mut stmt = db.prepare(&sql)?;
@ -6071,6 +6083,302 @@ impl StorageBackend for SqliteBackend {
.map_err(|e| PinakesError::Database(e.to_string()))??;
Ok(())
}
// ===== File Management =====
async fn rename_media(&self, id: MediaId, new_name: &str) -> Result<String> {
// Validate the new name
if new_name.is_empty() || new_name.contains('/') || new_name.contains('\\') {
return Err(PinakesError::InvalidOperation(
"Invalid file name: must not be empty or contain path separators".into(),
));
}
let conn = self.conn.clone();
let id_str = id.0.to_string();
let new_name = new_name.to_string();
let (old_path, storage_mode) = tokio::task::spawn_blocking({
let conn = conn.clone();
let id_str = id_str.clone();
move || {
let conn = conn.lock().unwrap();
let row: (String, String) = conn.query_row(
"SELECT path, storage_mode FROM media_items WHERE id = ?1 AND deleted_at IS NULL",
params![id_str],
|row| Ok((row.get(0)?, row.get(1)?)),
)?;
Ok::<_, rusqlite::Error>(row)
}
})
.await
.map_err(|e| PinakesError::Database(e.to_string()))??;
let old_path_buf = std::path::PathBuf::from(&old_path);
let parent = old_path_buf.parent().unwrap_or(std::path::Path::new(""));
let new_path = parent.join(&new_name);
let new_path_str = new_path.to_string_lossy().to_string();
// For external storage, actually rename the file on disk
if storage_mode == "external" && old_path_buf.exists() {
tokio::fs::rename(&old_path_buf, &new_path)
.await
.map_err(|e| {
PinakesError::Io(std::io::Error::new(
e.kind(),
format!("Failed to rename file: {}", e),
))
})?;
}
// Update the database
let now = chrono::Utc::now().to_rfc3339();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
conn.execute(
"UPDATE media_items SET file_name = ?1, path = ?2, updated_at = ?3 WHERE id = ?4",
params![new_name, new_path_str, now, id_str],
)?;
Ok::<_, rusqlite::Error>(())
})
.await
.map_err(|e| PinakesError::Database(e.to_string()))??;
Ok(old_path)
}
/// Move a media item into `new_directory`, keeping its file name.
///
/// For externally stored items the file is relocated on disk; the stored
/// `path` is updated either way. Returns the item's previous path.
async fn move_media(&self, id: MediaId, new_directory: &std::path::Path) -> Result<String> {
    let conn = self.conn.clone();
    let id_str = id.0.to_string();
    let target_dir = new_directory.to_path_buf();

    // Fetch current location, file name, and storage mode; trashed items
    // are excluded by the `deleted_at IS NULL` filter.
    let (old_path, file_name, storage_mode) = {
        let conn = conn.clone();
        let id_str = id_str.clone();
        tokio::task::spawn_blocking(move || {
            let guard = conn.lock().unwrap();
            guard.query_row(
                "SELECT path, file_name, storage_mode FROM media_items WHERE id = ?1 AND deleted_at IS NULL",
                params![id_str],
                |row| {
                    Ok((
                        row.get::<_, String>(0)?,
                        row.get::<_, String>(1)?,
                        row.get::<_, String>(2)?,
                    ))
                },
            )
        })
    }
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))??;

    let source = std::path::PathBuf::from(&old_path);
    let destination = target_dir.join(&file_name);
    let destination_str = destination.to_string_lossy().to_string();

    // Make sure the destination directory is present before moving.
    if !target_dir.exists() {
        tokio::fs::create_dir_all(&target_dir).await?;
    }

    // Only externally stored items have an on-disk file to relocate.
    if storage_mode == "external" && source.exists() {
        tokio::fs::rename(&source, &destination).await.map_err(|e| {
            PinakesError::Io(std::io::Error::new(
                e.kind(),
                format!("Failed to move file: {}", e),
            ))
        })?;
    }

    // Persist the new location.
    let now = chrono::Utc::now().to_rfc3339();
    tokio::task::spawn_blocking(move || {
        let guard = conn.lock().unwrap();
        guard
            .execute(
                "UPDATE media_items SET path = ?1, updated_at = ?2 WHERE id = ?3",
                params![destination_str, now, id_str],
            )
            .map(|_| ())
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))??;

    Ok(old_path)
}
// ===== Trash / Soft Delete =====

/// Move a media item into the trash by stamping `deleted_at`.
///
/// Fails with `NotFound` when the item does not exist or is already
/// trashed (the `deleted_at IS NULL` guard makes the update a no-op).
async fn soft_delete_media(&self, id: MediaId) -> Result<()> {
    let conn = self.conn.clone();
    let id_str = id.0.to_string();
    let timestamp = chrono::Utc::now().to_rfc3339();

    let updated = tokio::task::spawn_blocking(move || {
        conn.lock().unwrap().execute(
            "UPDATE media_items SET deleted_at = ?1, updated_at = ?1 WHERE id = ?2 AND deleted_at IS NULL",
            params![timestamp, id_str],
        )
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))??;

    match updated {
        0 => Err(PinakesError::NotFound(format!(
            "Media item {} not found or already deleted",
            id
        ))),
        _ => Ok(()),
    }
}
/// Bring a trashed media item back by clearing its `deleted_at` stamp.
///
/// Fails with `NotFound` when the item does not exist or is not in the
/// trash (the `deleted_at IS NOT NULL` guard makes the update a no-op).
async fn restore_media(&self, id: MediaId) -> Result<()> {
    let conn = self.conn.clone();
    let id_str = id.0.to_string();
    let timestamp = chrono::Utc::now().to_rfc3339();

    let restored = tokio::task::spawn_blocking(move || {
        conn.lock().unwrap().execute(
            "UPDATE media_items SET deleted_at = NULL, updated_at = ?1 WHERE id = ?2 AND deleted_at IS NOT NULL",
            params![timestamp, id_str],
        )
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))??;

    match restored {
        0 => Err(PinakesError::NotFound(format!(
            "Media item {} not found in trash",
            id
        ))),
        _ => Ok(()),
    }
}
/// Page through trashed items (`deleted_at` set), newest deletions first.
async fn list_trash(&self, pagination: &Pagination) -> Result<Vec<MediaItem>> {
    let conn = self.conn.clone();
    let limit = pagination.limit;
    let offset = pagination.offset;

    let items = tokio::task::spawn_blocking(move || {
        let guard = conn.lock().unwrap();
        let mut stmt = guard.prepare(
            "SELECT id, path, file_name, media_type, content_hash, file_size,
title, artist, album, genre, year, duration_secs, description,
thumbnail_path, created_at, updated_at, file_mtime,
date_taken, latitude, longitude, camera_make, camera_model, rating,
storage_mode, original_filename, uploaded_at, storage_key,
perceptual_hash, deleted_at
FROM media_items
WHERE deleted_at IS NOT NULL
ORDER BY deleted_at DESC
LIMIT ?1 OFFSET ?2",
        )?;
        // Collect fallibly: the first bad row short-circuits with its error.
        stmt.query_map(params![limit as i64, offset as i64], row_to_media_item)?
            .collect::<rusqlite::Result<Vec<_>>>()
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))??;

    Ok(items)
}
async fn empty_trash(&self) -> Result<u64> {
let conn = self.conn.clone();
let count = tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
// First, get the IDs to clean up related data
let mut stmt =
conn.prepare("SELECT id FROM media_items WHERE deleted_at IS NOT NULL")?;
let ids: Vec<String> = stmt
.query_map([], |row| row.get(0))?
.filter_map(|r| r.ok())
.collect();
// Delete related data
for id in &ids {
conn.execute("DELETE FROM media_tags WHERE media_id = ?1", params![id])?;
conn.execute(
"DELETE FROM collection_items WHERE media_id = ?1",
params![id],
)?;
conn.execute("DELETE FROM custom_fields WHERE media_id = ?1", params![id])?;
}
// Delete the media items
let count = conn.execute("DELETE FROM media_items WHERE deleted_at IS NOT NULL", [])?;
Ok::<_, rusqlite::Error>(count as u64)
})
.await
.map_err(|e| PinakesError::Database(e.to_string()))??;
Ok(count)
}
async fn purge_old_trash(&self, before: chrono::DateTime<chrono::Utc>) -> Result<u64> {
let conn = self.conn.clone();
let before_str = before.to_rfc3339();
let count = tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
// First, get the IDs to clean up related data
let mut stmt = conn.prepare(
"SELECT id FROM media_items WHERE deleted_at IS NOT NULL AND deleted_at < ?1",
)?;
let ids: Vec<String> = stmt
.query_map(params![before_str], |row| row.get(0))?
.filter_map(|r| r.ok())
.collect();
// Delete related data
for id in &ids {
conn.execute("DELETE FROM media_tags WHERE media_id = ?1", params![id])?;
conn.execute(
"DELETE FROM collection_items WHERE media_id = ?1",
params![id],
)?;
conn.execute("DELETE FROM custom_fields WHERE media_id = ?1", params![id])?;
}
// Delete the media items
let count = conn.execute(
"DELETE FROM media_items WHERE deleted_at IS NOT NULL AND deleted_at < ?1",
params![before_str],
)?;
Ok::<_, rusqlite::Error>(count as u64)
})
.await
.map_err(|e| PinakesError::Database(e.to_string()))??;
Ok(count)
}
/// Count how many media items currently sit in the trash.
async fn count_trash(&self) -> Result<u64> {
    let conn = self.conn.clone();
    let total = tokio::task::spawn_blocking(move || {
        conn.lock()
            .unwrap()
            .query_row(
                "SELECT COUNT(*) FROM media_items WHERE deleted_at IS NOT NULL",
                [],
                |row| row.get::<_, i64>(0),
            )
            .map(|n| n as u64)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))??;
    Ok(total)
}
}
// Helper function to parse a share row