pinakes-core: file management improvements; in-database storage cleanup
Signed-off-by: NotAShelf <raf@notashelf.dev> Change-Id: Ic186f9bf08683a14562bbe43743c04706a6a6964
This commit is contained in:
parent
f34c78b238
commit
f5371a30bb
8 changed files with 698 additions and 4 deletions
|
|
@ -197,6 +197,9 @@ fn row_to_media_item(row: &Row) -> Result<MediaItem> {
|
|||
|
||||
created_at: row.get("created_at"),
|
||||
updated_at: row.get("updated_at"),
|
||||
|
||||
// Trash support
|
||||
deleted_at: row.try_get("deleted_at").ok().flatten(),
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -674,7 +677,10 @@ impl StorageBackend for PostgresBackend {
|
|||
.await
|
||||
.map_err(|e| PinakesError::Database(e.to_string()))?;
|
||||
let row = client
|
||||
.query_one("SELECT COUNT(*) FROM media_items", &[])
|
||||
.query_one(
|
||||
"SELECT COUNT(*) FROM media_items WHERE deleted_at IS NULL",
|
||||
&[],
|
||||
)
|
||||
.await?;
|
||||
let count: i64 = row.get(0);
|
||||
Ok(count as u64)
|
||||
|
|
@ -783,8 +789,11 @@ impl StorageBackend for PostgresBackend {
|
|||
"SELECT id, path, file_name, media_type, content_hash, file_size,
|
||||
title, artist, album, genre, year, duration_secs, description,
|
||||
thumbnail_path, file_mtime, date_taken, latitude, longitude,
|
||||
camera_make, camera_model, rating, perceptual_hash, created_at, updated_at
|
||||
camera_make, camera_model, rating, perceptual_hash,
|
||||
storage_mode, original_filename, uploaded_at, storage_key,
|
||||
created_at, updated_at, deleted_at
|
||||
FROM media_items
|
||||
WHERE deleted_at IS NULL
|
||||
ORDER BY {order_by}
|
||||
LIMIT $1 OFFSET $2"
|
||||
);
|
||||
|
|
@ -5727,6 +5736,306 @@ impl StorageBackend for PostgresBackend {
|
|||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ===== File Management =====
|
||||
|
||||
async fn rename_media(&self, id: MediaId, new_name: &str) -> Result<String> {
|
||||
// Validate the new name
|
||||
if new_name.is_empty() || new_name.contains('/') || new_name.contains('\\') {
|
||||
return Err(PinakesError::InvalidOperation(
|
||||
"Invalid file name: must not be empty or contain path separators".into(),
|
||||
));
|
||||
}
|
||||
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
// Get the current path and storage mode
|
||||
let row = client
|
||||
.query_one(
|
||||
"SELECT path, storage_mode FROM media_items WHERE id = $1 AND deleted_at IS NULL",
|
||||
&[&id.0],
|
||||
)
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(e.to_string()))?;
|
||||
|
||||
let old_path: String = row.get(0);
|
||||
let storage_mode: String = row.get(1);
|
||||
|
||||
let old_path_buf = std::path::PathBuf::from(&old_path);
|
||||
let parent = old_path_buf.parent().unwrap_or(std::path::Path::new(""));
|
||||
let new_path = parent.join(new_name);
|
||||
let new_path_str = new_path.to_string_lossy().to_string();
|
||||
|
||||
// For external storage, actually rename the file on disk
|
||||
if storage_mode == "external" && old_path_buf.exists() {
|
||||
tokio::fs::rename(&old_path_buf, &new_path)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
PinakesError::Io(std::io::Error::new(
|
||||
e.kind(),
|
||||
format!("Failed to rename file: {}", e),
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
||||
// Update the database
|
||||
client
|
||||
.execute(
|
||||
"UPDATE media_items SET file_name = $1, path = $2, updated_at = NOW() WHERE id = $3",
|
||||
&[&new_name, &new_path_str, &id.0],
|
||||
)
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(e.to_string()))?;
|
||||
|
||||
Ok(old_path)
|
||||
}
|
||||
|
||||
async fn move_media(&self, id: MediaId, new_directory: &std::path::Path) -> Result<String> {
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
// Get the current path, file_name, and storage mode
|
||||
let row = client
|
||||
.query_one(
|
||||
"SELECT path, file_name, storage_mode FROM media_items WHERE id = $1 AND deleted_at IS NULL",
|
||||
&[&id.0],
|
||||
)
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(e.to_string()))?;
|
||||
|
||||
let old_path: String = row.get(0);
|
||||
let file_name: String = row.get(1);
|
||||
let storage_mode: String = row.get(2);
|
||||
|
||||
let old_path_buf = std::path::PathBuf::from(&old_path);
|
||||
let new_path = new_directory.join(&file_name);
|
||||
let new_path_str = new_path.to_string_lossy().to_string();
|
||||
|
||||
// Ensure the target directory exists
|
||||
if !new_directory.exists() {
|
||||
tokio::fs::create_dir_all(new_directory).await?;
|
||||
}
|
||||
|
||||
// For external storage, actually move the file on disk
|
||||
if storage_mode == "external" && old_path_buf.exists() {
|
||||
tokio::fs::rename(&old_path_buf, &new_path)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
PinakesError::Io(std::io::Error::new(
|
||||
e.kind(),
|
||||
format!("Failed to move file: {}", e),
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
||||
// Update the database
|
||||
client
|
||||
.execute(
|
||||
"UPDATE media_items SET path = $1, updated_at = NOW() WHERE id = $2",
|
||||
&[&new_path_str, &id.0],
|
||||
)
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(e.to_string()))?;
|
||||
|
||||
Ok(old_path)
|
||||
}
|
||||
|
||||
// ===== Trash / Soft Delete =====
|
||||
|
||||
async fn soft_delete_media(&self, id: MediaId) -> Result<()> {
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
let rows_affected = client
|
||||
.execute(
|
||||
"UPDATE media_items SET deleted_at = NOW(), updated_at = NOW() WHERE id = $1 AND deleted_at IS NULL",
|
||||
&[&id.0],
|
||||
)
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(e.to_string()))?;
|
||||
|
||||
if rows_affected == 0 {
|
||||
return Err(PinakesError::NotFound(format!(
|
||||
"Media item {} not found or already deleted",
|
||||
id
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn restore_media(&self, id: MediaId) -> Result<()> {
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
let rows_affected = client
|
||||
.execute(
|
||||
"UPDATE media_items SET deleted_at = NULL, updated_at = NOW() WHERE id = $1 AND deleted_at IS NOT NULL",
|
||||
&[&id.0],
|
||||
)
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(e.to_string()))?;
|
||||
|
||||
if rows_affected == 0 {
|
||||
return Err(PinakesError::NotFound(format!(
|
||||
"Media item {} not found in trash",
|
||||
id
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn list_trash(&self, pagination: &Pagination) -> Result<Vec<MediaItem>> {
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
let rows = client
|
||||
.query(
|
||||
"SELECT id, path, file_name, media_type, content_hash, file_size,
|
||||
title, artist, album, genre, year, duration_secs, description,
|
||||
thumbnail_path, created_at, updated_at, file_mtime,
|
||||
date_taken, latitude, longitude, camera_make, camera_model, rating,
|
||||
storage_mode, original_filename, uploaded_at, storage_key,
|
||||
perceptual_hash, deleted_at
|
||||
FROM media_items
|
||||
WHERE deleted_at IS NOT NULL
|
||||
ORDER BY deleted_at DESC
|
||||
LIMIT $1 OFFSET $2",
|
||||
&[&(pagination.limit as i64), &(pagination.offset as i64)],
|
||||
)
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(e.to_string()))?;
|
||||
|
||||
let mut items = Vec::new();
|
||||
for row in rows {
|
||||
items.push(row_to_media_item(&row)?);
|
||||
}
|
||||
|
||||
Ok(items)
|
||||
}
|
||||
|
||||
async fn empty_trash(&self) -> Result<u64> {
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
// First, get the IDs to clean up related data
|
||||
let id_rows = client
|
||||
.query(
|
||||
"SELECT id FROM media_items WHERE deleted_at IS NOT NULL",
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(e.to_string()))?;
|
||||
|
||||
// Delete related data
|
||||
for row in &id_rows {
|
||||
let id: uuid::Uuid = row.get(0);
|
||||
client
|
||||
.execute("DELETE FROM media_tags WHERE media_id = $1", &[&id])
|
||||
.await
|
||||
.ok();
|
||||
client
|
||||
.execute("DELETE FROM collection_items WHERE media_id = $1", &[&id])
|
||||
.await
|
||||
.ok();
|
||||
client
|
||||
.execute("DELETE FROM custom_fields WHERE media_id = $1", &[&id])
|
||||
.await
|
||||
.ok();
|
||||
}
|
||||
|
||||
// Delete the media items
|
||||
let count = client
|
||||
.execute("DELETE FROM media_items WHERE deleted_at IS NOT NULL", &[])
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(e.to_string()))?;
|
||||
|
||||
Ok(count)
|
||||
}
|
||||
|
||||
async fn purge_old_trash(&self, before: chrono::DateTime<chrono::Utc>) -> Result<u64> {
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
// First, get the IDs to clean up related data
|
||||
let id_rows = client
|
||||
.query(
|
||||
"SELECT id FROM media_items WHERE deleted_at IS NOT NULL AND deleted_at < $1",
|
||||
&[&before],
|
||||
)
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(e.to_string()))?;
|
||||
|
||||
// Delete related data
|
||||
for row in &id_rows {
|
||||
let id: uuid::Uuid = row.get(0);
|
||||
client
|
||||
.execute("DELETE FROM media_tags WHERE media_id = $1", &[&id])
|
||||
.await
|
||||
.ok();
|
||||
client
|
||||
.execute("DELETE FROM collection_items WHERE media_id = $1", &[&id])
|
||||
.await
|
||||
.ok();
|
||||
client
|
||||
.execute("DELETE FROM custom_fields WHERE media_id = $1", &[&id])
|
||||
.await
|
||||
.ok();
|
||||
}
|
||||
|
||||
// Delete the media items
|
||||
let count = client
|
||||
.execute(
|
||||
"DELETE FROM media_items WHERE deleted_at IS NOT NULL AND deleted_at < $1",
|
||||
&[&before],
|
||||
)
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(e.to_string()))?;
|
||||
|
||||
Ok(count)
|
||||
}
|
||||
|
||||
async fn count_trash(&self) -> Result<u64> {
|
||||
let client = self
|
||||
.pool
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
|
||||
|
||||
let row = client
|
||||
.query_one(
|
||||
"SELECT COUNT(*) FROM media_items WHERE deleted_at IS NOT NULL",
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
.map_err(|e| PinakesError::Database(e.to_string()))?;
|
||||
|
||||
let count: i64 = row.get(0);
|
||||
Ok(count as u64)
|
||||
}
|
||||
}
|
||||
|
||||
impl PostgresBackend {
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue