pinakes-core: file management improvements; in-database storage cleanup

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Ic186f9bf08683a14562bbe43743c04706a6a6964
This commit is contained in:
raf 2026-02-05 10:39:20 +03:00
commit f5371a30bb
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
8 changed files with 698 additions and 4 deletions

View file

@ -203,6 +203,9 @@ pub async fn import_file_with_options(
created_at: now,
updated_at: now,
// New items are not deleted
deleted_at: None,
};
storage.insert_media(&item).await?;

View file

@ -151,6 +151,9 @@ pub struct MediaItem {
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
/// Soft delete timestamp. If set, the item is in the trash.
pub deleted_at: Option<DateTime<Utc>>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]

View file

@ -741,6 +741,54 @@ pub trait StorageBackend: Send + Sync + 'static {
/// Mark all notifications as read for a user
async fn mark_all_notifications_read(&self, user_id: UserId) -> Result<()>;
// ===== File Management =====
/// Rename a media item (changes file_name and updates path accordingly).
/// For external storage, this actually renames the file on disk.
/// For managed storage, this only updates the metadata.
/// Returns the old path for sync log recording.
///
/// # Errors
/// Implementations reject invalid names (empty or containing path
/// separators) and report database or filesystem failures.
async fn rename_media(&self, id: MediaId, new_name: &str) -> Result<String>;
/// Move a media item to a new directory.
/// For external storage, this actually moves the file on disk.
/// For managed storage, this only updates the path in metadata.
/// Returns the old path for sync log recording.
///
/// # Errors
/// Fails when the item cannot be found or the database/filesystem
/// operation fails.
async fn move_media(&self, id: MediaId, new_directory: &std::path::Path) -> Result<String>;
/// Batch move multiple media items to a new directory.
///
/// Items are moved sequentially via `move_media`; the first failure is
/// returned immediately and items already moved stay moved (the batch is
/// NOT atomic). On success, returns `(id, old_path)` pairs in the same
/// order as `ids`, for sync log recording.
async fn batch_move_media(
    &self,
    ids: &[MediaId],
    new_directory: &std::path::Path,
) -> Result<Vec<(MediaId, String)>> {
    // Pre-size the result: exactly one entry per input id on success.
    let mut results = Vec::with_capacity(ids.len());
    for id in ids {
        let old_path = self.move_media(*id, new_directory).await?;
        results.push((*id, old_path));
    }
    Ok(results)
}
// ===== Trash / Soft Delete =====
/// Soft delete a media item (set deleted_at timestamp).
///
/// # Errors
/// Fails if the item does not exist or is already in the trash.
async fn soft_delete_media(&self, id: MediaId) -> Result<()>;
/// Restore a soft-deleted media item.
///
/// # Errors
/// Fails if the item is not currently in the trash.
async fn restore_media(&self, id: MediaId) -> Result<()>;
/// List all soft-deleted media items.
async fn list_trash(&self, pagination: &Pagination) -> Result<Vec<MediaItem>>;
/// Permanently delete all items in trash.
/// Returns the number of media items removed.
async fn empty_trash(&self) -> Result<u64>;
/// Permanently delete items in trash older than the specified date.
/// Returns the number of media items removed.
async fn purge_old_trash(&self, before: DateTime<Utc>) -> Result<u64>;
/// Count items in trash.
async fn count_trash(&self) -> Result<u64>;
}
/// Comprehensive library statistics.

View file

@ -197,6 +197,9 @@ fn row_to_media_item(row: &Row) -> Result<MediaItem> {
created_at: row.get("created_at"),
updated_at: row.get("updated_at"),
// Trash support
deleted_at: row.try_get("deleted_at").ok().flatten(),
})
}
@ -674,7 +677,10 @@ impl StorageBackend for PostgresBackend {
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
let row = client
.query_one("SELECT COUNT(*) FROM media_items", &[])
.query_one(
"SELECT COUNT(*) FROM media_items WHERE deleted_at IS NULL",
&[],
)
.await?;
let count: i64 = row.get(0);
Ok(count as u64)
@ -783,8 +789,11 @@ impl StorageBackend for PostgresBackend {
"SELECT id, path, file_name, media_type, content_hash, file_size,
title, artist, album, genre, year, duration_secs, description,
thumbnail_path, file_mtime, date_taken, latitude, longitude,
camera_make, camera_model, rating, perceptual_hash, created_at, updated_at
camera_make, camera_model, rating, perceptual_hash,
storage_mode, original_filename, uploaded_at, storage_key,
created_at, updated_at, deleted_at
FROM media_items
WHERE deleted_at IS NULL
ORDER BY {order_by}
LIMIT $1 OFFSET $2"
);
@ -5727,6 +5736,306 @@ impl StorageBackend for PostgresBackend {
Ok(())
}
// ===== File Management =====
/// Rename a media item. For external storage the file is renamed on disk;
/// for managed storage only the metadata changes. Returns the old path.
async fn rename_media(&self, id: MediaId, new_name: &str) -> Result<String> {
    // Validate the new name. Besides path separators, reject the special
    // `.` and `..` components, which would otherwise collapse or escape the
    // parent directory when joined below.
    if new_name.is_empty()
        || new_name.contains('/')
        || new_name.contains('\\')
        || new_name == "."
        || new_name == ".."
    {
        return Err(PinakesError::InvalidOperation(
            "Invalid file name: must not be empty, be '.' or '..', or contain path separators"
                .into(),
        ));
    }
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Look up the current path and storage mode. A missing (or trashed)
    // item is reported as NotFound — consistent with soft_delete_media /
    // restore_media — instead of surfacing as a generic database error.
    let row = client
        .query_opt(
            "SELECT path, storage_mode FROM media_items WHERE id = $1 AND deleted_at IS NULL",
            &[&id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?
        .ok_or_else(|| PinakesError::NotFound(format!("Media item {} not found", id)))?;
    let old_path: String = row.get(0);
    let storage_mode: String = row.get(1);
    let old_path_buf = std::path::PathBuf::from(&old_path);
    let parent = old_path_buf.parent().unwrap_or(std::path::Path::new(""));
    let new_path = parent.join(new_name);
    let new_path_str = new_path.to_string_lossy().to_string();
    // Only external storage has a real file at `path`; managed storage is a
    // metadata-only rename.
    if storage_mode == "external" && old_path_buf.exists() {
        tokio::fs::rename(&old_path_buf, &new_path)
            .await
            .map_err(|e| {
                PinakesError::Io(std::io::Error::new(
                    e.kind(),
                    format!("Failed to rename file: {}", e),
                ))
            })?;
    }
    // Persist the new name/path; return the old path for sync-log recording.
    client
        .execute(
            "UPDATE media_items SET file_name = $1, path = $2, updated_at = NOW() WHERE id = $3",
            &[&new_name, &new_path_str, &id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(old_path)
}
/// Move a media item into `new_directory`. For external storage the file is
/// moved on disk (creating the directory if needed); for managed storage
/// only the path metadata changes. Returns the old path.
async fn move_media(&self, id: MediaId, new_directory: &std::path::Path) -> Result<String> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Look up path, file name and storage mode; report a missing or trashed
    // item as NotFound rather than a generic database error.
    let row = client
        .query_opt(
            "SELECT path, file_name, storage_mode FROM media_items WHERE id = $1 AND deleted_at IS NULL",
            &[&id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?
        .ok_or_else(|| PinakesError::NotFound(format!("Media item {} not found", id)))?;
    let old_path: String = row.get(0);
    let file_name: String = row.get(1);
    let storage_mode: String = row.get(2);
    let old_path_buf = std::path::PathBuf::from(&old_path);
    let new_path = new_directory.join(&file_name);
    let new_path_str = new_path.to_string_lossy().to_string();
    // Only external storage has a real file at `path`. For managed storage
    // the move is metadata-only, so we do NOT create directories on the
    // server's local disk (the previous version created them regardless).
    if storage_mode == "external" {
        if !new_directory.exists() {
            tokio::fs::create_dir_all(new_directory).await?;
        }
        if old_path_buf.exists() {
            tokio::fs::rename(&old_path_buf, &new_path)
                .await
                .map_err(|e| {
                    PinakesError::Io(std::io::Error::new(
                        e.kind(),
                        format!("Failed to move file: {}", e),
                    ))
                })?;
        }
    }
    // Persist the new path; return the old one for sync-log recording.
    client
        .execute(
            "UPDATE media_items SET path = $1, updated_at = NOW() WHERE id = $2",
            &[&new_path_str, &id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(old_path)
}
// ===== Trash / Soft Delete =====
/// Move a media item into the trash by stamping `deleted_at`.
async fn soft_delete_media(&self, id: MediaId) -> Result<()> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // The `deleted_at IS NULL` guard makes the operation idempotent-safe:
    // an already-trashed row is simply not matched.
    let updated = client
        .execute(
            "UPDATE media_items SET deleted_at = NOW(), updated_at = NOW() WHERE id = $1 AND deleted_at IS NULL",
            &[&id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    // Zero affected rows means the id is unknown or the item is trashed.
    match updated {
        0 => Err(PinakesError::NotFound(format!(
            "Media item {} not found or already deleted",
            id
        ))),
        _ => Ok(()),
    }
}
/// Take a media item out of the trash by clearing `deleted_at`.
async fn restore_media(&self, id: MediaId) -> Result<()> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Only rows currently in the trash are matched; the affected-row count
    // tells us whether the restore did anything.
    let restored = client
        .execute(
            "UPDATE media_items SET deleted_at = NULL, updated_at = NOW() WHERE id = $1 AND deleted_at IS NOT NULL",
            &[&id.0],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    match restored {
        0 => Err(PinakesError::NotFound(format!(
            "Media item {} not found in trash",
            id
        ))),
        _ => Ok(()),
    }
}
async fn list_trash(&self, pagination: &Pagination) -> Result<Vec<MediaItem>> {
let client = self
.pool
.get()
.await
.map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
let rows = client
.query(
"SELECT id, path, file_name, media_type, content_hash, file_size,
title, artist, album, genre, year, duration_secs, description,
thumbnail_path, created_at, updated_at, file_mtime,
date_taken, latitude, longitude, camera_make, camera_model, rating,
storage_mode, original_filename, uploaded_at, storage_key,
perceptual_hash, deleted_at
FROM media_items
WHERE deleted_at IS NOT NULL
ORDER BY deleted_at DESC
LIMIT $1 OFFSET $2",
&[&(pagination.limit as i64), &(pagination.offset as i64)],
)
.await
.map_err(|e| PinakesError::Database(e.to_string()))?;
let mut items = Vec::new();
for row in rows {
items.push(row_to_media_item(&row)?);
}
Ok(items)
}
/// Permanently delete every item in the trash, including dependent rows.
/// Returns the number of media items removed.
async fn empty_trash(&self) -> Result<u64> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Remove dependent rows with set-based subquery deletes: one statement
    // per table instead of a round trip per trashed item, and failures are
    // reported instead of silently ignored (leftover children would make
    // the final DELETE below fail with an opaque error anyway).
    for sql in [
        "DELETE FROM media_tags WHERE media_id IN \
         (SELECT id FROM media_items WHERE deleted_at IS NOT NULL)",
        "DELETE FROM collection_items WHERE media_id IN \
         (SELECT id FROM media_items WHERE deleted_at IS NOT NULL)",
        "DELETE FROM custom_fields WHERE media_id IN \
         (SELECT id FROM media_items WHERE deleted_at IS NOT NULL)",
    ] {
        client
            .execute(sql, &[])
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
    }
    // Finally delete the media rows themselves and report the count.
    let count = client
        .execute("DELETE FROM media_items WHERE deleted_at IS NOT NULL", &[])
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(count)
}
/// Permanently delete trashed items whose `deleted_at` is before `before`,
/// including dependent rows. Returns the number of media items removed.
async fn purge_old_trash(&self, before: chrono::DateTime<chrono::Utc>) -> Result<u64> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    // Set-based subquery deletes: one statement per dependent table instead
    // of a round trip per expired item, with errors propagated instead of
    // swallowed (leftover children would break the final DELETE anyway).
    for sql in [
        "DELETE FROM media_tags WHERE media_id IN \
         (SELECT id FROM media_items WHERE deleted_at IS NOT NULL AND deleted_at < $1)",
        "DELETE FROM collection_items WHERE media_id IN \
         (SELECT id FROM media_items WHERE deleted_at IS NOT NULL AND deleted_at < $1)",
        "DELETE FROM custom_fields WHERE media_id IN \
         (SELECT id FROM media_items WHERE deleted_at IS NOT NULL AND deleted_at < $1)",
    ] {
        client
            .execute(sql, &[&before])
            .await
            .map_err(|e| PinakesError::Database(e.to_string()))?;
    }
    // Finally remove the expired media rows themselves.
    let count = client
        .execute(
            "DELETE FROM media_items WHERE deleted_at IS NOT NULL AND deleted_at < $1",
            &[&before],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    Ok(count)
}
/// Count how many media items currently sit in the trash.
async fn count_trash(&self) -> Result<u64> {
    let client = self
        .pool
        .get()
        .await
        .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?;
    let row = client
        .query_one(
            "SELECT COUNT(*) FROM media_items WHERE deleted_at IS NOT NULL",
            &[],
        )
        .await
        .map_err(|e| PinakesError::Database(e.to_string()))?;
    // COUNT(*) comes back as BIGINT (i64); widen to the unsigned API type.
    Ok(row.get::<_, i64>(0) as u64)
}
}
impl PostgresBackend {

View file

@ -152,6 +152,14 @@ fn row_to_media_item(row: &Row) -> rusqlite::Result<MediaItem> {
created_at: parse_datetime(&created_str),
updated_at: parse_datetime(&updated_str),
// Trash support
deleted_at: row
.get::<_, Option<String>>("deleted_at")
.ok()
.flatten()
.and_then(|s| DateTime::parse_from_rfc3339(&s).ok())
.map(|dt| dt.with_timezone(&Utc)),
})
}
@ -691,8 +699,11 @@ impl StorageBackend for SqliteBackend {
let db = conn
.lock()
.map_err(|e| PinakesError::Database(e.to_string()))?;
let count: i64 =
db.query_row("SELECT COUNT(*) FROM media_items", [], |row| row.get(0))?;
let count: i64 = db.query_row(
"SELECT COUNT(*) FROM media_items WHERE deleted_at IS NULL",
[],
|row| row.get(0),
)?;
Ok(count as u64)
})
.await
@ -799,6 +810,7 @@ impl StorageBackend for SqliteBackend {
"SELECT id, path, file_name, media_type, content_hash, file_size, \
title, artist, album, genre, year, duration_secs, description, \
thumbnail_path, file_mtime, created_at, updated_at FROM media_items \
WHERE deleted_at IS NULL \
ORDER BY {order_by} LIMIT ?1 OFFSET ?2"
);
let mut stmt = db.prepare(&sql)?;
@ -6071,6 +6083,302 @@ impl StorageBackend for SqliteBackend {
.map_err(|e| PinakesError::Database(e.to_string()))??;
Ok(())
}
// ===== File Management =====
async fn rename_media(&self, id: MediaId, new_name: &str) -> Result<String> {
// Validate the new name
if new_name.is_empty() || new_name.contains('/') || new_name.contains('\\') {
return Err(PinakesError::InvalidOperation(
"Invalid file name: must not be empty or contain path separators".into(),
));
}
let conn = self.conn.clone();
let id_str = id.0.to_string();
let new_name = new_name.to_string();
let (old_path, storage_mode) = tokio::task::spawn_blocking({
let conn = conn.clone();
let id_str = id_str.clone();
move || {
let conn = conn.lock().unwrap();
let row: (String, String) = conn.query_row(
"SELECT path, storage_mode FROM media_items WHERE id = ?1 AND deleted_at IS NULL",
params![id_str],
|row| Ok((row.get(0)?, row.get(1)?)),
)?;
Ok::<_, rusqlite::Error>(row)
}
})
.await
.map_err(|e| PinakesError::Database(e.to_string()))??;
let old_path_buf = std::path::PathBuf::from(&old_path);
let parent = old_path_buf.parent().unwrap_or(std::path::Path::new(""));
let new_path = parent.join(&new_name);
let new_path_str = new_path.to_string_lossy().to_string();
// For external storage, actually rename the file on disk
if storage_mode == "external" && old_path_buf.exists() {
tokio::fs::rename(&old_path_buf, &new_path)
.await
.map_err(|e| {
PinakesError::Io(std::io::Error::new(
e.kind(),
format!("Failed to rename file: {}", e),
))
})?;
}
// Update the database
let now = chrono::Utc::now().to_rfc3339();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
conn.execute(
"UPDATE media_items SET file_name = ?1, path = ?2, updated_at = ?3 WHERE id = ?4",
params![new_name, new_path_str, now, id_str],
)?;
Ok::<_, rusqlite::Error>(())
})
.await
.map_err(|e| PinakesError::Database(e.to_string()))??;
Ok(old_path)
}
async fn move_media(&self, id: MediaId, new_directory: &std::path::Path) -> Result<String> {
let conn = self.conn.clone();
let id_str = id.0.to_string();
let new_dir = new_directory.to_path_buf();
let (old_path, file_name, storage_mode) = tokio::task::spawn_blocking({
let conn = conn.clone();
let id_str = id_str.clone();
move || {
let conn = conn.lock().unwrap();
let row: (String, String, String) = conn.query_row(
"SELECT path, file_name, storage_mode FROM media_items WHERE id = ?1 AND deleted_at IS NULL",
params![id_str],
|row| Ok((row.get(0)?, row.get(1)?, row.get(2)?)),
)?;
Ok::<_, rusqlite::Error>(row)
}
})
.await
.map_err(|e| PinakesError::Database(e.to_string()))??;
let old_path_buf = std::path::PathBuf::from(&old_path);
let new_path = new_dir.join(&file_name);
let new_path_str = new_path.to_string_lossy().to_string();
// Ensure the target directory exists
if !new_dir.exists() {
tokio::fs::create_dir_all(&new_dir).await?;
}
// For external storage, actually move the file on disk
if storage_mode == "external" && old_path_buf.exists() {
tokio::fs::rename(&old_path_buf, &new_path)
.await
.map_err(|e| {
PinakesError::Io(std::io::Error::new(
e.kind(),
format!("Failed to move file: {}", e),
))
})?;
}
// Update the database
let now = chrono::Utc::now().to_rfc3339();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
conn.execute(
"UPDATE media_items SET path = ?1, updated_at = ?2 WHERE id = ?3",
params![new_path_str, now, id_str],
)?;
Ok::<_, rusqlite::Error>(())
})
.await
.map_err(|e| PinakesError::Database(e.to_string()))??;
Ok(old_path)
}
// ===== Trash / Soft Delete =====
/// Move a media item into the trash by stamping `deleted_at`.
async fn soft_delete_media(&self, id: MediaId) -> Result<()> {
    let conn = self.conn.clone();
    let id_str = id.0.to_string();
    let stamp = chrono::Utc::now().to_rfc3339();
    // Single UPDATE guarded by `deleted_at IS NULL`; zero affected rows
    // means the id is unknown or the item is already trashed.
    let affected = tokio::task::spawn_blocking(move || {
        let db = conn.lock().unwrap();
        db.execute(
            "UPDATE media_items SET deleted_at = ?1, updated_at = ?1 WHERE id = ?2 AND deleted_at IS NULL",
            params![stamp, id_str],
        )
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))??;
    match affected {
        0 => Err(PinakesError::NotFound(format!(
            "Media item {} not found or already deleted",
            id
        ))),
        _ => Ok(()),
    }
}
/// Take a media item out of the trash by clearing `deleted_at`.
async fn restore_media(&self, id: MediaId) -> Result<()> {
    let conn = self.conn.clone();
    let id_str = id.0.to_string();
    let stamp = chrono::Utc::now().to_rfc3339();
    // Only rows currently in the trash are matched; zero affected rows
    // means the id is unknown or the item was never trashed.
    let affected = tokio::task::spawn_blocking(move || {
        let db = conn.lock().unwrap();
        db.execute(
            "UPDATE media_items SET deleted_at = NULL, updated_at = ?1 WHERE id = ?2 AND deleted_at IS NOT NULL",
            params![stamp, id_str],
        )
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))??;
    if affected == 0 {
        Err(PinakesError::NotFound(format!(
            "Media item {} not found in trash",
            id
        )))
    } else {
        Ok(())
    }
}
/// List trashed items, newest deletions first, with pagination.
async fn list_trash(&self, pagination: &Pagination) -> Result<Vec<MediaItem>> {
    let conn = self.conn.clone();
    let limit = pagination.limit as i64;
    let offset = pagination.offset as i64;
    // Run the query on the blocking pool and collect directly into a
    // Result, short-circuiting on the first row that fails conversion.
    let items = tokio::task::spawn_blocking(move || {
        let db = conn.lock().unwrap();
        let mut stmt = db.prepare(
            "SELECT id, path, file_name, media_type, content_hash, file_size,
                title, artist, album, genre, year, duration_secs, description,
                thumbnail_path, created_at, updated_at, file_mtime,
                date_taken, latitude, longitude, camera_make, camera_model, rating,
                storage_mode, original_filename, uploaded_at, storage_key,
                perceptual_hash, deleted_at
                FROM media_items
                WHERE deleted_at IS NOT NULL
                ORDER BY deleted_at DESC
                LIMIT ?1 OFFSET ?2",
        )?;
        stmt.query_map(params![limit, offset], row_to_media_item)?
            .collect::<rusqlite::Result<Vec<_>>>()
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))??;
    Ok(items)
}
/// Permanently delete every item in the trash, including dependent rows.
/// Returns the number of media items removed.
async fn empty_trash(&self) -> Result<u64> {
    let conn = self.conn.clone();
    let count = tokio::task::spawn_blocking(move || {
        let conn = conn.lock().unwrap();
        // Set-based deletes of dependent rows: one statement per table
        // instead of collecting ids and issuing a delete per item, and no
        // silently dropped ids (the old `filter_map(|r| r.ok())`).
        conn.execute(
            "DELETE FROM media_tags WHERE media_id IN \
             (SELECT id FROM media_items WHERE deleted_at IS NOT NULL)",
            [],
        )?;
        conn.execute(
            "DELETE FROM collection_items WHERE media_id IN \
             (SELECT id FROM media_items WHERE deleted_at IS NOT NULL)",
            [],
        )?;
        conn.execute(
            "DELETE FROM custom_fields WHERE media_id IN \
             (SELECT id FROM media_items WHERE deleted_at IS NOT NULL)",
            [],
        )?;
        // Finally delete the media rows themselves and report the count.
        let count = conn.execute("DELETE FROM media_items WHERE deleted_at IS NOT NULL", [])?;
        Ok::<_, rusqlite::Error>(count as u64)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))??;
    Ok(count)
}
/// Permanently delete trashed items whose `deleted_at` is before `before`,
/// including dependent rows. Returns the number of media items removed.
async fn purge_old_trash(&self, before: chrono::DateTime<chrono::Utc>) -> Result<u64> {
    let conn = self.conn.clone();
    let before_str = before.to_rfc3339();
    let count = tokio::task::spawn_blocking(move || {
        let conn = conn.lock().unwrap();
        // Set-based deletes of dependent rows: one statement per table
        // instead of an id list with a delete per item, and no silently
        // dropped ids. deleted_at is stored as RFC 3339 text, so the `<`
        // comparison is lexicographic on uniformly formatted UTC stamps.
        conn.execute(
            "DELETE FROM media_tags WHERE media_id IN \
             (SELECT id FROM media_items WHERE deleted_at IS NOT NULL AND deleted_at < ?1)",
            params![before_str],
        )?;
        conn.execute(
            "DELETE FROM collection_items WHERE media_id IN \
             (SELECT id FROM media_items WHERE deleted_at IS NOT NULL AND deleted_at < ?1)",
            params![before_str],
        )?;
        conn.execute(
            "DELETE FROM custom_fields WHERE media_id IN \
             (SELECT id FROM media_items WHERE deleted_at IS NOT NULL AND deleted_at < ?1)",
            params![before_str],
        )?;
        // Finally remove the expired media rows themselves.
        let count = conn.execute(
            "DELETE FROM media_items WHERE deleted_at IS NOT NULL AND deleted_at < ?1",
            params![before_str],
        )?;
        Ok::<_, rusqlite::Error>(count as u64)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))??;
    Ok(count)
}
/// Count how many media items currently sit in the trash.
async fn count_trash(&self) -> Result<u64> {
    let conn = self.conn.clone();
    let total = tokio::task::spawn_blocking(move || {
        let db = conn.lock().unwrap();
        // COUNT(*) over trashed rows only; widen i64 -> u64 for the API.
        db.query_row(
            "SELECT COUNT(*) FROM media_items WHERE deleted_at IS NOT NULL",
            [],
            |row| row.get::<_, i64>(0),
        )
        .map(|n| n as u64)
    })
    .await
    .map_err(|e| PinakesError::Database(e.to_string()))??;
    Ok(total)
}
}
// Helper function to parse a share row

View file

@ -97,6 +97,7 @@ pub async fn process_upload<R: AsyncRead + Unpin>(
storage_key: Some(content_hash.0.clone()),
created_at: now,
updated_at: now,
deleted_at: None,
};
// Store the media item

View file

@ -0,0 +1,11 @@
-- V18: File Management (Rename, Move, Trash)
-- Adds soft delete support for trash/recycle bin functionality
-- Add deleted_at column for soft delete (trash)
-- NULL = live item; non-NULL = timestamp at which the item was trashed.
ALTER TABLE media_items ADD COLUMN deleted_at TIMESTAMPTZ;
-- Index for efficient trash queries
-- (list_trash filters and sorts on deleted_at; purge compares deleted_at < cutoff)
CREATE INDEX idx_media_deleted_at ON media_items(deleted_at);
-- Partial index for listing non-deleted items (most common query pattern)
CREATE INDEX idx_media_not_deleted ON media_items(id) WHERE deleted_at IS NULL;

View file

@ -0,0 +1,11 @@
-- V18: File Management (Rename, Move, Trash)
-- Adds soft delete support for trash/recycle bin functionality
-- Add deleted_at column for soft delete (trash)
-- Stored as TEXT (RFC 3339 string written by the backend); NULL = live item.
ALTER TABLE media_items ADD COLUMN deleted_at TEXT;
-- Index for efficient trash queries
-- (list_trash filters and sorts on deleted_at; purge compares deleted_at < cutoff)
CREATE INDEX idx_media_deleted_at ON media_items(deleted_at);
-- Index for listing non-deleted items (most common query pattern)
-- NOTE(review): partial indexes require SQLite >= 3.8.0 — confirm the minimum
-- supported SQLite version for deployments.
CREATE INDEX idx_media_not_deleted ON media_items(id) WHERE deleted_at IS NULL;