From 4e91cb6679ac023af3efaa1c2c0e578085e35d11 Mon Sep 17 00:00:00 2001 From: NotAShelf Date: Sun, 8 Mar 2026 00:42:10 +0300 Subject: [PATCH] pinakes-core: add backup, session refresh, share permissions restructure, and fix integrity Signed-off-by: NotAShelf Change-Id: I17da1cf8403bd11d2a6ea31138f97e776a6a6964 --- crates/pinakes-core/src/sharing.rs | 150 +- crates/pinakes-core/src/storage/mod.rs | 295 +- crates/pinakes-core/src/storage/postgres.rs | 983 +++-- crates/pinakes-core/src/storage/sqlite.rs | 3962 ++++++++++--------- 4 files changed, 3093 insertions(+), 2297 deletions(-) diff --git a/crates/pinakes-core/src/sharing.rs b/crates/pinakes-core/src/sharing.rs index 92c8cc9..62856bc 100644 --- a/crates/pinakes-core/src/sharing.rs +++ b/crates/pinakes-core/src/sharing.rs @@ -20,6 +20,7 @@ pub struct ShareId(pub Uuid); impl ShareId { /// Creates a new share ID. + #[must_use] pub fn new() -> Self { Self(Uuid::now_v7()) } @@ -49,7 +50,8 @@ pub enum ShareTarget { impl ShareTarget { /// Returns the type of target being shared. - pub fn target_type(&self) -> &'static str { + #[must_use] + pub const fn target_type(&self) -> &'static str { match self { Self::Media { .. } => "media", Self::Collection { .. } => "collection", @@ -59,7 +61,8 @@ impl ShareTarget { } /// Returns the ID of the target being shared. - pub fn target_id(&self) -> Uuid { + #[must_use] + pub const fn target_id(&self) -> Uuid { match self { Self::Media { media_id } => media_id.0, Self::Collection { collection_id } => *collection_id, @@ -91,7 +94,8 @@ pub enum ShareRecipient { impl ShareRecipient { /// Returns the type of recipient. - pub fn recipient_type(&self) -> &'static str { + #[must_use] + pub const fn recipient_type(&self) -> &'static str { match self { Self::PublicLink { .. } => "public_link", Self::User { .. } => "user", @@ -101,75 +105,117 @@ impl ShareRecipient { } } +/// Read-access permissions granted by a share. 
+#[derive( + Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize, +)] +pub struct ShareViewPermissions { + /// Can view the content + pub can_view: bool, + /// Can download the content + pub can_download: bool, + /// Can reshare with others + pub can_reshare: bool, +} + +/// Write-access permissions granted by a share. +#[derive( + Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize, +)] +pub struct ShareMutatePermissions { + /// Can edit the content/metadata + pub can_edit: bool, + /// Can delete the content + pub can_delete: bool, + /// Can add new items (for collections) + pub can_add: bool, +} + /// Permissions granted by a share. #[derive( Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize, )] pub struct SharePermissions { - /// Can view the content - pub can_view: bool, - /// Can download the content - pub can_download: bool, - /// Can edit the content/metadata - pub can_edit: bool, - /// Can delete the content - pub can_delete: bool, - /// Can reshare with others - pub can_reshare: bool, - /// Can add new items (for collections) - pub can_add: bool, + #[serde(flatten)] + pub view: ShareViewPermissions, + #[serde(flatten)] + pub mutate: ShareMutatePermissions, } impl SharePermissions { /// Creates a new share with view-only permissions. + #[must_use] pub fn view_only() -> Self { Self { - can_view: true, - ..Default::default() + view: ShareViewPermissions { + can_view: true, + ..Default::default() + }, + mutate: ShareMutatePermissions::default(), } } /// Creates a new share with download permissions. + #[must_use] pub fn download() -> Self { Self { - can_view: true, - can_download: true, - ..Default::default() + view: ShareViewPermissions { + can_view: true, + can_download: true, + ..Default::default() + }, + mutate: ShareMutatePermissions::default(), } } /// Creates a new share with edit permissions. 
+ #[must_use] pub fn edit() -> Self { Self { - can_view: true, - can_download: true, - can_edit: true, - can_add: true, - ..Default::default() + view: ShareViewPermissions { + can_view: true, + can_download: true, + ..Default::default() + }, + mutate: ShareMutatePermissions { + can_edit: true, + can_add: true, + ..Default::default() + }, } } /// Creates a new share with full permissions. - pub fn full() -> Self { + #[must_use] + pub const fn full() -> Self { Self { - can_view: true, - can_download: true, - can_edit: true, - can_delete: true, - can_reshare: true, - can_add: true, + view: ShareViewPermissions { + can_view: true, + can_download: true, + can_reshare: true, + }, + mutate: ShareMutatePermissions { + can_edit: true, + can_delete: true, + can_add: true, + }, } } /// Merges two permission sets, taking the most permissive values. - pub fn merge(&self, other: &Self) -> Self { + #[must_use] + pub const fn merge(&self, other: &Self) -> Self { Self { - can_view: self.can_view || other.can_view, - can_download: self.can_download || other.can_download, - can_edit: self.can_edit || other.can_edit, - can_delete: self.can_delete || other.can_delete, - can_reshare: self.can_reshare || other.can_reshare, - can_add: self.can_add || other.can_add, + view: ShareViewPermissions { + can_view: self.view.can_view || other.view.can_view, + can_download: self.view.can_download || other.view.can_download, + can_reshare: self.view.can_reshare || other.view.can_reshare, + }, + mutate: ShareMutatePermissions { + can_edit: self.mutate.can_edit || other.mutate.can_edit, + can_delete: self.mutate.can_delete || other.mutate.can_delete, + can_add: self.mutate.can_add || other.mutate.can_add, + }, } } } @@ -196,6 +242,7 @@ pub struct Share { impl Share { /// Create a new public link share. + #[must_use] pub fn new_public_link( owner_id: UserId, target: ShareTarget, @@ -224,6 +271,7 @@ impl Share { } /// Create a new user share. 
+ #[must_use] pub fn new_user_share( owner_id: UserId, target: ShareTarget, @@ -251,16 +299,19 @@ impl Share { } /// Checks if the share has expired. + #[must_use] pub fn is_expired(&self) -> bool { - self.expires_at.map(|exp| exp < Utc::now()).unwrap_or(false) + self.expires_at.is_some_and(|exp| exp < Utc::now()) } /// Checks if this is a public link share. - pub fn is_public(&self) -> bool { + #[must_use] + pub const fn is_public(&self) -> bool { matches!(self.recipient, ShareRecipient::PublicLink { .. }) } /// Returns the public token if this is a public link share. + #[must_use] pub fn public_token(&self) -> Option<&str> { match &self.recipient { ShareRecipient::PublicLink { token, .. } => Some(token), @@ -308,7 +359,7 @@ impl std::str::FromStr for ShareActivityAction { "revoked" => Ok(Self::Revoked), "expired" => Ok(Self::Expired), "password_failed" => Ok(Self::PasswordFailed), - _ => Err(format!("unknown share activity action: {}", s)), + _ => Err(format!("unknown share activity action: {s}")), } } } @@ -327,6 +378,7 @@ pub struct ShareActivity { impl ShareActivity { /// Creates a new share activity entry. + #[must_use] pub fn new(share_id: ShareId, action: ShareActivityAction) -> Self { Self { id: Uuid::now_v7(), @@ -340,18 +392,21 @@ impl ShareActivity { } /// Sets the actor who performed the activity. - pub fn with_actor(mut self, actor_id: UserId) -> Self { + #[must_use] + pub const fn with_actor(mut self, actor_id: UserId) -> Self { self.actor_id = Some(actor_id); self } /// Sets the IP address of the actor. + #[must_use] pub fn with_ip(mut self, ip: &str) -> Self { self.actor_ip = Some(ip.to_string()); self } /// Sets additional details about the activity. 
+ #[must_use] pub fn with_details(mut self, details: &str) -> Self { self.details = Some(details.to_string()); self @@ -391,7 +446,7 @@ impl std::str::FromStr for ShareNotificationType { "share_revoked" => Ok(Self::ShareRevoked), "share_expiring" => Ok(Self::ShareExpiring), "share_accessed" => Ok(Self::ShareAccessed), - _ => Err(format!("unknown share notification type: {}", s)), + _ => Err(format!("unknown share notification type: {s}")), } } } @@ -409,6 +464,7 @@ pub struct ShareNotification { impl ShareNotification { /// Creates a new share notification. + #[must_use] pub fn new( user_id: UserId, share_id: ShareId, @@ -426,17 +482,23 @@ impl ShareNotification { } /// Generates a random share token. +#[must_use] pub fn generate_share_token() -> String { // Use UUIDv4 for random tokens - simple string representation Uuid::new_v4().simple().to_string() } /// Hashes a share password using Argon2id. +/// +/// # Errors +/// +/// Returns an error if hashing fails. pub fn hash_share_password(password: &str) -> Result { crate::users::auth::hash_password(password) } /// Verifies a share password against an Argon2id hash. 
+#[must_use] pub fn verify_share_password(password: &str, hash: &str) -> bool { crate::users::auth::verify_password(password, hash).unwrap_or(false) } diff --git a/crates/pinakes-core/src/storage/mod.rs b/crates/pinakes-core/src/storage/mod.rs index d965b0d..6557ee1 100644 --- a/crates/pinakes-core/src/storage/mod.rs +++ b/crates/pinakes-core/src/storage/mod.rs @@ -11,7 +11,19 @@ use crate::{ analytics::UsageEvent, enrichment::ExternalMetadata, error::Result, - model::*, + model::{ + AuditEntry, + Collection, + CollectionKind, + ContentHash, + CustomField, + ManagedBlob, + ManagedStorageStats, + MediaId, + MediaItem, + Pagination, + Tag, + }, playlists::Playlist, search::{SearchRequest, SearchResults}, social::{Comment, Rating, ShareLink}, @@ -46,47 +58,99 @@ pub struct SessionData { #[async_trait::async_trait] pub trait StorageBackend: Send + Sync + 'static { // Migrations + + /// Apply all pending database migrations. + /// Called on server startup to ensure the schema is up to date. async fn run_migrations(&self) -> Result<()>; // Root directories + + /// Register a root directory for media scanning. async fn add_root_dir(&self, path: PathBuf) -> Result<()>; + + /// List all registered root directories. async fn list_root_dirs(&self) -> Result>; + + /// Remove a root directory registration. + /// Does not delete any media items found under this directory. async fn remove_root_dir(&self, path: &std::path::Path) -> Result<()>; // Media CRUD + + /// Insert a new media item into the database. + /// Returns `Database` error if an item with the same ID already exists. async fn insert_media(&self, item: &MediaItem) -> Result<()>; + + /// Retrieve a media item by its ID. + /// Returns `NotFound` if no item exists with the given ID. async fn get_media(&self, id: MediaId) -> Result; + + /// Return the total number of media items in the database. async fn count_media(&self) -> Result; + + /// Look up a media item by its content hash. 
+ /// Returns `None` if no item matches the hash. async fn get_media_by_hash( &self, hash: &ContentHash, ) -> Result>; - /// Get a media item by its file path (used for incremental scanning) + + /// Get a media item by its file path (used for incremental scanning). + /// Returns `None` if no item exists at the given path. async fn get_media_by_path( &self, path: &std::path::Path, ) -> Result>; + + /// List media items with pagination (offset and limit). async fn list_media(&self, pagination: &Pagination) -> Result>; + + /// Update an existing media item's metadata. + /// Returns `NotFound` if the item does not exist. async fn update_media(&self, item: &MediaItem) -> Result<()>; + + /// Permanently delete a media item by ID. + /// Returns `NotFound` if the item does not exist. async fn delete_media(&self, id: MediaId) -> Result<()>; + + /// Delete all media items from the database. Returns the number deleted. async fn delete_all_media(&self) -> Result; // Tags + + /// Create a new tag with an optional parent for hierarchical tagging. async fn create_tag( &self, name: &str, parent_id: Option, ) -> Result; + + /// Retrieve a tag by its ID. Returns `NotFound` if it does not exist. async fn get_tag(&self, id: Uuid) -> Result; + + /// List all tags in the database. async fn list_tags(&self) -> Result>; + + /// Delete a tag by ID. Also removes all media-tag associations. async fn delete_tag(&self, id: Uuid) -> Result<()>; + + /// Associate a tag with a media item. No-op if already tagged. async fn tag_media(&self, media_id: MediaId, tag_id: Uuid) -> Result<()>; + + /// Remove a tag association from a media item. async fn untag_media(&self, media_id: MediaId, tag_id: Uuid) -> Result<()>; + + /// Get all tags associated with a media item. async fn get_media_tags(&self, media_id: MediaId) -> Result>; + + /// Get all descendant tags of a parent tag (recursive). async fn get_tag_descendants(&self, tag_id: Uuid) -> Result>; // Collections + + /// Create a new collection. 
Smart collections use `filter_query` for + /// automatic membership; manual collections use explicit add/remove. async fn create_collection( &self, name: &str, @@ -94,30 +158,49 @@ pub trait StorageBackend: Send + Sync + 'static { description: Option<&str>, filter_query: Option<&str>, ) -> Result; + + /// Retrieve a collection by ID. Returns `NotFound` if it does not exist. async fn get_collection(&self, id: Uuid) -> Result; + + /// List all collections. async fn list_collections(&self) -> Result>; + + /// Delete a collection and all its membership entries. async fn delete_collection(&self, id: Uuid) -> Result<()>; + + /// Add a media item to a manual collection at the given position. async fn add_to_collection( &self, collection_id: Uuid, media_id: MediaId, position: i32, ) -> Result<()>; + + /// Remove a media item from a collection. async fn remove_from_collection( &self, collection_id: Uuid, media_id: MediaId, ) -> Result<()>; + + /// Get all media items in a collection, ordered by position. async fn get_collection_members( &self, collection_id: Uuid, ) -> Result>; // Search + + /// Execute a full-text search with filters, sorting, and pagination. + /// Uses FTS5 on `SQLite` and tsvector/trigram on `PostgreSQL`. async fn search(&self, request: &SearchRequest) -> Result; // Audit + + /// Record an audit log entry for an action on a media item. async fn record_audit(&self, entry: &AuditEntry) -> Result<()>; + + /// List audit entries, optionally filtered to a specific media item. async fn list_audit_entries( &self, media_id: Option, @@ -125,16 +208,22 @@ pub trait StorageBackend: Send + Sync + 'static { ) -> Result>; // Custom fields + + /// Set a custom field on a media item (upserts if the field name exists). async fn set_custom_field( &self, media_id: MediaId, name: &str, field: &CustomField, ) -> Result<()>; + + /// Get all custom fields for a media item, keyed by field name. 
async fn get_custom_fields( &self, media_id: MediaId, ) -> Result>; + + /// Delete a custom field from a media item by name. async fn delete_custom_field( &self, media_id: MediaId, @@ -142,8 +231,13 @@ pub trait StorageBackend: Send + Sync + 'static { ) -> Result<()>; // Batch operations (transactional where supported) + + /// Delete multiple media items in a single transaction. + /// Returns the number of items actually deleted. async fn batch_delete_media(&self, ids: &[MediaId]) -> Result; + /// Apply multiple tags to multiple media items. + /// Returns the number of new associations created. async fn batch_tag_media( &self, media_ids: &[MediaId], @@ -151,11 +245,15 @@ pub trait StorageBackend: Send + Sync + 'static { ) -> Result; // Integrity + + /// List all media item IDs, paths, and content hashes. + /// Used by integrity checks to verify files still exist on disk. async fn list_media_paths( &self, ) -> Result>; - // Batch metadata update (must be implemented per backend for bulk SQL) + /// Update metadata fields on multiple media items at once. + /// Only non-`None` fields are applied. Returns the number updated. #[allow(clippy::too_many_arguments)] async fn batch_update_media( &self, @@ -169,6 +267,8 @@ pub trait StorageBackend: Send + Sync + 'static { ) -> Result; // Saved searches + + /// Persist a search query so it can be re-executed later. async fn save_search( &self, id: uuid::Uuid, @@ -176,20 +276,41 @@ pub trait StorageBackend: Send + Sync + 'static { query: &str, sort_order: Option<&str>, ) -> Result<()>; + + /// List all saved searches. async fn list_saved_searches(&self) -> Result>; + + /// Get a single saved search by ID. + async fn get_saved_search( + &self, + id: uuid::Uuid, + ) -> Result; + + /// Delete a saved search by ID. async fn delete_saved_search(&self, id: uuid::Uuid) -> Result<()>; // Duplicates + + /// Find groups of media items with identical content hashes. 
async fn find_duplicates(&self) -> Result>>; + + /// Find groups of visually similar media using perceptual hashing. + /// `threshold` is the maximum Hamming distance to consider a match. async fn find_perceptual_duplicates( &self, threshold: u32, ) -> Result>>; // Database management + + /// Collect aggregate database statistics (counts, size, backend name). async fn database_stats(&self) -> Result; + + /// Reclaim unused disk space (VACUUM for `SQLite`, VACUUM FULL for Postgres). async fn vacuum(&self) -> Result<()>; + + /// Delete all data from all tables. Destructive; used in tests. async fn clear_all_data(&self) -> Result<()>; // Thumbnail helpers @@ -200,18 +321,29 @@ pub trait StorageBackend: Send + Sync + 'static { ) -> Result>; // Library statistics + + /// Compute comprehensive library statistics (sizes, counts by type, + /// top tags/collections, duplicates). async fn library_statistics(&self) -> Result; // User Management + + /// List all registered users. async fn list_users(&self) -> Result>; + + /// Get a user by ID. Returns `NotFound` if no such user exists. async fn get_user( &self, id: crate::users::UserId, ) -> Result; + + /// Look up a user by username. Returns `NotFound` if not found. async fn get_user_by_username( &self, username: &str, ) -> Result; + + /// Create a new user with the given credentials and role. async fn create_user( &self, username: &str, @@ -219,6 +351,9 @@ pub trait StorageBackend: Send + Sync + 'static { role: crate::config::UserRole, profile: Option, ) -> Result; + + /// Update a user's password, role, or profile. Only non-`None` fields + /// are applied. async fn update_user( &self, id: crate::users::UserId, @@ -226,17 +361,25 @@ pub trait StorageBackend: Send + Sync + 'static { role: Option, profile: Option, ) -> Result; + + /// Delete a user and all associated sessions. async fn delete_user(&self, id: crate::users::UserId) -> Result<()>; + + /// Get the library access grants for a user. 
async fn get_user_libraries( &self, user_id: crate::users::UserId, ) -> Result>; + + /// Grant a user access to a library root path with the given permission. async fn grant_library_access( &self, user_id: crate::users::UserId, root_path: &str, permission: crate::users::LibraryPermission, ) -> Result<()>; + + /// Revoke a user's access to a library root path. async fn revoke_library_access( &self, user_id: crate::users::UserId, @@ -255,23 +398,21 @@ pub trait StorageBackend: Send + Sync + 'static { // Default implementation: get the media item's path and check against // user's library access let media = self.get_media(media_id).await?; - let path_str = media.path.to_string_lossy().to_string(); - // Get user's library permissions let libraries = self.get_user_libraries(user_id).await?; // If user has no library restrictions, they have no access (unless they're // admin) This default impl requires at least one matching library - // permission + // permission. Use Path::starts_with for component-wise matching to prevent + // prefix collisions (e.g. /data/user1 vs /data/user1-other). for lib in &libraries { - if path_str.starts_with(&lib.root_path) { + if media.path.starts_with(std::path::Path::new(&lib.root_path)) { return Ok(lib.permission); } } Err(crate::error::PinakesError::Authorization(format!( - "user {} has no access to media {}", - user_id, media_id + "user {user_id} has no access to media {media_id}" ))) } @@ -299,6 +440,8 @@ pub trait StorageBackend: Send + Sync + 'static { } } + /// Rate a media item (1-5 stars) with an optional text review. + /// Upserts: replaces any existing rating by the same user. async fn rate_media( &self, user_id: UserId, @@ -306,14 +449,21 @@ pub trait StorageBackend: Send + Sync + 'static { stars: u8, review: Option<&str>, ) -> Result; + + /// Get all ratings for a media item. async fn get_media_ratings(&self, media_id: MediaId) -> Result>; + + /// Get a specific user's rating for a media item, if any. 
async fn get_user_rating( &self, user_id: UserId, media_id: MediaId, ) -> Result>; + + /// Delete a rating by ID. async fn delete_rating(&self, id: Uuid) -> Result<()>; + /// Add a comment on a media item, optionally as a reply to another comment. async fn add_comment( &self, user_id: UserId, @@ -321,31 +471,44 @@ pub trait StorageBackend: Send + Sync + 'static { text: &str, parent_id: Option, ) -> Result; + + /// Get all comments for a media item. async fn get_media_comments(&self, media_id: MediaId) -> Result>; + + /// Delete a comment by ID. async fn delete_comment(&self, id: Uuid) -> Result<()>; + /// Mark a media item as a favorite for a user. async fn add_favorite( &self, user_id: UserId, media_id: MediaId, ) -> Result<()>; + + /// Remove a media item from a user's favorites. async fn remove_favorite( &self, user_id: UserId, media_id: MediaId, ) -> Result<()>; + + /// Get a user's favorited media items with pagination. async fn get_user_favorites( &self, user_id: UserId, pagination: &Pagination, ) -> Result>; + + /// Check whether a media item is in a user's favorites. async fn is_favorite( &self, user_id: UserId, media_id: MediaId, ) -> Result; + /// Create a public share link for a media item with optional password + /// and expiry. async fn create_share_link( &self, media_id: MediaId, @@ -354,10 +517,17 @@ pub trait StorageBackend: Send + Sync + 'static { password_hash: Option<&str>, expires_at: Option>, ) -> Result; + + /// Look up a share link by its token. Returns `NotFound` if invalid. async fn get_share_link(&self, token: &str) -> Result; + + /// Increment the view counter for a share link. async fn increment_share_views(&self, token: &str) -> Result<()>; + + /// Delete a share link by ID. async fn delete_share_link(&self, id: Uuid) -> Result<()>; + /// Create a new playlist. Smart playlists auto-populate via `filter_query`. 
async fn create_playlist( &self, owner_id: UserId, @@ -367,11 +537,18 @@ pub trait StorageBackend: Send + Sync + 'static { is_smart: bool, filter_query: Option<&str>, ) -> Result; + + /// Get a playlist by ID. Returns `NotFound` if it does not exist. async fn get_playlist(&self, id: Uuid) -> Result; + + /// List playlists, optionally filtered to a specific owner. async fn list_playlists( &self, owner_id: Option, ) -> Result>; + + /// Update a playlist's name, description, or visibility. + /// Only non-`None` fields are applied. async fn update_playlist( &self, id: Uuid, @@ -379,22 +556,32 @@ pub trait StorageBackend: Send + Sync + 'static { description: Option<&str>, is_public: Option, ) -> Result; + + /// Delete a playlist and all its item associations. async fn delete_playlist(&self, id: Uuid) -> Result<()>; + + /// Add a media item to a playlist at the given position. async fn add_to_playlist( &self, playlist_id: Uuid, media_id: MediaId, position: i32, ) -> Result<()>; + + /// Remove a media item from a playlist. async fn remove_from_playlist( &self, playlist_id: Uuid, media_id: MediaId, ) -> Result<()>; + + /// Get all media items in a playlist, ordered by position. async fn get_playlist_items( &self, playlist_id: Uuid, ) -> Result>; + + /// Move a media item to a new position within a playlist. async fn reorder_playlist( &self, playlist_id: Uuid, @@ -402,69 +589,106 @@ pub trait StorageBackend: Send + Sync + 'static { new_position: i32, ) -> Result<()>; + /// Record a usage/analytics event (play, view, download, etc.). async fn record_usage_event(&self, event: &UsageEvent) -> Result<()>; + + /// Query usage events, optionally filtered by media item and/or user. async fn get_usage_events( &self, media_id: Option, user_id: Option, limit: u64, ) -> Result>; + + /// Get the most-viewed media items with their view counts. async fn get_most_viewed(&self, limit: u64) -> Result>; + + /// Get media items recently viewed by a user, most recent first. 
async fn get_recently_viewed( &self, user_id: UserId, limit: u64, ) -> Result>; + + /// Update playback/watch progress for a user on a media item (in seconds). async fn update_watch_progress( &self, user_id: UserId, media_id: MediaId, progress_secs: f64, ) -> Result<()>; + + /// Get the stored watch progress (in seconds) for a user/media pair. async fn get_watch_progress( &self, user_id: UserId, media_id: MediaId, ) -> Result>; + + /// Delete usage events older than the given timestamp. + /// Returns the number of events deleted. async fn cleanup_old_events(&self, before: DateTime) -> Result; + /// Add a subtitle track for a media item. async fn add_subtitle(&self, subtitle: &Subtitle) -> Result<()>; + + /// Get all subtitle tracks for a media item. async fn get_media_subtitles( &self, media_id: MediaId, ) -> Result>; + + /// Delete a subtitle track by ID. async fn delete_subtitle(&self, id: Uuid) -> Result<()>; + + /// Adjust the timing offset (in milliseconds) for a subtitle track. async fn update_subtitle_offset( &self, id: Uuid, offset_ms: i64, ) -> Result<()>; + /// Store metadata fetched from an external source (e.g., `MusicBrainz`, + /// `TMDb`). async fn store_external_metadata( &self, meta: &ExternalMetadata, ) -> Result<()>; + + /// Get all external metadata records for a media item. async fn get_external_metadata( &self, media_id: MediaId, ) -> Result>; + + /// Delete an external metadata record by ID. async fn delete_external_metadata(&self, id: Uuid) -> Result<()>; + /// Create a new transcoding session for a media item. async fn create_transcode_session( &self, session: &TranscodeSession, ) -> Result<()>; + + /// Get a transcoding session by ID. async fn get_transcode_session(&self, id: Uuid) -> Result; + + /// List transcoding sessions, optionally filtered to a media item. async fn list_transcode_sessions( &self, media_id: Option, ) -> Result>; + + /// Update the status and progress of a transcoding session. 
async fn update_transcode_status( &self, id: Uuid, status: TranscodeStatus, progress: f32, ) -> Result<()>; + + /// Delete transcode sessions that expired before the given timestamp. + /// Returns the number of sessions cleaned up. async fn cleanup_expired_transcodes( &self, before: DateTime, @@ -479,16 +703,26 @@ pub trait StorageBackend: Send + Sync + 'static { session_token: &str, ) -> Result>; - /// Update the last_accessed timestamp for a session + /// Update the `last_accessed` timestamp for a session async fn touch_session(&self, session_token: &str) -> Result<()>; + /// Extend a session's expiry time. + /// Only extends sessions that have not already expired. + /// Returns the new expiry time, or None if the session was not found or + /// already expired. + async fn extend_session( + &self, + session_token: &str, + new_expires_at: DateTime, + ) -> Result>>; + /// Delete a specific session async fn delete_session(&self, session_token: &str) -> Result<()>; /// Delete all sessions for a specific user async fn delete_user_sessions(&self, username: &str) -> Result; - /// Delete all expired sessions (where expires_at < now) + /// Delete all expired sessions (where `expires_at` < now) async fn delete_expired_sessions(&self) -> Result; /// List all active sessions (optionally filtered by username) @@ -533,7 +767,7 @@ pub trait StorageBackend: Send + Sync + 'static { /// List all series with book counts async fn list_series(&self) -> Result>; - /// Get all books in a series, ordered by series_index + /// Get all books in a series, ordered by `series_index` async fn get_series_books(&self, series_name: &str) -> Result>; @@ -591,10 +825,10 @@ pub trait StorageBackend: Send + Sync + 'static { /// deleted. 
async fn decrement_blob_ref(&self, hash: &ContentHash) -> Result; - /// Update the last_verified timestamp for a blob + /// Update the `last_verified` timestamp for a blob async fn update_blob_verified(&self, hash: &ContentHash) -> Result<()>; - /// List orphaned blobs (reference_count = 0) + /// List orphaned blobs (`reference_count` = 0) async fn list_orphaned_blobs(&self) -> Result>; /// Delete a blob record @@ -635,7 +869,7 @@ pub trait StorageBackend: Send + Sync + 'static { /// Delete a sync device async fn delete_device(&self, id: crate::sync::DeviceId) -> Result<()>; - /// Update the last_seen_at timestamp for a device + /// Update the `last_seen_at` timestamp for a device async fn touch_device(&self, id: crate::sync::DeviceId) -> Result<()>; /// Record a change in the sync log @@ -830,13 +1064,19 @@ pub trait StorageBackend: Send + Sync + 'static { user_id: UserId, ) -> Result>; - /// Mark a notification as read - async fn mark_notification_read(&self, id: Uuid) -> Result<()>; + /// Mark a notification as read. Scoped to `user_id` to prevent cross-user + /// modification. Silently no-ops if the notification belongs to a different + /// user, which avoids leaking notification existence via error responses. + async fn mark_notification_read( + &self, + id: Uuid, + user_id: UserId, + ) -> Result<()>; /// Mark all notifications as read for a user async fn mark_all_notifications_read(&self, user_id: UserId) -> Result<()>; - /// Rename a media item (changes file_name and updates path accordingly). + /// Rename a media item (changes `file_name` and updates path accordingly). /// For external storage, this actually renames the file on disk. /// For managed storage, this only updates the metadata. /// Returns the old path for sync log recording. @@ -866,7 +1106,7 @@ pub trait StorageBackend: Send + Sync + 'static { Ok(results) } - /// Soft delete a media item (set deleted_at timestamp). + /// Soft delete a media item (set `deleted_at` timestamp). 
async fn soft_delete_media(&self, id: MediaId) -> Result<()>; /// Restore a soft-deleted media item. @@ -919,15 +1159,24 @@ pub trait StorageBackend: Send + Sync + 'static { depth: u32, ) -> Result; - /// Resolve unresolved links by matching target_path against media item paths. - /// Returns the number of links that were resolved. + /// Resolve unresolved links by matching `target_path` against media item + /// paths. Returns the number of links that were resolved. async fn resolve_links(&self) -> Result; - /// Update the links_extracted_at timestamp for a media item. + /// Update the `links_extracted_at` timestamp for a media item. async fn mark_links_extracted(&self, media_id: MediaId) -> Result<()>; - /// Get count of unresolved links (links where target_media_id is NULL). + /// Get count of unresolved links (links where `target_media_id` is NULL). async fn count_unresolved_links(&self) -> Result; + + /// Create a backup of the database to the specified path. + /// Default implementation returns unsupported; `SQLite` overrides with + /// VACUUM INTO. + async fn backup(&self, _dest: &std::path::Path) -> Result<()> { + Err(crate::error::PinakesError::InvalidOperation( + "backup not supported for this storage backend".to_string(), + )) + } } /// Comprehensive library statistics. 
diff --git a/crates/pinakes-core/src/storage/postgres.rs b/crates/pinakes-core/src/storage/postgres.rs index b2c934b..e0caeee 100644 --- a/crates/pinakes-core/src/storage/postgres.rs +++ b/crates/pinakes-core/src/storage/postgres.rs @@ -11,8 +11,23 @@ use crate::{ config::PostgresConfig, error::{PinakesError, Result}, media_type::MediaType, - model::*, - search::*, + model::{ + AuditAction, + AuditEntry, + Collection, + CollectionKind, + ContentHash, + CustomField, + CustomFieldType, + ManagedBlob, + ManagedStorageStats, + MediaId, + MediaItem, + Pagination, + StorageMode, + Tag, + }, + search::{SearchQuery, SearchRequest, SearchResults, SortOrder}, storage::StorageBackend, }; @@ -30,6 +45,12 @@ fn escape_like_pattern(input: &str) -> String { } impl PostgresBackend { + /// Creates a new `PostgresBackend` connected to the given configuration. + /// + /// # Errors + /// + /// Returns an error if the TLS connector cannot be built, the connection pool + /// cannot be created, or the initial connectivity check fails. 
pub async fn new(config: &PostgresConfig) -> Result { let mut pool_config = PoolConfig::new(); pool_config.host = Some(config.host.clone()); @@ -125,7 +146,7 @@ fn media_type_from_string(s: &str) -> Result { .map_err(|_| PinakesError::Database(format!("unknown media type: {s}"))) } -fn audit_action_to_string(action: &AuditAction) -> String { +fn audit_action_to_string(action: AuditAction) -> String { // AuditAction uses serde rename_all = "snake_case" serde_json::to_value(action) .ok() @@ -138,7 +159,7 @@ fn audit_action_from_string(s: &str) -> Result { .map_err(|_| PinakesError::Database(format!("unknown audit action: {s}"))) } -fn collection_kind_to_string(kind: &CollectionKind) -> String { +fn collection_kind_to_string(kind: CollectionKind) -> String { serde_json::to_value(kind) .ok() .and_then(|v| v.as_str().map(String::from)) @@ -151,7 +172,7 @@ fn collection_kind_from_string(s: &str) -> Result { ) } -fn custom_field_type_to_string(ft: &CustomFieldType) -> String { +fn custom_field_type_to_string(ft: CustomFieldType) -> String { serde_json::to_value(ft) .ok() .and_then(|v| v.as_str().map(String::from)) @@ -183,7 +204,7 @@ fn row_to_media_item(row: &Row) -> Result { file_name: row.get("file_name"), media_type, content_hash: ContentHash(row.get("content_hash")), - file_size: row.get::<_, i64>("file_size") as u64, + file_size: row.get::<_, i64>("file_size").cast_unsigned(), title: row.get("title"), artist: row.get("artist"), album: row.get("album"), @@ -223,13 +244,13 @@ fn row_to_media_item(row: &Row) -> Result { }) } -fn row_to_tag(row: &Row) -> Result { - Ok(Tag { +fn row_to_tag(row: &Row) -> Tag { + Tag { id: row.get("id"), name: row.get("name"), parent_id: row.get("parent_id"), created_at: row.get("created_at"), - }) + } } fn row_to_collection(row: &Row) -> Result { @@ -262,14 +283,15 @@ fn row_to_audit_entry(row: &Row) -> Result { } /// Recursively builds a tsquery string and collects parameters for a -/// SearchQuery. +/// `SearchQuery`. 
/// /// Returns a tuple of: /// - `sql_fragment`: the WHERE clause fragment (may include $N placeholders) /// - `params`: boxed parameter values matching the placeholders -/// - `type_filters`: collected TypeFilter values to append as extra WHERE +/// - `type_filters`: collected `TypeFilter` values to append as extra WHERE +/// clauses +/// - `tag_filters`: collected `TagFilter` values to append as extra WHERE /// clauses -/// - `tag_filters`: collected TagFilter values to append as extra WHERE clauses /// /// `param_offset` is the current 1-based parameter index; the function returns /// the next available offset. @@ -329,22 +351,22 @@ fn build_search_inner( let words: Vec<&str> = sanitized.split_whitespace().collect(); if let Some((last, rest)) = words.split_last() { let prefix_parts: Vec = - rest.iter().map(|w| w.to_string()).collect(); + rest.iter().map(std::string::ToString::to_string).collect(); if prefix_parts.is_empty() { - format!("{}:*", last) + format!("{last}:*") } else { format!("{} & {}:*", prefix_parts.join(" & "), last) } } else { - format!("{}:*", sanitized) + format!("{sanitized}:*") } } else { - format!("{}:*", sanitized) + format!("{sanitized}:*") }; params.push(Box::new(text.clone())); params.push(Box::new(prefix_query)); - params.push(Box::new(format!("%{}%", escape_like_pattern(&text)))); + params.push(Box::new(format!("%{}%", escape_like_pattern(text)))); params.push(Box::new(text.clone())); params.push(Box::new(text.clone())); params.push(Box::new(text.clone())); @@ -386,7 +408,7 @@ fn build_search_inner( params.push(Box::new(term.clone())); params.push(Box::new(term.clone())); params.push(Box::new(term.clone())); - params.push(Box::new(format!("%{}%", escape_like_pattern(&term)))); + params.push(Box::new(format!("%{}%", escape_like_pattern(term)))); Ok(format!( "(similarity(COALESCE(title, ''), ${idx_title}) > 0.3 OR \ similarity(COALESCE(artist, ''), ${idx_artist}) > 0.3 OR \ @@ -515,7 +537,7 @@ fn build_search_inner( } } -/// Convert a 
DateValue to a PostgreSQL datetime comparison expression +/// Convert a `DateValue` to a `PostgreSQL` datetime comparison expression fn date_value_to_postgres_expr( col: &str, value: &crate::search::DateValue, @@ -555,20 +577,20 @@ fn date_value_to_postgres_expr( } } -fn sort_order_clause(sort: &SortOrder) -> &'static str { +const fn sort_order_clause(sort: SortOrder) -> &'static str { match sort { - SortOrder::Relevance => "created_at DESC", // fallback when no FTS SortOrder::DateAsc => "created_at ASC", - SortOrder::DateDesc => "created_at DESC", SortOrder::NameAsc => "file_name ASC", SortOrder::NameDesc => "file_name DESC", SortOrder::SizeAsc => "file_size ASC", SortOrder::SizeDesc => "file_size DESC", + // Relevance falls back to created_at DESC when no FTS + SortOrder::Relevance | SortOrder::DateDesc => "created_at DESC", } } /// Returns a relevance-aware ORDER BY when there's an active FTS query. -fn sort_order_clause_with_rank(sort: &SortOrder, has_fts: bool) -> String { +fn sort_order_clause_with_rank(sort: SortOrder, has_fts: bool) -> String { match sort { SortOrder::Relevance if has_fts => { "ts_rank(search_vector, query) DESC".to_string() @@ -656,7 +678,7 @@ impl StorageBackend for PostgresBackend { let media_type_str = media_type_to_string(&item.media_type); let path_str = item.path.to_string_lossy().to_string(); - let file_size = item.file_size as i64; + let file_size = item.file_size.cast_signed(); client .execute( @@ -705,7 +727,7 @@ impl StorageBackend for PostgresBackend { // Insert custom fields for (name, field) in &item.custom_fields { - let ft = custom_field_type_to_string(&field.field_type); + let ft = custom_field_type_to_string(field.field_type); client .execute( "INSERT INTO custom_fields (media_id, field_name, field_type, \ @@ -735,7 +757,7 @@ impl StorageBackend for PostgresBackend { ) .await?; let count: i64 = row.get(0); - Ok(count as u64) + Ok(count.cast_unsigned()) } async fn get_media(&self, id: MediaId) -> Result { @@ -878,8 +900,8 
@@ impl StorageBackend for PostgresBackend { let rows = client .query(&sql, &[ - &(pagination.limit as i64), - &(pagination.offset as i64), + &(pagination.limit.cast_signed()), + &(pagination.offset.cast_signed()), ]) .await?; @@ -925,7 +947,7 @@ impl StorageBackend for PostgresBackend { } async fn update_media(&self, item: &MediaItem) -> Result<()> { - let client = self + let mut client = self .pool .get() .await @@ -933,9 +955,21 @@ impl StorageBackend for PostgresBackend { let media_type_str = media_type_to_string(&item.media_type); let path_str = item.path.to_string_lossy().to_string(); - let file_size = item.file_size as i64; + let file_size = item.file_size.cast_signed(); + let thumbnail_path = item + .thumbnail_path + .as_ref() + .map(|p| p.to_string_lossy().to_string()); - let rows_affected = client + // Wrap the UPDATE + custom-fields replace in a single transaction so a + // crash between the DELETE and the INSERTs cannot leave the row with no + // custom fields. + let txn = client + .transaction() + .await + .map_err(|e| PinakesError::Database(format!("begin transaction: {e}")))?; + + let rows_affected = txn .execute( "UPDATE media_items SET path = $2, file_name = $3, media_type = $4, content_hash = \ @@ -945,8 +979,8 @@ impl StorageBackend for PostgresBackend { year = $11, duration_secs = $12, description = $13, thumbnail_path = $14, date_taken = $15, latitude = $16, \ longitude = $17, - camera_make = $18, camera_model = $19, rating = $20, \ - perceptual_hash = $21, updated_at = $22 + camera_make = $18, camera_model = $19, rating = $20, + perceptual_hash = $21, updated_at = $22 WHERE id = $1", &[ &item.id.0, @@ -962,10 +996,7 @@ impl StorageBackend for PostgresBackend { &item.year, &item.duration_secs, &item.description, - &item - .thumbnail_path - .as_ref() - .map(|p| p.to_string_lossy().to_string()), + &thumbnail_path, &item.date_taken, &item.latitude, &item.longitude, @@ -982,16 +1013,16 @@ impl StorageBackend for PostgresBackend { return 
Err(PinakesError::NotFound(format!("media item {}", item.id))); } - // Replace custom fields: delete all then re-insert - client + // Replace custom fields atomically within the same transaction + txn .execute("DELETE FROM custom_fields WHERE media_id = $1", &[&item .id .0]) .await?; for (name, field) in &item.custom_fields { - let ft = custom_field_type_to_string(&field.field_type); - client + let ft = custom_field_type_to_string(field.field_type); + txn .execute( "INSERT INTO custom_fields (media_id, field_name, field_type, \ field_value) @@ -1001,6 +1032,10 @@ impl StorageBackend for PostgresBackend { .await?; } + txn.commit().await.map_err(|e| { + PinakesError::Database(format!("commit transaction: {e}")) + })?; + Ok(()) } @@ -1036,7 +1071,7 @@ impl StorageBackend for PostgresBackend { client.execute("DELETE FROM media_items", &[]).await?; - Ok(count as u64) + Ok(count.cast_unsigned()) } // Batch Operations @@ -1223,7 +1258,7 @@ impl StorageBackend for PostgresBackend { .await? .ok_or_else(|| PinakesError::TagNotFound(id.to_string()))?; - row_to_tag(&row) + Ok(row_to_tag(&row)) } async fn list_tags(&self) -> Result> { @@ -1240,7 +1275,7 @@ impl StorageBackend for PostgresBackend { ) .await?; - rows.iter().map(row_to_tag).collect() + Ok(rows.iter().map(row_to_tag).collect()) } async fn delete_tag(&self, id: Uuid) -> Result<()> { @@ -1314,7 +1349,7 @@ impl StorageBackend for PostgresBackend { ) .await?; - rows.iter().map(row_to_tag).collect() + Ok(rows.iter().map(row_to_tag).collect()) } async fn get_tag_descendants(&self, tag_id: Uuid) -> Result> { @@ -1341,7 +1376,7 @@ impl StorageBackend for PostgresBackend { ) .await?; - rows.iter().map(row_to_tag).collect() + Ok(rows.iter().map(row_to_tag).collect()) } // Collections @@ -1360,7 +1395,7 @@ impl StorageBackend for PostgresBackend { let id = Uuid::now_v7(); let now = Utc::now(); - let kind_str = collection_kind_to_string(&kind); + let kind_str = collection_kind_to_string(kind); client .execute( @@ -1628,7 
+1663,7 @@ impl StorageBackend for PostgresBackend { format!("{where_clause} AND {}", extra_where.join(" AND ")) }; - let order_by = sort_order_clause_with_rank(&request.sort, has_fts); + let order_by = sort_order_clause_with_rank(request.sort, has_fts); // For relevance sorting with FTS, we need a CTE or subquery to define // 'query' @@ -1698,8 +1733,8 @@ impl StorageBackend for PostgresBackend { let total_count: i64 = count_row.get(0); // Add pagination params - params.push(Box::new(request.pagination.limit as i64)); - params.push(Box::new(request.pagination.offset as i64)); + params.push(Box::new(request.pagination.limit.cast_signed())); + params.push(Box::new(request.pagination.offset.cast_signed())); let select_params: Vec<&(dyn ToSql + Sync)> = params .iter() @@ -1747,7 +1782,7 @@ impl StorageBackend for PostgresBackend { Ok(SearchResults { items, - total_count: total_count as u64, + total_count: total_count.cast_unsigned(), }) } @@ -1759,7 +1794,7 @@ impl StorageBackend for PostgresBackend { .await .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?; - let action_str = audit_action_to_string(&entry.action); + let action_str = audit_action_to_string(entry.action); let media_id = entry.media_id.map(|m| m.0); client @@ -1801,8 +1836,8 @@ impl StorageBackend for PostgresBackend { LIMIT $2 OFFSET $3", &[ &mid.0, - &(pagination.limit as i64), - &(pagination.offset as i64), + &(pagination.limit.cast_signed()), + &(pagination.offset.cast_signed()), ], ) .await? @@ -1814,7 +1849,10 @@ impl StorageBackend for PostgresBackend { FROM audit_log ORDER BY timestamp DESC LIMIT $1 OFFSET $2", - &[&(pagination.limit as i64), &(pagination.offset as i64)], + &[ + &(pagination.limit.cast_signed()), + &(pagination.offset.cast_signed()), + ], ) .await? 
}, @@ -1836,7 +1874,7 @@ impl StorageBackend for PostgresBackend { .await .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?; - let ft = custom_field_type_to_string(&field.field_type); + let ft = custom_field_type_to_string(field.field_type); client .execute( @@ -1914,10 +1952,22 @@ impl StorageBackend for PostgresBackend { let rows = client .query( - "SELECT * FROM media_items WHERE content_hash IN ( - SELECT content_hash FROM media_items GROUP BY content_hash \ - HAVING COUNT(*) > 1 - ) ORDER BY content_hash, created_at", + "SELECT id, path, file_name, media_type, content_hash, file_size, + title, artist, album, genre, year, duration_secs, \ + description, + thumbnail_path, file_mtime, date_taken, latitude, \ + longitude, + camera_make, camera_model, rating, perceptual_hash, + storage_mode, original_filename, uploaded_at, \ + storage_key, + created_at, updated_at, deleted_at, links_extracted_at + FROM media_items + WHERE deleted_at IS NULL AND content_hash IN ( + SELECT content_hash FROM media_items + WHERE deleted_at IS NULL + GROUP BY content_hash HAVING COUNT(*) > 1 + ) + ORDER BY content_hash, created_at", &[], ) .await?; @@ -1964,7 +2014,7 @@ impl StorageBackend for PostgresBackend { let mut current_hash = String::new(); for item in items { if item.content_hash.0 != current_hash { - current_hash = item.content_hash.0.clone(); + current_hash.clone_from(&item.content_hash.0); groups.push(Vec::new()); } if let Some(group) = groups.last_mut() { @@ -1979,6 +2029,7 @@ impl StorageBackend for PostgresBackend { &self, threshold: u32, ) -> Result>> { + use image_hasher::ImageHash; let client = self .pool .get() @@ -1991,7 +2042,9 @@ impl StorageBackend for PostgresBackend { "SELECT id, path, file_name, media_type, content_hash, file_size, title, artist, album, genre, year, duration_secs, description, thumbnail_path, file_mtime, date_taken, latitude, longitude, - camera_make, camera_model, rating, perceptual_hash, created_at, updated_at + camera_make, 
camera_model, rating, perceptual_hash, + storage_mode, original_filename, uploaded_at, storage_key, + created_at, updated_at, deleted_at, links_extracted_at FROM media_items WHERE perceptual_hash IS NOT NULL ORDER BY id", &[], ) @@ -2035,7 +2088,6 @@ impl StorageBackend for PostgresBackend { } // Compare each pair and build groups - use image_hasher::ImageHash; let mut groups: Vec> = Vec::new(); let mut grouped_indices: std::collections::HashSet = std::collections::HashSet::new(); @@ -2119,11 +2171,11 @@ impl StorageBackend for PostgresBackend { .get(0); Ok(crate::storage::DatabaseStats { - media_count: media_count as u64, - tag_count: tag_count as u64, - collection_count: collection_count as u64, - audit_count: audit_count as u64, - database_size_bytes: database_size_bytes as u64, + media_count: media_count.cast_unsigned(), + tag_count: tag_count.cast_unsigned(), + collection_count: collection_count.cast_unsigned(), + audit_count: audit_count.cast_unsigned(), + database_size_bytes: database_size_bytes.cast_unsigned(), backend_name: "postgres".to_string(), }) } @@ -2232,6 +2284,32 @@ impl StorageBackend for PostgresBackend { Ok(results) } + async fn get_saved_search( + &self, + id: Uuid, + ) -> Result { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?; + let row = client + .query_opt( + "SELECT id, name, query, sort_order, created_at FROM saved_searches \ + WHERE id = $1", + &[&id], + ) + .await? + .ok_or_else(|| PinakesError::NotFound(format!("saved search {id}")))?; + Ok(crate::model::SavedSearch { + id: row.get(0), + name: row.get(1), + query: row.get(2), + sort_order: row.get(3), + created_at: row.get(4), + }) + } + async fn delete_saved_search(&self, id: Uuid) -> Result<()> { let client = self .pool @@ -2359,7 +2437,7 @@ impl StorageBackend for PostgresBackend { ) .await? 
.ok_or_else(|| { - PinakesError::NotFound(format!("user with username {}", username)) + PinakesError::NotFound(format!("user with username {username}")) })?; let user_id: uuid::Uuid = row.get::<_, uuid::Uuid>(0); let profile = self.load_user_profile(user_id).await?; @@ -2414,7 +2492,7 @@ impl StorageBackend for PostgresBackend { crate::users::UserProfile { avatar_path: None, bio: None, - preferences: Default::default(), + preferences: crate::users::UserPreferences::default(), } }; @@ -2449,7 +2527,7 @@ impl StorageBackend for PostgresBackend { let mut param_idx = 2; let pw_update = if password_hash.is_some() { - let s = format!("password_hash = ${}", param_idx); + let s = format!("password_hash = ${param_idx}"); param_idx += 1; Some(s) } else { @@ -2619,7 +2697,7 @@ impl StorageBackend for PostgresBackend { .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?; let id = Uuid::now_v7(); let now = Utc::now(); - let stars_i32 = stars as i32; + let stars_i32 = i32::from(stars); let row = client .query_one( "INSERT INTO ratings (id, user_id, media_id, stars, review_text, \ @@ -2665,7 +2743,7 @@ impl StorageBackend for PostgresBackend { id: row.get("id"), user_id: crate::users::UserId(row.get("user_id")), media_id: MediaId(row.get("media_id")), - stars: row.get::<_, i32>("stars") as u8, + stars: u8::try_from(row.get::<_, i32>("stars")).unwrap_or(0), review_text: row.get("review_text"), created_at: row.get("created_at"), } @@ -2696,7 +2774,7 @@ impl StorageBackend for PostgresBackend { id: row.get("id"), user_id: crate::users::UserId(row.get("user_id")), media_id: MediaId(row.get("media_id")), - stars: row.get::<_, i32>("stars") as u8, + stars: u8::try_from(row.get::<_, i32>("stars")).unwrap_or(0), review_text: row.get("review_text"), created_at: row.get("created_at"), } @@ -2847,14 +2925,15 @@ impl StorageBackend for PostgresBackend { m.file_size, m.title, m.artist, m.album, m.genre, m.year, \ m.duration_secs, m.description, m.thumbnail_path, m.file_mtime, \ 
m.date_taken, m.latitude, m.longitude, m.camera_make, \ - m.camera_model, m.rating, m.perceptual_hash, m.created_at, \ - m.updated_at FROM media_items m JOIN favorites f ON m.id = \ - f.media_id WHERE f.user_id = $1 ORDER BY f.created_at DESC LIMIT $2 \ - OFFSET $3", + m.camera_model, m.rating, m.perceptual_hash, m.storage_mode, \ + m.original_filename, m.uploaded_at, m.storage_key, m.created_at, \ + m.updated_at, m.deleted_at, m.links_extracted_at FROM media_items m \ + JOIN favorites f ON m.id = f.media_id WHERE f.user_id = $1 ORDER BY \ + f.created_at DESC LIMIT $2 OFFSET $3", &[ &user_id.0, - &(pagination.limit as i64), - &(pagination.offset as i64), + &(pagination.limit.cast_signed()), + &(pagination.offset.cast_signed()), ], ) .await?; @@ -2987,7 +3066,9 @@ impl StorageBackend for PostgresBackend { token: row.get("token"), password_hash: row.get("password_hash"), expires_at: row.get("expires_at"), - view_count: row.get::<_, i32>("view_count") as u64, + view_count: u64::from( + u32::try_from(row.get::<_, i32>("view_count")).unwrap_or(0), + ), created_at: row.get("created_at"), }) } @@ -3261,9 +3342,11 @@ impl StorageBackend for PostgresBackend { m.file_size, m.title, m.artist, m.album, m.genre, m.year, \ m.duration_secs, m.description, m.thumbnail_path, m.file_mtime, \ m.date_taken, m.latitude, m.longitude, m.camera_make, \ - m.camera_model, m.rating, m.perceptual_hash, m.created_at, \ - m.updated_at FROM media_items m JOIN playlist_items pi ON m.id = \ - pi.media_id WHERE pi.playlist_id = $1 ORDER BY pi.position ASC", + m.camera_model, m.rating, m.perceptual_hash, m.storage_mode, \ + m.original_filename, m.uploaded_at, m.storage_key, m.created_at, \ + m.updated_at, m.deleted_at, m.links_extracted_at FROM media_items m \ + JOIN playlist_items pi ON m.id = pi.media_id WHERE pi.playlist_id = \ + $1 ORDER BY pi.position ASC", &[&playlist_id], ) .await?; @@ -3391,11 +3474,11 @@ impl StorageBackend for PostgresBackend { } else { format!("WHERE {}", 
conditions.join(" AND ")) }; - params.push(Box::new(limit as i64)); + params.push(Box::new(limit.cast_signed())); let sql = format!( "SELECT id, media_id, user_id, event_type, timestamp, duration_secs, \ - context_json FROM usage_events {} ORDER BY timestamp DESC LIMIT ${idx}", - where_clause + context_json FROM usage_events {where_clause} ORDER BY timestamp DESC \ + LIMIT ${idx}" ); let param_refs: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> = params .iter() @@ -3438,23 +3521,26 @@ impl StorageBackend for PostgresBackend { m.file_size, m.title, m.artist, m.album, m.genre, m.year, \ m.duration_secs, m.description, m.thumbnail_path, m.file_mtime, \ m.date_taken, m.latitude, m.longitude, m.camera_make, \ - m.camera_model, m.rating, m.perceptual_hash, m.created_at, \ - m.updated_at, COUNT(ue.id) as view_count FROM media_items m JOIN \ - usage_events ue ON m.id = ue.media_id WHERE ue.event_type IN \ - ('view', 'play') GROUP BY m.id, m.path, m.file_name, m.media_type, \ - m.content_hash, m.file_size, m.title, m.artist, m.album, m.genre, \ - m.year, m.duration_secs, m.description, m.thumbnail_path, \ - m.file_mtime, m.date_taken, m.latitude, m.longitude, m.camera_make, \ - m.camera_model, m.rating, m.perceptual_hash, m.created_at, \ - m.updated_at ORDER BY view_count DESC LIMIT $1", - &[&(limit as i64)], + m.camera_model, m.rating, m.perceptual_hash, m.storage_mode, \ + m.original_filename, m.uploaded_at, m.storage_key, m.created_at, \ + m.updated_at, m.deleted_at, m.links_extracted_at, COUNT(ue.id) AS \ + view_count FROM media_items m JOIN usage_events ue ON m.id = \ + ue.media_id WHERE ue.event_type IN ('view', 'play') GROUP BY m.id, \ + m.path, m.file_name, m.media_type, m.content_hash, m.file_size, \ + m.title, m.artist, m.album, m.genre, m.year, m.duration_secs, \ + m.description, m.thumbnail_path, m.file_mtime, m.date_taken, \ + m.latitude, m.longitude, m.camera_make, m.camera_model, m.rating, \ + m.perceptual_hash, m.storage_mode, m.original_filename, \ + 
m.uploaded_at, m.storage_key, m.created_at, m.updated_at, \ + m.deleted_at, m.links_extracted_at ORDER BY view_count DESC LIMIT $1", + &[&limit.cast_signed()], ) .await?; let mut results = Vec::new(); for row in &rows { let item = row_to_media_item(row)?; - let count: i64 = row.get(24); - results.push((item, count as u64)); + let count: i64 = row.get("view_count"); + results.push((item, count.cast_unsigned())); } // Batch-load custom fields @@ -3506,16 +3592,20 @@ impl StorageBackend for PostgresBackend { m.file_size, m.title, m.artist, m.album, m.genre, m.year, \ m.duration_secs, m.description, m.thumbnail_path, m.file_mtime, \ m.date_taken, m.latitude, m.longitude, m.camera_make, \ - m.camera_model, m.rating, m.perceptual_hash, m.created_at, \ - m.updated_at FROM media_items m JOIN usage_events ue ON m.id = \ - ue.media_id WHERE ue.user_id = $1 AND ue.event_type IN ('view', \ - 'play') GROUP BY m.id, m.path, m.file_name, m.media_type, \ - m.content_hash, m.file_size, m.title, m.artist, m.album, m.genre, \ - m.year, m.duration_secs, m.description, m.thumbnail_path, \ - m.file_mtime, m.date_taken, m.latitude, m.longitude, m.camera_make, \ - m.camera_model, m.rating, m.perceptual_hash, m.created_at, \ - m.updated_at ORDER BY MAX(ue.timestamp) DESC LIMIT $2", - &[&user_id.0, &(limit as i64)], + m.camera_model, m.rating, m.perceptual_hash, m.storage_mode, \ + m.original_filename, m.uploaded_at, m.storage_key, m.created_at, \ + m.updated_at, m.deleted_at, m.links_extracted_at FROM media_items m \ + JOIN usage_events ue ON m.id = ue.media_id WHERE ue.user_id = $1 AND \ + ue.event_type IN ('view', 'play') GROUP BY m.id, m.path, \ + m.file_name, m.media_type, m.content_hash, m.file_size, m.title, \ + m.artist, m.album, m.genre, m.year, m.duration_secs, m.description, \ + m.thumbnail_path, m.file_mtime, m.date_taken, m.latitude, \ + m.longitude, m.camera_make, m.camera_model, m.rating, \ + m.perceptual_hash, m.storage_mode, m.original_filename, \ + m.uploaded_at, 
m.storage_key, m.created_at, m.updated_at, \ + m.deleted_at, m.links_extracted_at ORDER BY MAX(ue.timestamp) DESC \ + LIMIT $2", + &[&user_id.0, &limit.cast_signed()], ) .await?; let mut items: Vec = rows @@ -3629,8 +3719,10 @@ impl StorageBackend for PostgresBackend { .file_path .as_ref() .map(|p| p.to_string_lossy().to_string()); - let track_index = subtitle.track_index.map(|i| i as i32); - let offset_ms = subtitle.offset_ms as i32; + let track_index = subtitle + .track_index + .map(|i| i32::try_from(i).unwrap_or(i32::MAX)); + let offset_ms = i32::try_from(subtitle.offset_ms).unwrap_or(i32::MAX); client .execute( "INSERT INTO subtitles (id, media_id, language, format, file_path, \ @@ -3686,8 +3778,8 @@ impl StorageBackend for PostgresBackend { is_embedded: row.get("is_embedded"), track_index: row .get::<_, Option>("track_index") - .map(|i| i as usize), - offset_ms: row.get::<_, i32>("offset_ms") as i64, + .map(|i| usize::try_from(i).unwrap_or(0)), + offset_ms: i64::from(row.get::<_, i32>("offset_ms")), created_at: row.get("created_at"), } }) @@ -3717,7 +3809,7 @@ impl StorageBackend for PostgresBackend { .get() .await .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?; - let offset = offset_ms as i32; + let offset = i32::try_from(offset_ms).unwrap_or(i32::MAX); client .execute("UPDATE subtitles SET offset_ms = $1 WHERE id = $2", &[ &offset, &id, @@ -3743,7 +3835,7 @@ impl StorageBackend for PostgresBackend { meta.id, e ); - serde_json::Value::Object(Default::default()) + serde_json::Value::Object(serde_json::Map::default()) }); client .execute( @@ -3828,7 +3920,7 @@ impl StorageBackend for PostgresBackend { let cache_path = session.cache_path.to_string_lossy().to_string(); let status = session.status.as_str().to_string(); let error_message = session.status.error_message().map(String::from); - let progress = session.progress as f64; + let progress = f64::from(session.progress); client .execute( "INSERT INTO transcode_sessions (id, media_id, user_id, 
profile, \ @@ -3862,9 +3954,9 @@ impl StorageBackend for PostgresBackend { .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?; let rows = client .query( - "SELECT id, media_id, user_id, profile, cache_path, status, progress, \ - error_message, created_at, expires_at FROM transcode_sessions WHERE \ - id = $1", + "SELECT id, media_id, user_id, profile, cache_path, status, \ + progress::real, error_message, created_at, expires_at FROM \ + transcode_sessions WHERE id = $1", &[&id], ) .await?; @@ -3873,7 +3965,6 @@ impl StorageBackend for PostgresBackend { })?; let status_str: String = row.get("status"); let error_msg: Option = row.get("error_message"); - let progress: f64 = row.get("progress"); Ok(crate::transcode::TranscodeSession { id: row.get("id"), media_id: MediaId(row.get("media_id")), @@ -3888,7 +3979,7 @@ impl StorageBackend for PostgresBackend { &status_str, error_msg.as_deref(), ), - progress: progress as f32, + progress: row.get(6), created_at: row.get("created_at"), expires_at: row.get("expires_at"), duration_secs: None, @@ -3910,7 +4001,7 @@ impl StorageBackend for PostgresBackend { client .query( "SELECT id, media_id, user_id, profile, cache_path, status, \ - progress, error_message, created_at, expires_at FROM \ + progress::real, error_message, created_at, expires_at FROM \ transcode_sessions WHERE media_id = $1 ORDER BY created_at DESC", &[&mid.0], ) @@ -3920,7 +4011,7 @@ impl StorageBackend for PostgresBackend { client .query( "SELECT id, media_id, user_id, profile, cache_path, status, \ - progress, error_message, created_at, expires_at FROM \ + progress::real, error_message, created_at, expires_at FROM \ transcode_sessions ORDER BY created_at DESC", &[], ) @@ -3933,7 +4024,6 @@ impl StorageBackend for PostgresBackend { .map(|row| { let status_str: String = row.get("status"); let error_msg: Option = row.get("error_message"); - let progress: f64 = row.get("progress"); crate::transcode::TranscodeSession { id: row.get("id"), media_id: 
MediaId(row.get("media_id")), @@ -3948,7 +4038,7 @@ impl StorageBackend for PostgresBackend { &status_str, error_msg.as_deref(), ), - progress: progress as f32, + progress: row.get(6), created_at: row.get("created_at"), expires_at: row.get("expires_at"), duration_secs: None, @@ -3972,7 +4062,7 @@ impl StorageBackend for PostgresBackend { .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?; let status_str = status.as_str().to_string(); let error_message = status.error_message().map(String::from); - let progress_f64 = progress as f64; + let progress_f64 = f64::from(progress); client .execute( "UPDATE transcode_sessions SET status = $1, progress = $2, \ @@ -4080,6 +4170,32 @@ impl StorageBackend for PostgresBackend { Ok(()) } + async fn extend_session( + &self, + session_token: &str, + new_expires_at: chrono::DateTime, + ) -> Result>> { + let client = self + .pool + .get() + .await + .map_err(|e| PinakesError::Database(format!("pool error: {e}")))?; + + let now = chrono::Utc::now(); + let rows = client + .execute( + "UPDATE sessions SET expires_at = $1, last_accessed = $2 WHERE \ + session_token = $3 AND expires_at > NOW()", + &[&new_expires_at, &now, &session_token], + ) + .await?; + if rows > 0 { + Ok(Some(new_expires_at)) + } else { + Ok(None) + } + } + async fn delete_session(&self, session_token: &str) -> Result<()> { let client = self .pool @@ -4417,14 +4533,17 @@ impl StorageBackend for PostgresBackend { GROUP BY author_name ORDER BY book_count DESC, author_name LIMIT $1 OFFSET $2", - &[&(pagination.limit as i64), &(pagination.offset as i64)], + &[ + &(pagination.limit.cast_signed()), + &(pagination.offset.cast_signed()), + ], ) .await?; Ok( rows .iter() - .map(|r| (r.get(0), r.get::<_, i64>(1) as u64)) + .map(|r| (r.get(0), r.get::<_, i64>(1).cast_unsigned())) .collect(), ) } @@ -4450,7 +4569,7 @@ impl StorageBackend for PostgresBackend { Ok( rows .iter() - .map(|r| (r.get(0), r.get::<_, i64>(1) as u64)) + .map(|r| (r.get(0), r.get::<_, 
i64>(1).cast_unsigned())) .collect(), ) } @@ -4472,7 +4591,13 @@ impl StorageBackend for PostgresBackend { m.year, m.duration_secs, m.description, m.thumbnail_path, \ m.file_mtime, - m.created_at, m.updated_at + m.date_taken, m.latitude, m.longitude, + m.camera_make, m.camera_model, m.rating, \ + m.perceptual_hash, + m.storage_mode, m.original_filename, m.uploaded_at, \ + m.storage_key, + m.created_at, m.updated_at, m.deleted_at, \ + m.links_extracted_at FROM media_items m INNER JOIN book_metadata b ON m.id = b.media_id WHERE b.series_name = $1 @@ -4503,7 +4628,7 @@ impl StorageBackend for PostgresBackend { VALUES ($1, $2, $3, NOW()) ON CONFLICT(user_id, media_id) DO UPDATE SET progress_secs = $3, last_watched_at = NOW()", - &[&user_id, &media_id.0, &(current_page as f64)], + &[&user_id, &media_id.0, &f64::from(current_page)], ) .await?; Ok(()) @@ -4522,7 +4647,8 @@ impl StorageBackend for PostgresBackend { let row = client .query_opt( - "SELECT wh.progress_secs, bm.page_count, wh.last_watched_at + "SELECT CAST(wh.progress_secs AS INTEGER), bm.page_count, \ + wh.last_watched_at FROM watch_history wh LEFT JOIN book_metadata bm ON wh.media_id = bm.media_id WHERE wh.user_id = $1 AND wh.media_id = $2", @@ -4531,17 +4657,15 @@ impl StorageBackend for PostgresBackend { .await?; Ok(row.map(|r| { - let current_page = r.get::<_, f64>(0) as i32; + let current_page: i32 = r.get(0); let total_pages: Option = r.get(1); - let progress_percent = if let Some(total) = total_pages { + let progress_percent = total_pages.map_or(0.0, |total| { if total > 0 { - (current_page as f64 / total as f64 * 100.0).min(100.0) + (f64::from(current_page) / f64::from(total) * 100.0).min(100.0) } else { 0.0 } - } else { - 0.0 - }; + }); crate::model::ReadingProgress { media_id, @@ -4568,7 +4692,8 @@ impl StorageBackend for PostgresBackend { // Query books with reading progress for this user let rows = client .query( - "SELECT m.*, wh.progress_secs, bm.page_count + "SELECT m.*, CAST(wh.progress_secs 
AS INTEGER) AS progress_page, \ + bm.page_count FROM media_items m INNER JOIN watch_history wh ON m.id = wh.media_id LEFT JOIN book_metadata bm ON m.id = bm.media_id @@ -4585,28 +4710,26 @@ impl StorageBackend for PostgresBackend { let item = row_to_media_item(&row)?; // Get progress info - let current_page: f64 = row.get("progress_secs"); - let current_page = current_page as i32; + let current_page: i32 = row.get("progress_page"); let total_pages: Option = row.get("page_count"); // Calculate status based on progress - let calculated_status = if let Some(total) = total_pages { - if total > 0 { - let percent = (current_page as f64 / total as f64 * 100.0).min(100.0); - if percent >= 100.0 { - crate::model::ReadingStatus::Completed - } else if percent > 0.0 { - crate::model::ReadingStatus::Reading + let calculated_status = + total_pages.map_or(crate::model::ReadingStatus::Reading, |total| { + if total > 0 { + let percent = + (f64::from(current_page) / f64::from(total) * 100.0).min(100.0); + if percent >= 100.0 { + crate::model::ReadingStatus::Completed + } else if percent > 0.0 { + crate::model::ReadingStatus::Reading + } else { + crate::model::ReadingStatus::ToRead + } } else { - crate::model::ReadingStatus::ToRead + crate::model::ReadingStatus::Reading } - } else { - crate::model::ReadingStatus::Reading - } - } else { - // No total pages known, assume reading - crate::model::ReadingStatus::Reading - }; + }); // Filter by status if specified match status { @@ -4652,7 +4775,13 @@ impl StorageBackend for PostgresBackend { m.year, m.duration_secs, m.description, m.thumbnail_path, \ m.file_mtime, - m.created_at, m.updated_at + m.date_taken, m.latitude, m.longitude, + m.camera_make, m.camera_model, m.rating, \ + m.perceptual_hash, + m.storage_mode, m.original_filename, m.uploaded_at, \ + m.storage_key, + m.created_at, m.updated_at, m.deleted_at, \ + m.links_extracted_at FROM media_items m INNER JOIN book_metadata bm ON m.id = bm.media_id INNER JOIN book_authors ba ON m.id 
= ba.media_id @@ -4667,8 +4796,8 @@ impl StorageBackend for PostgresBackend { &series_pattern, &publisher_pattern, &l, - &(pagination.limit as i64), - &(pagination.offset as i64), + &(pagination.limit.cast_signed()), + &(pagination.offset.cast_signed()), ], ) .await? @@ -4687,25 +4816,35 @@ impl StorageBackend for PostgresBackend { m.year, m.duration_secs, m.description, m.thumbnail_path, \ m.file_mtime, - m.created_at, m.updated_at + m.date_taken, m.latitude, m.longitude, + m.camera_make, m.camera_model, m.rating, \ + m.perceptual_hash, + m.storage_mode, m.original_filename, m.uploaded_at, \ + m.storage_key, + m.created_at, m.updated_at, m.deleted_at, \ + m.links_extracted_at FROM media_items m INNER JOIN book_metadata bm ON m.id = bm.media_id ORDER BY m.title LIMIT $1 OFFSET $2", - &[&(pagination.limit as i64), &(pagination.offset as i64)], + &[ + &(pagination.limit.cast_signed()), + &(pagination.offset.cast_signed()), + ], ) .await? } else { // For other combinations, use dynamic query (simplified - just filter by // what's provided) - let mut query = "SELECT DISTINCT m.id, m.path, m.file_name, \ - m.media_type, m.content_hash, - m.file_size, m.title, m.artist, m.album, m.genre, m.year, - m.duration_secs, m.description, m.thumbnail_path, \ - m.file_mtime, - m.created_at, m.updated_at - FROM media_items m - INNER JOIN book_metadata bm ON m.id = bm.media_id WHERE 1=1" - .to_string(); + let mut query = + "SELECT DISTINCT m.id, m.path, m.file_name, m.media_type, \ + m.content_hash, m.file_size, m.title, m.artist, m.album, m.genre, \ + m.year, m.duration_secs, m.description, m.thumbnail_path, \ + m.file_mtime, m.date_taken, m.latitude, m.longitude, m.camera_make, \ + m.camera_model, m.rating, m.perceptual_hash, m.storage_mode, \ + m.original_filename, m.uploaded_at, m.storage_key, m.created_at, \ + m.updated_at, m.deleted_at, m.links_extracted_at FROM media_items m \ + INNER JOIN book_metadata bm ON m.id = bm.media_id WHERE 1=1" + .to_string(); if isbn.is_some() { 
query.push_str(" AND (bm.isbn = $1 OR bm.isbn13 = $1)"); @@ -4716,15 +4855,15 @@ impl StorageBackend for PostgresBackend { client .query(&query, &[ &i, - &(pagination.limit as i64), - &(pagination.offset as i64), + &(pagination.limit.cast_signed()), + &(pagination.offset.cast_signed()), ]) .await? } else { client .query(&query, &[ - &(pagination.limit as i64), - &(pagination.offset as i64), + &(pagination.limit.cast_signed()), + &(pagination.offset.cast_signed()), ]) .await? } @@ -4755,7 +4894,7 @@ impl StorageBackend for PostgresBackend { &item.file_name, &media_type_to_string(&item.media_type), &item.content_hash.0, - &(item.file_size as i64), + &(item.file_size.cast_signed()), &item.title, &item.artist, &item.album, @@ -4805,9 +4944,9 @@ impl StorageBackend for PostgresBackend { if let Some(row) = existing { return Ok(ManagedBlob { content_hash: ContentHash(row.get(0)), - file_size: row.get::<_, i64>(1) as u64, + file_size: row.get::<_, i64>(1).cast_unsigned(), mime_type: row.get(2), - reference_count: row.get::<_, i32>(3) as u32, + reference_count: row.get::<_, i32>(3).cast_unsigned(), stored_at: row.get(4), last_verified: row.get(5), }); @@ -4820,7 +4959,7 @@ impl StorageBackend for PostgresBackend { "INSERT INTO managed_blobs (content_hash, file_size, mime_type, \ reference_count, stored_at) VALUES ($1, $2, $3, 1, $4)", - &[&hash.0, &(size as i64), &mime_type, &now], + &[&hash.0, &(size.cast_signed()), &mime_type, &now], ) .await .map_err(|e| PinakesError::Database(e.to_string()))?; @@ -4853,9 +4992,9 @@ impl StorageBackend for PostgresBackend { Ok(row.map(|r| { ManagedBlob { content_hash: ContentHash(r.get(0)), - file_size: r.get::<_, i64>(1) as u64, + file_size: r.get::<_, i64>(1).cast_unsigned(), mime_type: r.get(2), - reference_count: r.get::<_, i32>(3) as u32, + reference_count: r.get::<_, i32>(3).cast_unsigned(), stored_at: r.get(4), last_verified: r.get(5), } @@ -4902,7 +5041,7 @@ impl StorageBackend for PostgresBackend { .await .map_err(|e| 
PinakesError::Database(e.to_string()))?; - let count: i32 = row.map(|r| r.get(0)).unwrap_or(0); + let count: i32 = row.map_or(0, |r| r.get(0)); Ok(count <= 0) } @@ -4944,9 +5083,9 @@ impl StorageBackend for PostgresBackend { .map(|r| { ManagedBlob { content_hash: ContentHash(r.get(0)), - file_size: r.get::<_, i64>(1) as u64, + file_size: r.get::<_, i64>(1).cast_unsigned(), mime_type: r.get(2), - reference_count: r.get::<_, i32>(3) as u32, + reference_count: r.get::<_, i32>(3).cast_unsigned(), stored_at: r.get(4), last_verified: r.get(5), } @@ -4975,27 +5114,30 @@ impl StorageBackend for PostgresBackend { PinakesError::Database(format!("failed to get connection: {e}")) })?; - let total_blobs: i64 = client - .query_one("SELECT COUNT(*) FROM managed_blobs", &[]) - .await - .map_err(|e| PinakesError::Database(e.to_string()))? - .get(0); - - let total_size: i64 = client - .query_one("SELECT COALESCE(SUM(file_size), 0) FROM managed_blobs", &[]) - .await - .map_err(|e| PinakesError::Database(e.to_string()))? - .get(0); - - let unique_size: i64 = client + let stats_row = client .query_one( - "SELECT COALESCE(SUM(file_size), 0) FROM managed_blobs WHERE \ - reference_count = 1", + "SELECT + COUNT(*), + COALESCE(SUM(file_size), 0), + COALESCE(SUM(CASE WHEN reference_count = 1 THEN file_size ELSE 0 \ + END), 0), + CASE WHEN SUM(file_size) > 0 + THEN SUM(CASE WHEN reference_count = 1 THEN file_size::double \ + precision ELSE 0 END) / SUM(file_size)::double precision + ELSE 1.0 + END, + COUNT(*) FILTER (WHERE reference_count <= 0) + FROM managed_blobs", &[], ) .await - .map_err(|e| PinakesError::Database(e.to_string()))? 
- .get(0); + .map_err(|e| PinakesError::Database(e.to_string()))?; + + let total_blobs: i64 = stats_row.get(0); + let total_size: i64 = stats_row.get(1); + let unique_size: i64 = stats_row.get(2); + let dedup_ratio: f64 = stats_row.get(3); + let orphaned_blobs: i64 = stats_row.get(4); let managed_media_count: i64 = client .query_one( @@ -5006,28 +5148,13 @@ impl StorageBackend for PostgresBackend { .map_err(|e| PinakesError::Database(e.to_string()))? .get(0); - let orphaned_blobs: i64 = client - .query_one( - "SELECT COUNT(*) FROM managed_blobs WHERE reference_count <= 0", - &[], - ) - .await - .map_err(|e| PinakesError::Database(e.to_string()))? - .get(0); - - let dedup_ratio = if total_size > 0 { - unique_size as f64 / total_size as f64 - } else { - 1.0 - }; - Ok(ManagedStorageStats { - total_blobs: total_blobs as u64, - total_size_bytes: total_size as u64, - unique_size_bytes: unique_size as u64, + total_blobs: total_blobs.cast_unsigned(), + total_size_bytes: total_size.cast_unsigned(), + unique_size_bytes: unique_size.cast_unsigned(), deduplication_ratio: dedup_ratio, - managed_media_count: managed_media_count as u64, - orphaned_blobs: orphaned_blobs as u64, + managed_media_count: managed_media_count.cast_unsigned(), + orphaned_blobs: orphaned_blobs.cast_unsigned(), }) } @@ -5280,7 +5407,7 @@ impl StorageBackend for PostgresBackend { &change.media_id.map(|m| m.0), &change.path, &change.content_hash.as_ref().map(|h| h.0.clone()), - &change.file_size.map(|s| s as i64), + &change.file_size.map(u64::cast_signed), &change.metadata_json, &change.changed_by_device.map(|d| d.0), &change.timestamp, @@ -5306,7 +5433,7 @@ impl StorageBackend for PostgresBackend { "SELECT id, sequence, change_type, media_id, path, content_hash, file_size, metadata_json, changed_by_device, timestamp FROM sync_log WHERE sequence > $1 ORDER BY sequence LIMIT $2", - &[&cursor, &(limit as i64)], + &[&cursor, &limit.cast_signed()], ) .await .map_err(|e| PinakesError::Database(e.to_string()))?; 
@@ -5325,7 +5452,9 @@ impl StorageBackend for PostgresBackend { media_id: r.get::<_, Option>(3).map(MediaId), path: r.get(4), content_hash: r.get::<_, Option>(5).map(ContentHash), - file_size: r.get::<_, Option>(6).map(|s| s as u64), + file_size: r + .get::<_, Option>(6) + .map(i64::cast_unsigned), metadata_json: r.get(7), changed_by_device: r .get::<_, Option>(8) @@ -5509,9 +5638,9 @@ impl StorageBackend for PostgresBackend { &session.device_id.0, &session.target_path, &session.expected_hash.0, - &(session.expected_size as i64), - &(session.chunk_size as i64), - &(session.chunk_count as i64), + &session.expected_size.cast_signed(), + &session.chunk_size.cast_signed(), + &session.chunk_count.cast_signed(), &session.status.to_string(), &session.created_at, &session.expires_at, @@ -5549,9 +5678,9 @@ impl StorageBackend for PostgresBackend { device_id: crate::sync::DeviceId(row.get(1)), target_path: row.get(2), expected_hash: ContentHash(row.get(3)), - expected_size: row.get::<_, i64>(4) as u64, - chunk_size: row.get::<_, i64>(5) as u64, - chunk_count: row.get::<_, i64>(6) as u64, + expected_size: row.get::<_, i64>(4).cast_unsigned(), + chunk_size: row.get::<_, i64>(5).cast_unsigned(), + chunk_count: row.get::<_, i64>(6).cast_unsigned(), status: row .get::<_, String>(7) .parse() @@ -5605,9 +5734,9 @@ impl StorageBackend for PostgresBackend { hash = EXCLUDED.hash, received_at = EXCLUDED.received_at", &[ &upload_id, - &(chunk.chunk_index as i64), - &(chunk.offset as i64), - &(chunk.size as i64), + &chunk.chunk_index.cast_signed(), + &chunk.offset.cast_signed(), + &chunk.size.cast_signed(), &chunk.hash, &chunk.received_at, ], @@ -5641,9 +5770,9 @@ impl StorageBackend for PostgresBackend { .map(|r| { crate::sync::ChunkInfo { upload_id: r.get(0), - chunk_index: r.get::<_, i64>(1) as u64, - offset: r.get::<_, i64>(2) as u64, - size: r.get::<_, i64>(3) as u64, + chunk_index: r.get::<_, i64>(1).cast_unsigned(), + offset: r.get::<_, i64>(2).cast_unsigned(), + size: r.get::<_, 
i64>(3).cast_unsigned(), hash: r.get(4), received_at: r.get(5), } @@ -5838,15 +5967,15 @@ impl StorageBackend for PostgresBackend { &recipient_user_id, &public_token, &password_hash, - &share.permissions.can_view, - &share.permissions.can_download, - &share.permissions.can_edit, - &share.permissions.can_delete, - &share.permissions.can_reshare, - &share.permissions.can_add, + &share.permissions.view.can_view, + &share.permissions.view.can_download, + &share.permissions.mutate.can_edit, + &share.permissions.mutate.can_delete, + &share.permissions.view.can_reshare, + &share.permissions.mutate.can_add, &share.note, &share.expires_at, - &(share.access_count as i64), + &share.access_count.cast_signed(), &share.inherit_to_children, &share.parent_share_id.map(|s| s.0), &share.created_at, @@ -5883,7 +6012,7 @@ impl StorageBackend for PostgresBackend { .await .map_err(|e| PinakesError::Database(e.to_string()))?; - pg_row_to_share(&row) + Ok(pg_row_to_share(&row)) } async fn get_share_by_token( @@ -5910,7 +6039,7 @@ impl StorageBackend for PostgresBackend { .await .map_err(|e| PinakesError::Database(e.to_string()))?; - pg_row_to_share(&row) + Ok(pg_row_to_share(&row)) } async fn list_shares_by_owner( @@ -5936,14 +6065,14 @@ impl StorageBackend for PostgresBackend { LIMIT $2 OFFSET $3", &[ &owner_id.0, - &(pagination.limit as i64), - &(pagination.offset as i64), + &(pagination.limit.cast_signed()), + &(pagination.offset.cast_signed()), ], ) .await .map_err(|e| PinakesError::Database(e.to_string()))?; - rows.iter().map(pg_row_to_share).collect() + Ok(rows.iter().map(pg_row_to_share).collect()) } async fn list_shares_for_user( @@ -5969,14 +6098,14 @@ impl StorageBackend for PostgresBackend { DESC LIMIT $2 OFFSET $3", &[ &user_id.0, - &(pagination.limit as i64), - &(pagination.offset as i64), + &(pagination.limit.cast_signed()), + &(pagination.offset.cast_signed()), ], ) .await .map_err(|e| PinakesError::Database(e.to_string()))?; - rows.iter().map(pg_row_to_share).collect() + 
Ok(rows.iter().map(pg_row_to_share).collect()) } async fn list_shares_for_target( @@ -6006,7 +6135,7 @@ impl StorageBackend for PostgresBackend { .await .map_err(|e| PinakesError::Database(e.to_string()))?; - rows.iter().map(pg_row_to_share).collect() + Ok(rows.iter().map(pg_row_to_share).collect()) } async fn update_share( @@ -6027,12 +6156,12 @@ impl StorageBackend for PostgresBackend { inherit_to_children = $9, updated_at = $10 WHERE id = $11", &[ - &share.permissions.can_view, - &share.permissions.can_download, - &share.permissions.can_edit, - &share.permissions.can_delete, - &share.permissions.can_reshare, - &share.permissions.can_add, + &share.permissions.view.can_view, + &share.permissions.view.can_download, + &share.permissions.mutate.can_edit, + &share.permissions.mutate.can_delete, + &share.permissions.view.can_reshare, + &share.permissions.mutate.can_add, &share.note, &share.expires_at, &share.inherit_to_children, @@ -6110,7 +6239,7 @@ impl StorageBackend for PostgresBackend { ) if *share_user == uid => { return Ok(Some(share.permissions)); }, - _ => continue, + _ => {}, } } @@ -6252,8 +6381,8 @@ impl StorageBackend for PostgresBackend { DESC LIMIT $2 OFFSET $3", &[ &share_id.0, - &(pagination.limit as i64), - &(pagination.offset as i64), + &(pagination.limit.cast_signed()), + &(pagination.offset.cast_signed()), ], ) .await @@ -6346,15 +6475,20 @@ impl StorageBackend for PostgresBackend { ) } - async fn mark_notification_read(&self, id: Uuid) -> Result<()> { + async fn mark_notification_read( + &self, + id: Uuid, + user_id: crate::users::UserId, + ) -> Result<()> { let client = self.pool.get().await.map_err(|e| { PinakesError::Database(format!("failed to get connection: {e}")) })?; client .execute( - "UPDATE share_notifications SET is_read = true WHERE id = $1", - &[&id], + "UPDATE share_notifications SET is_read = true WHERE id = $1 AND \ + user_id = $2", + &[&id, &user_id.0], ) .await .map_err(|e| PinakesError::Database(e.to_string()))?; @@ -6411,7 
+6545,9 @@ impl StorageBackend for PostgresBackend { let storage_mode: String = row.get(1); let old_path_buf = std::path::PathBuf::from(&old_path); - let parent = old_path_buf.parent().unwrap_or(std::path::Path::new("")); + let parent = old_path_buf + .parent() + .unwrap_or_else(|| std::path::Path::new("")); let new_path = parent.join(new_name); let new_path_str = new_path.to_string_lossy().to_string(); @@ -6422,7 +6558,7 @@ impl StorageBackend for PostgresBackend { .map_err(|e| { PinakesError::Io(std::io::Error::new( e.kind(), - format!("Failed to rename file: {}", e), + format!("Failed to rename file: {e}"), )) })?; } @@ -6481,7 +6617,7 @@ impl StorageBackend for PostgresBackend { .map_err(|e| { PinakesError::Io(std::io::Error::new( e.kind(), - format!("Failed to move file: {}", e), + format!("Failed to move file: {e}"), )) })?; } @@ -6516,8 +6652,7 @@ impl StorageBackend for PostgresBackend { if rows_affected == 0 { return Err(PinakesError::NotFound(format!( - "Media item {} not found or already deleted", - id + "Media item {id} not found or already deleted" ))); } @@ -6542,8 +6677,7 @@ impl StorageBackend for PostgresBackend { if rows_affected == 0 { return Err(PinakesError::NotFound(format!( - "Media item {} not found in trash", - id + "Media item {id} not found in trash" ))); } @@ -6575,7 +6709,10 @@ impl StorageBackend for PostgresBackend { WHERE deleted_at IS NOT NULL ORDER BY deleted_at DESC LIMIT $1 OFFSET $2", - &[&(pagination.limit as i64), &(pagination.offset as i64)], + &[ + &(pagination.limit.cast_signed()), + &(pagination.offset.cast_signed()), + ], ) .await .map_err(|e| PinakesError::Database(e.to_string()))?; @@ -6607,18 +6744,15 @@ impl StorageBackend for PostgresBackend { // Delete related data for row in &id_rows { let id: uuid::Uuid = row.get(0); - client + let _ = client .execute("DELETE FROM media_tags WHERE media_id = $1", &[&id]) - .await - .ok(); - client + .await; + let _ = client .execute("DELETE FROM collection_items WHERE media_id = 
$1", &[&id]) - .await - .ok(); - client + .await; + let _ = client .execute("DELETE FROM custom_fields WHERE media_id = $1", &[&id]) - .await - .ok(); + .await; } // Delete the media items @@ -6653,18 +6787,15 @@ impl StorageBackend for PostgresBackend { // Delete related data for row in &id_rows { let id: uuid::Uuid = row.get(0); - client + let _ = client .execute("DELETE FROM media_tags WHERE media_id = $1", &[&id]) - .await - .ok(); - client + .await; + let _ = client .execute("DELETE FROM collection_items WHERE media_id = $1", &[&id]) - .await - .ok(); - client + .await; + let _ = client .execute("DELETE FROM custom_fields WHERE media_id = $1", &[&id]) - .await - .ok(); + .await; } // Delete the media items @@ -6696,7 +6827,7 @@ impl StorageBackend for PostgresBackend { .map_err(|e| PinakesError::Database(e.to_string()))?; let count: i64 = row.get(0); - Ok(count as u64) + Ok(count.cast_unsigned()) } async fn save_markdown_links( @@ -6879,81 +7010,81 @@ impl StorageBackend for PostgresBackend { let depth = depth.min(5); // Limit depth let mut nodes = Vec::new(); let mut edges = Vec::new(); - let mut node_ids: std::collections::HashSet = - std::collections::HashSet::new(); + let node_ids: std::collections::HashSet = + if let Some(center) = center_id { + // BFS to find connected nodes within depth + let mut frontier = vec![center.0.to_string()]; + let mut visited = std::collections::HashSet::new(); + visited.insert(center.0.to_string()); - if let Some(center) = center_id { - // BFS to find connected nodes within depth - let mut frontier = vec![center.0.to_string()]; - let mut visited = std::collections::HashSet::new(); - visited.insert(center.0.to_string()); + for _ in 0..depth { + if frontier.is_empty() { + break; + } + let mut next_frontier = Vec::new(); - for _ in 0..depth { - if frontier.is_empty() { - break; - } - let mut next_frontier = Vec::new(); - - for node_id in &frontier { - // Get outgoing links - let rows = client - .query( - "SELECT target_media_id 
FROM markdown_links + for node_id in &frontier { + // Get outgoing links + let rows = client + .query( + "SELECT target_media_id FROM markdown_links WHERE source_media_id = $1 AND target_media_id IS \ - NOT NULL", - &[node_id], - ) - .await - .map_err(|e| PinakesError::Database(e.to_string()))?; + NOT NULL", + &[node_id], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; - for row in rows { - let id: String = row.get(0); - if !visited.contains(&id) { - visited.insert(id.clone()); - next_frontier.push(id); + for row in rows { + let id: String = row.get(0); + if !visited.contains(&id) { + visited.insert(id.clone()); + next_frontier.push(id); + } } - } - // Get incoming links - let rows = client - .query( - "SELECT source_media_id FROM markdown_links + // Get incoming links + let rows = client + .query( + "SELECT source_media_id FROM markdown_links WHERE target_media_id = $1", - &[node_id], - ) - .await - .map_err(|e| PinakesError::Database(e.to_string()))?; + &[node_id], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; - for row in rows { - let id: String = row.get(0); - if !visited.contains(&id) { - visited.insert(id.clone()); - next_frontier.push(id); + for row in rows { + let id: String = row.get(0); + if !visited.contains(&id) { + visited.insert(id.clone()); + next_frontier.push(id); + } } } + + frontier = next_frontier; } - frontier = next_frontier; - } - - node_ids = visited; - } else { - // Get all markdown files with links (limit to 500) - let rows = client - .query( - "SELECT DISTINCT id FROM media_items + visited + } else { + // Get all markdown files with links (limit to 500) + let rows = client + .query( + "SELECT DISTINCT id FROM media_items WHERE media_type = 'markdown' AND deleted_at IS NULL LIMIT 500", - &[], - ) - .await - .map_err(|e| PinakesError::Database(e.to_string()))?; + &[], + ) + .await + .map_err(|e| PinakesError::Database(e.to_string()))?; - for row in rows { - let id: String = row.get(0); - 
node_ids.insert(id); - } - } + let mut collected = std::collections::HashSet::new(); + for row in rows { + let id: String = row.get(0); + collected.insert(id); + } + collected + }; // Build nodes with metadata for node_id in &node_ids { @@ -6997,8 +7128,8 @@ impl StorageBackend for PostgresBackend { label, title, media_type, - link_count: link_count as u32, - backlink_count: backlink_count as u32, + link_count: u32::try_from(link_count).unwrap_or(0), + backlink_count: u32::try_from(backlink_count).unwrap_or(0), }); } } @@ -7130,7 +7261,7 @@ impl StorageBackend for PostgresBackend { .map_err(|e| PinakesError::Database(e.to_string()))?; let count: i64 = row.get(0); - Ok(count as u64) + Ok(count.cast_unsigned()) } } @@ -7151,25 +7282,25 @@ impl PostgresBackend { &[&user_id], ) .await?; - match row { - Some(row) => { + Ok(row.map_or_else( + || { + crate::users::UserProfile { + avatar_path: None, + bio: None, + preferences: crate::users::UserPreferences::default(), + } + }, + |row| { let prefs_json: serde_json::Value = row.get::<_, serde_json::Value>(2); let preferences: crate::users::UserPreferences = serde_json::from_value(prefs_json).unwrap_or_default(); - Ok(crate::users::UserProfile { + crate::users::UserProfile { avatar_path: row.get(0), bio: row.get(1), preferences, - }) + } }, - None => { - Ok(crate::users::UserProfile { - avatar_path: None, - bio: None, - preferences: Default::default(), - }) - }, - } + )) } async fn library_statistics_inner(&self) -> Result { @@ -7181,7 +7312,8 @@ impl PostgresBackend { let row = client .query_one( - "SELECT COUNT(*), COALESCE(SUM(file_size), 0) FROM media_items", + "SELECT COUNT(*), COALESCE(SUM(file_size), 0) FROM media_items WHERE \ + deleted_at IS NULL", &[], ) .await?; @@ -7195,8 +7327,8 @@ impl PostgresBackend { let rows = client .query( - "SELECT media_type, COUNT(*) FROM media_items GROUP BY media_type \ - ORDER BY COUNT(*) DESC", + "SELECT media_type, COUNT(*) FROM media_items WHERE deleted_at IS \ + NULL GROUP BY 
media_type ORDER BY COUNT(*) DESC", &[], ) .await?; @@ -7205,14 +7337,15 @@ impl PostgresBackend { .map(|r| { let mt: String = r.get(0); let cnt: i64 = r.get(1); - (mt, cnt as u64) + (mt, cnt.cast_unsigned()) }) .collect(); let rows = client .query( "SELECT media_type, COALESCE(SUM(file_size), 0) FROM media_items \ - GROUP BY media_type ORDER BY SUM(file_size) DESC", + WHERE deleted_at IS NULL GROUP BY media_type ORDER BY SUM(file_size) \ + DESC", &[], ) .await?; @@ -7221,22 +7354,22 @@ impl PostgresBackend { .map(|r| { let mt: String = r.get(0); let sz: i64 = r.get(1); - (mt, sz as u64) + (mt, sz.cast_unsigned()) }) .collect(); let newest: Option = client .query_opt( - "SELECT created_at::text FROM media_items ORDER BY created_at DESC \ - LIMIT 1", + "SELECT created_at::text FROM media_items WHERE deleted_at IS NULL \ + ORDER BY created_at DESC LIMIT 1", &[], ) .await? .map(|r| r.get(0)); let oldest: Option = client .query_opt( - "SELECT created_at::text FROM media_items ORDER BY created_at ASC \ - LIMIT 1", + "SELECT created_at::text FROM media_items WHERE deleted_at IS NULL \ + ORDER BY created_at ASC LIMIT 1", &[], ) .await? 
@@ -7254,7 +7387,7 @@ impl PostgresBackend { .map(|r| { let name: String = r.get(0); let cnt: i64 = r.get(1); - (name, cnt as u64) + (name, cnt.cast_unsigned()) }) .collect(); @@ -7271,7 +7404,7 @@ impl PostgresBackend { .map(|r| { let name: String = r.get(0); let cnt: i64 = r.get(1); - (name, cnt as u64) + (name, cnt.cast_unsigned()) }) .collect(); @@ -7293,24 +7426,24 @@ impl PostgresBackend { .get(0); Ok(super::LibraryStatistics { - total_media: total_media as u64, - total_size_bytes: total_size as u64, - avg_file_size_bytes: avg_size as u64, + total_media: total_media.cast_unsigned(), + total_size_bytes: total_size.cast_unsigned(), + avg_file_size_bytes: avg_size.cast_unsigned(), media_by_type, storage_by_type, newest_item: newest, oldest_item: oldest, top_tags, top_collections, - total_tags: total_tags as u64, - total_collections: total_collections as u64, - total_duplicates: total_duplicates as u64, + total_tags: total_tags.cast_unsigned(), + total_collections: total_collections.cast_unsigned(), + total_duplicates: total_duplicates.cast_unsigned(), }) } } -/// Helper function to parse a share row from PostgreSQL -fn pg_row_to_share(row: &Row) -> Result { +/// Helper function to parse a share row from `PostgreSQL` +fn pg_row_to_share(row: &Row) -> crate::sharing::Share { let id: Uuid = row.get(0); let target_type: String = row.get(1); let target_id: Uuid = row.get(2); @@ -7321,11 +7454,6 @@ fn pg_row_to_share(row: &Row) -> Result { let password_hash: Option = row.get(7); let target = match target_type.as_str() { - "media" => { - crate::sharing::ShareTarget::Media { - media_id: MediaId(target_id), - } - }, "collection" => { crate::sharing::ShareTarget::Collection { collection_id: target_id, @@ -7345,12 +7473,6 @@ fn pg_row_to_share(row: &Row) -> Result { }; let recipient = match recipient_type.as_str() { - "public_link" => { - crate::sharing::ShareRecipient::PublicLink { - token: public_token.unwrap_or_default(), - password_hash, - } - }, "user" => { 
crate::sharing::ShareRecipient::User { user_id: crate::users::UserId(recipient_user_id.unwrap_or(Uuid::nil())), @@ -7369,22 +7491,26 @@ fn pg_row_to_share(row: &Row) -> Result { }, }; - Ok(crate::sharing::Share { + crate::sharing::Share { id: crate::sharing::ShareId(id), target, owner_id: crate::users::UserId(owner_id), recipient, permissions: crate::sharing::SharePermissions { - can_view: row.get(8), - can_download: row.get(9), - can_edit: row.get(10), - can_delete: row.get(11), - can_reshare: row.get(12), - can_add: row.get(13), + view: crate::sharing::ShareViewPermissions { + can_view: row.get(8), + can_download: row.get(9), + can_reshare: row.get(12), + }, + mutate: crate::sharing::ShareMutatePermissions { + can_edit: row.get(10), + can_delete: row.get(11), + can_add: row.get(13), + }, }, note: row.get(14), expires_at: row.get(15), - access_count: row.get::<_, i64>(16) as u64, + access_count: row.get::<_, i64>(16).cast_unsigned(), last_accessed: row.get(17), inherit_to_children: row.get(18), parent_share_id: row @@ -7392,22 +7518,22 @@ fn pg_row_to_share(row: &Row) -> Result { .map(crate::sharing::ShareId), created_at: row.get(20), updated_at: row.get(21), - }) + } } -/// Check if a SearchQuery tree contains any FullText or Prefix node (i.e. uses -/// the FTS index). +/// Check if a `SearchQuery` tree contains any `FullText` or Prefix node (i.e. +/// uses the FTS index). fn query_has_fts(query: &SearchQuery) -> bool { match query { SearchQuery::FullText(t) => !t.is_empty(), SearchQuery::Prefix(_) => true, - SearchQuery::Fuzzy(_) => false, - SearchQuery::FieldMatch { .. } => false, - SearchQuery::TypeFilter(_) => false, - SearchQuery::TagFilter(_) => false, - SearchQuery::RangeQuery { .. } => false, - SearchQuery::CompareQuery { .. } => false, - SearchQuery::DateQuery { .. } => false, + SearchQuery::Fuzzy(_) + | SearchQuery::FieldMatch { .. } + | SearchQuery::TypeFilter(_) + | SearchQuery::TagFilter(_) + | SearchQuery::RangeQuery { .. 
} + | SearchQuery::CompareQuery { .. } + | SearchQuery::DateQuery { .. } => false, SearchQuery::And(children) | SearchQuery::Or(children) => { children.iter().any(query_has_fts) }, @@ -7415,9 +7541,9 @@ fn query_has_fts(query: &SearchQuery) -> bool { } } -/// Find the 1-based parameter index of the first FullText query parameter. -/// Used to pass the same text to ts_rank for relevance sorting. -/// Falls back to 1 if not found (should not happen when has_fts is true). +/// Find the 1-based parameter index of the first `FullText` query parameter. +/// Used to pass the same text to `ts_rank` for relevance sorting. +/// Falls back to 1 if not found (should not happen when `has_fts` is true). fn find_first_fts_param(query: &SearchQuery) -> i32 { fn find_inner(query: &SearchQuery, offset: &mut i32) -> Option { match query { @@ -7439,11 +7565,13 @@ fn find_first_fts_param(query: &SearchQuery) -> i32 { *offset += 5; // Fuzzy now uses 5 params (sim_title, sim_artist, sim_album, sim_filename, ilike) None }, - SearchQuery::FieldMatch { .. } => { + SearchQuery::FieldMatch { .. } | SearchQuery::CompareQuery { .. } => { *offset += 1; None }, - SearchQuery::TypeFilter(_) | SearchQuery::TagFilter(_) => None, + SearchQuery::TypeFilter(_) + | SearchQuery::TagFilter(_) + | SearchQuery::DateQuery { .. } => None, SearchQuery::RangeQuery { start, end, .. } => { // Range queries use 0-2 params depending on bounds if start.is_some() { @@ -7454,11 +7582,6 @@ fn find_first_fts_param(query: &SearchQuery) -> i32 { } None }, - SearchQuery::CompareQuery { .. } => { - *offset += 1; - None - }, - SearchQuery::DateQuery { .. 
} => None, // No params, uses inline SQL SearchQuery::And(children) | SearchQuery::Or(children) => { for child in children { if let Some(idx) = find_inner(child, offset) { @@ -7526,7 +7649,7 @@ mod tests { #[test] fn test_audit_action_roundtrip() { let action = AuditAction::AddedToCollection; - let s = audit_action_to_string(&action); + let s = audit_action_to_string(action); assert_eq!(s, "added_to_collection"); let parsed = audit_action_from_string(&s).unwrap(); assert_eq!(parsed, action); @@ -7535,7 +7658,7 @@ mod tests { #[test] fn test_collection_kind_roundtrip() { let kind = CollectionKind::Virtual; - let s = collection_kind_to_string(&kind); + let s = collection_kind_to_string(kind); assert_eq!(s, "virtual"); let parsed = collection_kind_from_string(&s).unwrap(); assert_eq!(parsed, kind); @@ -7544,7 +7667,7 @@ mod tests { #[test] fn test_custom_field_type_roundtrip() { let ft = CustomFieldType::Boolean; - let s = custom_field_type_to_string(&ft); + let s = custom_field_type_to_string(ft); assert_eq!(s, "boolean"); let parsed = custom_field_type_from_string(&s).unwrap(); assert_eq!(parsed, ft); @@ -7596,8 +7719,8 @@ mod tests { #[test] fn test_sort_order_clause() { - assert_eq!(sort_order_clause(&SortOrder::DateAsc), "created_at ASC"); - assert_eq!(sort_order_clause(&SortOrder::NameDesc), "file_name DESC"); - assert_eq!(sort_order_clause(&SortOrder::SizeAsc), "file_size ASC"); + assert_eq!(sort_order_clause(SortOrder::DateAsc), "created_at ASC"); + assert_eq!(sort_order_clause(SortOrder::NameDesc), "file_name DESC"); + assert_eq!(sort_order_clause(SortOrder::SizeAsc), "file_size ASC"); } } diff --git a/crates/pinakes-core/src/storage/sqlite.rs b/crates/pinakes-core/src/storage/sqlite.rs index 4b33ef1..c377d9e 100644 --- a/crates/pinakes-core/src/storage/sqlite.rs +++ b/crates/pinakes-core/src/storage/sqlite.rs @@ -11,8 +11,22 @@ use uuid::Uuid; use crate::{ error::{PinakesError, Result}, media_type::MediaType, - model::*, - search::*, + model::{ + 
AuditAction, + AuditEntry, + Collection, + CollectionKind, + ContentHash, + CustomField, + CustomFieldType, + ManagedBlob, + ManagedStorageStats, + MediaId, + MediaItem, + Pagination, + Tag, + }, + search::{SearchQuery, SearchRequest, SearchResults, SortOrder}, storage::StorageBackend, }; @@ -28,7 +42,7 @@ fn parse_uuid(s: &str) -> rusqlite::Result { }) } -/// SQLite storage backend using WAL mode for concurrent reads. +/// `SQLite` storage backend using WAL mode for concurrent reads. /// /// All async trait methods delegate to `tokio::task::spawn_blocking` because /// `rusqlite::Connection` is synchronous. The connection is wrapped in an @@ -39,12 +53,21 @@ pub struct SqliteBackend { impl SqliteBackend { /// Open (or create) a database at the given file path. + /// + /// # Errors + /// + /// Returns an error if the database cannot be opened or configured. pub fn new(path: &Path) -> Result { let conn = Connection::open(path)?; Self::configure(conn) } /// Create an in-memory database -- useful for tests. + /// + /// # Errors + /// + /// Returns an error if the in-memory database cannot be created or + /// configured. pub fn in_memory() -> Result { let conn = Connection::open_in_memory()?; Self::configure(conn) @@ -108,7 +131,7 @@ fn row_to_media_item(row: &Row) -> rusqlite::Result { file_name: row.get("file_name")?, media_type: parse_media_type(&media_type_str), content_hash: ContentHash(hash_str), - file_size: row.get::<_, i64>("file_size")? 
as u64, + file_size: row.get::<_, i64>("file_size")?.cast_unsigned(), title: row.get("title")?, artist: row.get("artist")?, album: row.get("album")?, @@ -237,7 +260,31 @@ fn row_to_audit_entry(row: &Row) -> rusqlite::Result { "removed_from_collection" => AuditAction::RemovedFromCollection, "opened" => AuditAction::Opened, "scanned" => AuditAction::Scanned, - _ => AuditAction::Updated, // fallback + "login_success" => AuditAction::LoginSuccess, + "login_failed" => AuditAction::LoginFailed, + "logout" => AuditAction::Logout, + "session_expired" => AuditAction::SessionExpired, + "permission_denied" => AuditAction::PermissionDenied, + "role_changed" => AuditAction::RoleChanged, + "library_access_granted" => AuditAction::LibraryAccessGranted, + "library_access_revoked" => AuditAction::LibraryAccessRevoked, + "user_created" => AuditAction::UserCreated, + "user_updated" => AuditAction::UserUpdated, + "user_deleted" => AuditAction::UserDeleted, + "plugin_installed" => AuditAction::PluginInstalled, + "plugin_uninstalled" => AuditAction::PluginUninstalled, + "plugin_enabled" => AuditAction::PluginEnabled, + "plugin_disabled" => AuditAction::PluginDisabled, + "config_changed" => AuditAction::ConfigChanged, + "root_directory_added" => AuditAction::RootDirectoryAdded, + "root_directory_removed" => AuditAction::RootDirectoryRemoved, + "share_link_created" => AuditAction::ShareLinkCreated, + "share_link_accessed" => AuditAction::ShareLinkAccessed, + "database_vacuumed" => AuditAction::DatabaseVacuumed, + "database_cleared" => AuditAction::DatabaseCleared, + "export_completed" => AuditAction::ExportCompleted, + "integrity_check_completed" => AuditAction::IntegrityCheckCompleted, + _ => AuditAction::Updated, }; Ok(AuditEntry { @@ -249,14 +296,14 @@ fn row_to_audit_entry(row: &Row) -> rusqlite::Result { }) } -fn collection_kind_to_str(kind: CollectionKind) -> &'static str { +const fn collection_kind_to_str(kind: CollectionKind) -> &'static str { match kind { CollectionKind::Manual 
=> "manual", CollectionKind::Virtual => "virtual", } } -fn custom_field_type_to_str(ft: CustomFieldType) -> &'static str { +const fn custom_field_type_to_str(ft: CustomFieldType) -> &'static str { match ft { CustomFieldType::Text => "text", CustomFieldType::Number => "number", @@ -301,7 +348,7 @@ fn load_user_profile_sync( Ok(crate::users::UserProfile { avatar_path: None, bio: None, - preferences: Default::default(), + preferences: crate::users::UserPreferences::default(), }) }, Err(e) => Err(e), @@ -391,7 +438,7 @@ fn load_custom_fields_batch( /// - `where_clauses` are extra WHERE predicates (e.g. type filters), /// - `join_clauses` are extra JOIN snippets (e.g. tag filters). /// - `params` are bind parameter values corresponding to `?` placeholders in -/// where_clauses and join_clauses. +/// `where_clauses` and `join_clauses`. fn search_query_to_fts( query: &SearchQuery, ) -> (String, Vec, Vec, Vec, Vec) { @@ -428,7 +475,7 @@ fn build_fts_expr( let sanitized = sanitize_fts_token(text); // If it's a single word, add prefix matching if !sanitized.contains(' ') && !sanitized.contains('"') { - format!("{}*", sanitized) + format!("{sanitized}*") } else { // For phrases, use as-is but also add NEAR for proximity sanitized @@ -553,7 +600,7 @@ fn build_fts_expr( } } -/// Convert a DateValue to a SQLite datetime comparison expression +/// Convert a `DateValue` to a `SQLite` datetime comparison expression fn date_value_to_sqlite_expr( col: &str, value: &crate::search::DateValue, @@ -607,12 +654,11 @@ fn sanitize_fts_token(s: &str) -> String { format!("\"{escaped}\"") } -fn sort_order_to_sql(sort: &SortOrder) -> &'static str { +const fn sort_order_to_sql(sort: SortOrder) -> &'static str { match sort { - SortOrder::Relevance => "m.created_at DESC", // FTS rank not easily - // portable; use date + // FTS rank is not easily portable; use date for Relevance + SortOrder::Relevance | SortOrder::DateDesc => "m.created_at DESC", SortOrder::DateAsc => "m.created_at ASC", - 
SortOrder::DateDesc => "m.created_at DESC", SortOrder::NameAsc => "m.file_name ASC", SortOrder::NameDesc => "m.file_name DESC", SortOrder::SizeAsc => "m.file_size ASC", @@ -632,45 +678,53 @@ impl StorageBackend for SqliteBackend { crate::storage::migrations::run_sqlite_migrations(&mut db) }) .await - .map_err(|e| PinakesError::Database(format!("run_migrations: {}", e)))? + .map_err(|e| PinakesError::Database(format!("run_migrations: {e}")))? } async fn add_root_dir(&self, path: PathBuf) -> Result<()> { let path_display = path.display().to_string(); let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - db.execute( - "INSERT OR IGNORE INTO root_dirs (path) VALUES (?1)", - params![path.to_string_lossy().as_ref()], - )?; + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + db.execute( + "INSERT OR IGNORE INTO root_dirs (path) VALUES (?1)", + params![path.to_string_lossy().as_ref()], + )?; + } Ok(()) }) .await .map_err(|e| { - PinakesError::Database(format!("add_root_dir {}: {}", path_display, e)) + PinakesError::Database(format!("add_root_dir {path_display}: {e}")) })? } async fn list_root_dirs(&self) -> Result> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let mut stmt = db.prepare("SELECT path FROM root_dirs ORDER BY path")?; - let rows = stmt - .query_map([], |row| { - let p: String = row.get(0)?; - Ok(PathBuf::from(p)) - })? - .collect::>>()?; + let rows = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let mut stmt = + db.prepare("SELECT path FROM root_dirs ORDER BY path")?; + let rows = stmt + .query_map([], |row| { + let p: String = row.get(0)?; + Ok(PathBuf::from(p)) + })? 
+ .collect::>>()?; + drop(stmt); + drop(db); + rows + }; Ok(rows) }) .await - .map_err(|e| PinakesError::Database(format!("list_root_dirs: {}", e)))? + .map_err(|e| PinakesError::Database(format!("list_root_dirs: {e}")))? } async fn remove_root_dir(&self, path: &Path) -> Result<()> { @@ -678,17 +732,19 @@ impl StorageBackend for SqliteBackend { let path_display = path.display().to_string(); let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - db.execute("DELETE FROM root_dirs WHERE path = ?1", params![ - path.to_string_lossy().as_ref() - ])?; + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + db.execute("DELETE FROM root_dirs WHERE path = ?1", params![ + path.to_string_lossy().as_ref() + ])?; + } Ok(()) }) .await .map_err(|e| { - PinakesError::Database(format!("remove_root_dir {}: {}", path_display, e)) + PinakesError::Database(format!("remove_root_dir {path_display}: {e}")) })? 
} @@ -696,48 +752,50 @@ impl StorageBackend for SqliteBackend { let item = item.clone(); let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - db.execute( - "INSERT INTO media_items (id, path, file_name, media_type, \ - content_hash, file_size, title, artist, album, genre, year, \ - duration_secs, description, thumbnail_path, file_mtime, date_taken, \ - latitude, longitude, camera_make, camera_model, rating, \ - perceptual_hash, created_at, updated_at) VALUES (?1, ?2, ?3, ?4, ?5, \ - ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, \ - ?20, ?21, ?22, ?23, ?24)", - params![ - item.id.0.to_string(), - item.path.to_string_lossy().as_ref(), - item.file_name, - media_type_to_str(&item.media_type), - item.content_hash.0, - item.file_size as i64, - item.title, - item.artist, - item.album, - item.genre, - item.year, - item.duration_secs, - item.description, - item - .thumbnail_path - .as_ref() - .map(|p| p.to_string_lossy().to_string()), - item.file_mtime, - item.date_taken.as_ref().map(|d| d.to_rfc3339()), - item.latitude, - item.longitude, - item.camera_make, - item.camera_model, - item.rating, - item.perceptual_hash, - item.created_at.to_rfc3339(), - item.updated_at.to_rfc3339(), - ], - ) - .map_err(crate::error::db_ctx("insert_media", &item.id))?; + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + db.execute( + "INSERT INTO media_items (id, path, file_name, media_type, \ + content_hash, file_size, title, artist, album, genre, year, \ + duration_secs, description, thumbnail_path, file_mtime, \ + date_taken, latitude, longitude, camera_make, camera_model, \ + rating, perceptual_hash, created_at, updated_at) VALUES (?1, ?2, \ + ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, \ + ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24)", + params![ + item.id.0.to_string(), + item.path.to_string_lossy().as_ref(), + 
item.file_name, + media_type_to_str(&item.media_type), + item.content_hash.0, + item.file_size.cast_signed(), + item.title, + item.artist, + item.album, + item.genre, + item.year, + item.duration_secs, + item.description, + item + .thumbnail_path + .as_ref() + .map(|p| p.to_string_lossy().to_string()), + item.file_mtime, + item.date_taken.as_ref().map(chrono::DateTime::to_rfc3339), + item.latitude, + item.longitude, + item.camera_make, + item.camera_model, + item.rating, + item.perceptual_hash, + item.created_at.to_rfc3339(), + item.updated_at.to_rfc3339(), + ], + ) + .map_err(crate::error::db_ctx("insert_media", &item.id))?; + } Ok(()) }) .await @@ -747,15 +805,18 @@ impl StorageBackend for SqliteBackend { async fn count_media(&self) -> Result { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let count: i64 = db.query_row( - "SELECT COUNT(*) FROM media_items WHERE deleted_at IS NULL", - [], - |row| row.get(0), - )?; - Ok(count as u64) + let count = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let count: i64 = db.query_row( + "SELECT COUNT(*) FROM media_items WHERE deleted_at IS NULL", + [], + |row| row.get(0), + )?; + count + }; + Ok(count.cast_unsigned()) }) .await .map_err(|e| PinakesError::Database(e.to_string()))? 
@@ -764,28 +825,34 @@ impl StorageBackend for SqliteBackend { async fn get_media(&self, id: MediaId) -> Result { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let mut stmt = db.prepare( - "SELECT id, path, file_name, media_type, content_hash, file_size, \ - title, artist, album, genre, year, duration_secs, description, \ - thumbnail_path, file_mtime, date_taken, latitude, longitude, \ - camera_make, camera_model, rating, perceptual_hash, storage_mode, \ - original_filename, uploaded_at, storage_key, created_at, updated_at, \ - deleted_at, links_extracted_at FROM media_items WHERE id = ?1", - )?; - let mut item = stmt - .query_row(params![id.0.to_string()], row_to_media_item) - .map_err(|e| { - match e { - rusqlite::Error::QueryReturnedNoRows => { - PinakesError::NotFound(format!("media item {id}")) - }, - other => PinakesError::from(other), - } - })?; - item.custom_fields = load_custom_fields_sync(&db, item.id)?; + let item = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let mut stmt = db.prepare( + "SELECT id, path, file_name, media_type, content_hash, file_size, \ + title, artist, album, genre, year, duration_secs, description, \ + thumbnail_path, file_mtime, date_taken, latitude, longitude, \ + camera_make, camera_model, rating, perceptual_hash, storage_mode, \ + original_filename, uploaded_at, storage_key, created_at, \ + updated_at, deleted_at, links_extracted_at FROM media_items WHERE \ + id = ?1", + )?; + let mut item = stmt + .query_row(params![id.0.to_string()], row_to_media_item) + .map_err(|e| { + match e { + rusqlite::Error::QueryReturnedNoRows => { + PinakesError::NotFound(format!("media item {id}")) + }, + other => PinakesError::from(other), + } + })?; + drop(stmt); + item.custom_fields = load_custom_fields_sync(&db, item.id)?; + drop(db); + item + }; Ok(item) }) .await @@ -799,27 +866,33 @@ impl 
StorageBackend for SqliteBackend { let hash = hash.clone(); let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let mut stmt = db.prepare( - "SELECT id, path, file_name, media_type, content_hash, file_size, \ - title, artist, album, genre, year, duration_secs, description, \ - thumbnail_path, file_mtime, date_taken, latitude, longitude, \ - camera_make, camera_model, rating, perceptual_hash, storage_mode, \ - original_filename, uploaded_at, storage_key, created_at, updated_at, \ - deleted_at, links_extracted_at FROM media_items WHERE content_hash = \ - ?1", - )?; - let result = stmt - .query_row(params![hash.0], row_to_media_item) - .optional()?; - if let Some(mut item) = result { - item.custom_fields = load_custom_fields_sync(&db, item.id)?; - Ok(Some(item)) - } else { - Ok(None) - } + let result = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let mut stmt = db.prepare( + "SELECT id, path, file_name, media_type, content_hash, file_size, \ + title, artist, album, genre, year, duration_secs, description, \ + thumbnail_path, file_mtime, date_taken, latitude, longitude, \ + camera_make, camera_model, rating, perceptual_hash, storage_mode, \ + original_filename, uploaded_at, storage_key, created_at, \ + updated_at, deleted_at, links_extracted_at FROM media_items WHERE \ + content_hash = ?1", + )?; + let result = stmt + .query_row(params![hash.0], row_to_media_item) + .optional()?; + drop(stmt); + if let Some(mut item) = result { + item.custom_fields = load_custom_fields_sync(&db, item.id)?; + drop(db); + Some(item) + } else { + drop(db); + None + } + }; + Ok(result) }) .await .map_err(|e| PinakesError::Database(e.to_string()))? 
@@ -832,26 +905,33 @@ impl StorageBackend for SqliteBackend { let path_str = path.to_string_lossy().to_string(); let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let mut stmt = db.prepare( - "SELECT id, path, file_name, media_type, content_hash, file_size, \ - title, artist, album, genre, year, duration_secs, description, \ - thumbnail_path, file_mtime, date_taken, latitude, longitude, \ - camera_make, camera_model, rating, perceptual_hash, storage_mode, \ - original_filename, uploaded_at, storage_key, created_at, updated_at, \ - deleted_at, links_extracted_at FROM media_items WHERE path = ?1", - )?; - let result = stmt - .query_row(params![path_str], row_to_media_item) - .optional()?; - if let Some(mut item) = result { - item.custom_fields = load_custom_fields_sync(&db, item.id)?; - Ok(Some(item)) - } else { - Ok(None) - } + let result = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let mut stmt = db.prepare( + "SELECT id, path, file_name, media_type, content_hash, file_size, \ + title, artist, album, genre, year, duration_secs, description, \ + thumbnail_path, file_mtime, date_taken, latitude, longitude, \ + camera_make, camera_model, rating, perceptual_hash, storage_mode, \ + original_filename, uploaded_at, storage_key, created_at, \ + updated_at, deleted_at, links_extracted_at FROM media_items WHERE \ + path = ?1", + )?; + let result = stmt + .query_row(params![path_str], row_to_media_item) + .optional()?; + drop(stmt); + if let Some(mut item) = result { + item.custom_fields = load_custom_fields_sync(&db, item.id)?; + drop(db); + Some(item) + } else { + drop(db); + None + } + }; + Ok(result) }) .await .map_err(|e| PinakesError::Database(e.to_string()))? 
@@ -864,37 +944,45 @@ impl StorageBackend for SqliteBackend { let pagination = pagination.clone(); let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let order_by = match pagination.sort.as_deref() { - Some("created_at_asc") => "created_at ASC", - Some("name_asc") => "file_name ASC", - Some("name_desc") => "file_name DESC", - Some("size_asc") => "file_size ASC", - Some("size_desc") => "file_size DESC", - Some("type_asc") => "media_type ASC", - Some("type_desc") => "media_type DESC", - // "created_at_desc" or any unrecognized value falls back to default - _ => "created_at DESC", + let rows = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let order_by = match pagination.sort.as_deref() { + Some("created_at_asc") => "created_at ASC", + Some("name_asc") => "file_name ASC", + Some("name_desc") => "file_name DESC", + Some("size_asc") => "file_size ASC", + Some("size_desc") => "file_size DESC", + Some("type_asc") => "media_type ASC", + Some("type_desc") => "media_type DESC", + // "created_at_desc" or any unrecognized value falls back to default + _ => "created_at DESC", + }; + let sql = format!( + "SELECT id, path, file_name, media_type, content_hash, file_size, \ + title, artist, album, genre, year, duration_secs, description, \ + thumbnail_path, file_mtime, date_taken, latitude, longitude, \ + camera_make, camera_model, rating, perceptual_hash, storage_mode, \ + original_filename, uploaded_at, storage_key, created_at, \ + updated_at, deleted_at, links_extracted_at FROM media_items WHERE \ + deleted_at IS NULL ORDER BY {order_by} LIMIT ?1 OFFSET ?2" + ); + let mut stmt = db.prepare(&sql)?; + let mut rows = stmt + .query_map( + params![ + pagination.limit.cast_signed(), + pagination.offset.cast_signed() + ], + row_to_media_item, + )? 
+ .collect::>>()?; + drop(stmt); + load_custom_fields_batch(&db, &mut rows)?; + drop(db); + rows }; - let sql = format!( - "SELECT id, path, file_name, media_type, content_hash, file_size, \ - title, artist, album, genre, year, duration_secs, description, \ - thumbnail_path, file_mtime, date_taken, latitude, longitude, \ - camera_make, camera_model, rating, perceptual_hash, storage_mode, \ - original_filename, uploaded_at, storage_key, created_at, updated_at, \ - deleted_at, links_extracted_at FROM media_items WHERE deleted_at IS \ - NULL ORDER BY {order_by} LIMIT ?1 OFFSET ?2" - ); - let mut stmt = db.prepare(&sql)?; - let mut rows = stmt - .query_map( - params![pagination.limit as i64, pagination.offset as i64], - row_to_media_item, - )? - .collect::>>()?; - load_custom_fields_batch(&db, &mut rows)?; Ok(rows) }) .await @@ -905,50 +993,56 @@ impl StorageBackend for SqliteBackend { let item = item.clone(); let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let changed = db - .execute( - "UPDATE media_items SET path = ?2, file_name = ?3, media_type = ?4, \ - content_hash = ?5, file_size = ?6, title = ?7, artist = ?8, album \ - = ?9, genre = ?10, year = ?11, duration_secs = ?12, description = \ - ?13, thumbnail_path = ?14, file_mtime = ?15, date_taken = ?16, \ - latitude = ?17, longitude = ?18, camera_make = ?19, camera_model = \ - ?20, rating = ?21, perceptual_hash = ?22, updated_at = ?23 WHERE \ - id = ?1", - params![ - item.id.0.to_string(), - item.path.to_string_lossy().as_ref(), - item.file_name, - media_type_to_str(&item.media_type), - item.content_hash.0, - item.file_size as i64, - item.title, - item.artist, - item.album, - item.genre, - item.year, - item.duration_secs, - item.description, - item - .thumbnail_path - .as_ref() - .map(|p| p.to_string_lossy().to_string()), - item.file_mtime, - item.date_taken.as_ref().map(|d| d.to_rfc3339()), - item.latitude, 
- item.longitude, - item.camera_make, - item.camera_model, - item.rating, - item.perceptual_hash, - item.updated_at.to_rfc3339(), - ], - ) - .map_err(crate::error::db_ctx("update_media", &item.id))?; - if changed == 0 { - return Err(PinakesError::NotFound(format!("media item {}", item.id))); + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let changed = db + .execute( + "UPDATE media_items SET path = ?2, file_name = ?3, media_type = \ + ?4, content_hash = ?5, file_size = ?6, title = ?7, artist = ?8, \ + album = ?9, genre = ?10, year = ?11, duration_secs = ?12, \ + description = ?13, thumbnail_path = ?14, file_mtime = ?15, \ + date_taken = ?16, latitude = ?17, longitude = ?18, camera_make = \ + ?19, camera_model = ?20, rating = ?21, perceptual_hash = ?22, \ + updated_at = ?23 WHERE id = ?1", + params![ + item.id.0.to_string(), + item.path.to_string_lossy().as_ref(), + item.file_name, + media_type_to_str(&item.media_type), + item.content_hash.0, + item.file_size.cast_signed(), + item.title, + item.artist, + item.album, + item.genre, + item.year, + item.duration_secs, + item.description, + item + .thumbnail_path + .as_ref() + .map(|p| p.to_string_lossy().to_string()), + item.file_mtime, + item.date_taken.as_ref().map(chrono::DateTime::to_rfc3339), + item.latitude, + item.longitude, + item.camera_make, + item.camera_model, + item.rating, + item.perceptual_hash, + item.updated_at.to_rfc3339(), + ], + ) + .map_err(crate::error::db_ctx("update_media", &item.id))?; + drop(db); + if changed == 0 { + return Err(PinakesError::NotFound(format!( + "media item {}", + item.id + ))); + } } Ok(()) }) @@ -959,16 +1053,19 @@ impl StorageBackend for SqliteBackend { async fn delete_media(&self, id: MediaId) -> Result<()> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let changed = db - .execute("DELETE FROM media_items WHERE id = ?1", 
params![ - id.0.to_string() - ]) - .map_err(crate::error::db_ctx("delete_media", id))?; - if changed == 0 { - return Err(PinakesError::NotFound(format!("media item {id}"))); + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let changed = db + .execute("DELETE FROM media_items WHERE id = ?1", params![ + id.0.to_string() + ]) + .map_err(crate::error::db_ctx("delete_media", id))?; + drop(db); + if changed == 0 { + return Err(PinakesError::NotFound(format!("media item {id}"))); + } } Ok(()) }) @@ -979,12 +1076,17 @@ impl StorageBackend for SqliteBackend { async fn delete_all_media(&self) -> Result { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let count: u64 = - db.query_row("SELECT COUNT(*) FROM media_items", [], |row| row.get(0))?; - db.execute("DELETE FROM media_items", [])?; + let count = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let count: u64 = + db.query_row("SELECT COUNT(*) FROM media_items", [], |row| { + row.get(0) + })?; + db.execute("DELETE FROM media_items", [])?; + count + }; Ok(count) }) .await @@ -999,27 +1101,31 @@ impl StorageBackend for SqliteBackend { let name = name.to_string(); let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let id = Uuid::now_v7(); - let now = Utc::now(); - db.execute( - "INSERT INTO tags (id, name, parent_id, created_at) VALUES (?1, ?2, \ - ?3, ?4)", - params![ - id.to_string(), + let tag = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let id = Uuid::now_v7(); + let now = Utc::now(); + db.execute( + "INSERT INTO tags (id, name, parent_id, created_at) VALUES (?1, ?2, \ + ?3, ?4)", + params![ + id.to_string(), + name, + parent_id.map(|p| p.to_string()), + now.to_rfc3339(), + ], + )?; + 
drop(db); + Tag { + id, name, - parent_id.map(|p| p.to_string()), - now.to_rfc3339(), - ], - )?; - Ok(Tag { - id, - name, - parent_id, - created_at: now, - }) + parent_id, + created_at: now, + } + }; + Ok(tag) }) .await .map_err(|e| PinakesError::Database(e.to_string()))? @@ -1028,22 +1134,28 @@ impl StorageBackend for SqliteBackend { async fn get_tag(&self, id: Uuid) -> Result { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let mut stmt = db.prepare( - "SELECT id, name, parent_id, created_at FROM tags WHERE id = ?1", - )?; - stmt - .query_row(params![id.to_string()], row_to_tag) - .map_err(|e| { - match e { - rusqlite::Error::QueryReturnedNoRows => { - PinakesError::TagNotFound(id.to_string()) - }, - other => PinakesError::from(other), - } - }) + let tag = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let mut stmt = db.prepare( + "SELECT id, name, parent_id, created_at FROM tags WHERE id = ?1", + )?; + let tag = stmt + .query_row(params![id.to_string()], row_to_tag) + .map_err(|e| { + match e { + rusqlite::Error::QueryReturnedNoRows => { + PinakesError::TagNotFound(id.to_string()) + }, + other => PinakesError::from(other), + } + })?; + drop(stmt); + drop(db); + tag + }; + Ok(tag) }) .await .map_err(|e| PinakesError::Database(e.to_string()))? @@ -1052,15 +1164,20 @@ impl StorageBackend for SqliteBackend { async fn list_tags(&self) -> Result> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let mut stmt = db.prepare( - "SELECT id, name, parent_id, created_at FROM tags ORDER BY name", - )?; - let rows = stmt - .query_map([], row_to_tag)? 
- .collect::>>()?; + let rows = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let mut stmt = db.prepare( + "SELECT id, name, parent_id, created_at FROM tags ORDER BY name", + )?; + let rows = stmt + .query_map([], row_to_tag)? + .collect::>>()?; + drop(stmt); + drop(db); + rows + }; Ok(rows) }) .await @@ -1070,13 +1187,16 @@ impl StorageBackend for SqliteBackend { async fn delete_tag(&self, id: Uuid) -> Result<()> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let changed = - db.execute("DELETE FROM tags WHERE id = ?1", params![id.to_string()])?; - if changed == 0 { - return Err(PinakesError::TagNotFound(id.to_string())); + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let changed = db + .execute("DELETE FROM tags WHERE id = ?1", params![id.to_string()])?; + drop(db); + if changed == 0 { + return Err(PinakesError::TagNotFound(id.to_string())); + } } Ok(()) }) @@ -1087,13 +1207,15 @@ impl StorageBackend for SqliteBackend { async fn tag_media(&self, media_id: MediaId, tag_id: Uuid) -> Result<()> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - db.execute( - "INSERT OR IGNORE INTO media_tags (media_id, tag_id) VALUES (?1, ?2)", - params![media_id.0.to_string(), tag_id.to_string()], - )?; + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + db.execute( + "INSERT OR IGNORE INTO media_tags (media_id, tag_id) VALUES (?1, ?2)", + params![media_id.0.to_string(), tag_id.to_string()], + )?; + } Ok(()) }) .await @@ -1103,13 +1225,15 @@ impl StorageBackend for SqliteBackend { async fn untag_media(&self, media_id: MediaId, tag_id: Uuid) -> Result<()> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - 
.lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - db.execute( - "DELETE FROM media_tags WHERE media_id = ?1 AND tag_id = ?2", - params![media_id.0.to_string(), tag_id.to_string()], - )?; + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + db.execute( + "DELETE FROM media_tags WHERE media_id = ?1 AND tag_id = ?2", + params![media_id.0.to_string(), tag_id.to_string()], + )?; + } Ok(()) }) .await @@ -1119,17 +1243,22 @@ impl StorageBackend for SqliteBackend { async fn get_media_tags(&self, media_id: MediaId) -> Result> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let mut stmt = db.prepare( - "SELECT t.id, t.name, t.parent_id, t.created_at FROM tags t JOIN \ - media_tags mt ON mt.tag_id = t.id WHERE mt.media_id = ?1 ORDER BY \ - t.name", - )?; - let rows = stmt - .query_map(params![media_id.0.to_string()], row_to_tag)? - .collect::>>()?; + let rows = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let mut stmt = db.prepare( + "SELECT t.id, t.name, t.parent_id, t.created_at FROM tags t JOIN \ + media_tags mt ON mt.tag_id = t.id WHERE mt.media_id = ?1 ORDER BY \ + t.name", + )?; + let rows = stmt + .query_map(params![media_id.0.to_string()], row_to_tag)? 
+ .collect::>>()?; + drop(stmt); + drop(db); + rows + }; Ok(rows) }) .await @@ -1139,19 +1268,24 @@ impl StorageBackend for SqliteBackend { async fn get_tag_descendants(&self, tag_id: Uuid) -> Result> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let mut stmt = db.prepare( - "WITH RECURSIVE descendants(id, name, parent_id, created_at) AS ( \ - SELECT id, name, parent_id, created_at FROM tags WHERE parent_id = \ - ?1 UNION ALL SELECT t.id, t.name, t.parent_id, t.created_at FROM \ - tags t JOIN descendants d ON t.parent_id = d.id ) SELECT id, name, \ - parent_id, created_at FROM descendants ORDER BY name", - )?; - let rows = stmt - .query_map(params![tag_id.to_string()], row_to_tag)? - .collect::>>()?; + let rows = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let mut stmt = db.prepare( + "WITH RECURSIVE descendants(id, name, parent_id, created_at) AS ( \ + SELECT id, name, parent_id, created_at FROM tags WHERE parent_id = \ + ?1 UNION ALL SELECT t.id, t.name, t.parent_id, t.created_at FROM \ + tags t JOIN descendants d ON t.parent_id = d.id ) SELECT id, name, \ + parent_id, created_at FROM descendants ORDER BY name", + )?; + let rows = stmt + .query_map(params![tag_id.to_string()], row_to_tag)? 
+ .collect::>>()?; + drop(stmt); + drop(db); + rows + }; Ok(rows) }) .await @@ -1166,37 +1300,42 @@ impl StorageBackend for SqliteBackend { filter_query: Option<&str>, ) -> Result { let name = name.to_string(); - let description = description.map(|s| s.to_string()); - let filter_query = filter_query.map(|s| s.to_string()); + let description = description.map(std::string::ToString::to_string); + let filter_query = filter_query.map(std::string::ToString::to_string); let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let id = Uuid::now_v7(); - let now = Utc::now(); - db.execute( - "INSERT INTO collections (id, name, description, kind, filter_query, \ - created_at, updated_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)", - params![ - id.to_string(), + let collection = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let id = Uuid::now_v7(); + let now = Utc::now(); + db.execute( + "INSERT INTO collections (id, name, description, kind, \ + filter_query, created_at, updated_at) VALUES (?1, ?2, ?3, ?4, ?5, \ + ?6, ?7)", + params![ + id.to_string(), + name, + description, + collection_kind_to_str(kind), + filter_query, + now.to_rfc3339(), + now.to_rfc3339(), + ], + )?; + drop(db); + Collection { + id, name, description, - collection_kind_to_str(kind), + kind, filter_query, - now.to_rfc3339(), - now.to_rfc3339(), - ], - )?; - Ok(Collection { - id, - name, - description, - kind, - filter_query, - created_at: now, - updated_at: now, - }) + created_at: now, + updated_at: now, + } + }; + Ok(collection) }) .await .map_err(|e| PinakesError::Database(e.to_string()))? 
@@ -1205,23 +1344,29 @@ impl StorageBackend for SqliteBackend { async fn get_collection(&self, id: Uuid) -> Result { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let mut stmt = db.prepare( - "SELECT id, name, description, kind, filter_query, created_at, \ - updated_at FROM collections WHERE id = ?1", - )?; - stmt - .query_row(params![id.to_string()], row_to_collection) - .map_err(|e| { - match e { - rusqlite::Error::QueryReturnedNoRows => { - PinakesError::CollectionNotFound(id.to_string()) - }, - other => PinakesError::from(other), - } - }) + let collection = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let mut stmt = db.prepare( + "SELECT id, name, description, kind, filter_query, created_at, \ + updated_at FROM collections WHERE id = ?1", + )?; + let collection = stmt + .query_row(params![id.to_string()], row_to_collection) + .map_err(|e| { + match e { + rusqlite::Error::QueryReturnedNoRows => { + PinakesError::CollectionNotFound(id.to_string()) + }, + other => PinakesError::from(other), + } + })?; + drop(stmt); + drop(db); + collection + }; + Ok(collection) }) .await .map_err(|e| PinakesError::Database(e.to_string()))? @@ -1230,16 +1375,21 @@ impl StorageBackend for SqliteBackend { async fn list_collections(&self) -> Result> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let mut stmt = db.prepare( - "SELECT id, name, description, kind, filter_query, created_at, \ - updated_at FROM collections ORDER BY name", - )?; - let rows = stmt - .query_map([], row_to_collection)? 
- .collect::>>()?; + let rows = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let mut stmt = db.prepare( + "SELECT id, name, description, kind, filter_query, created_at, \ + updated_at FROM collections ORDER BY name", + )?; + let rows = stmt + .query_map([], row_to_collection)? + .collect::>>()?; + drop(stmt); + drop(db); + rows + }; Ok(rows) }) .await @@ -1249,15 +1399,18 @@ impl StorageBackend for SqliteBackend { async fn delete_collection(&self, id: Uuid) -> Result<()> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let changed = db - .execute("DELETE FROM collections WHERE id = ?1", params![ - id.to_string() - ])?; - if changed == 0 { - return Err(PinakesError::CollectionNotFound(id.to_string())); + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let changed = db + .execute("DELETE FROM collections WHERE id = ?1", params![ + id.to_string() + ])?; + drop(db); + if changed == 0 { + return Err(PinakesError::CollectionNotFound(id.to_string())); + } } Ok(()) }) @@ -1273,20 +1426,22 @@ impl StorageBackend for SqliteBackend { ) -> Result<()> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let now = Utc::now(); - db.execute( - "INSERT OR REPLACE INTO collection_members (collection_id, media_id, \ - position, added_at) VALUES (?1, ?2, ?3, ?4)", - params![ - collection_id.to_string(), - media_id.0.to_string(), - position, - now.to_rfc3339(), - ], - )?; + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let now = Utc::now(); + db.execute( + "INSERT OR REPLACE INTO collection_members (collection_id, \ + media_id, position, added_at) VALUES (?1, ?2, ?3, ?4)", + params![ + collection_id.to_string(), + media_id.0.to_string(), + position, + 
now.to_rfc3339(), + ], + )?; + } Ok(()) }) .await @@ -1300,14 +1455,16 @@ impl StorageBackend for SqliteBackend { ) -> Result<()> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - db.execute( - "DELETE FROM collection_members WHERE collection_id = ?1 AND media_id \ - = ?2", - params![collection_id.to_string(), media_id.0.to_string()], - )?; + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + db.execute( + "DELETE FROM collection_members WHERE collection_id = ?1 AND \ + media_id = ?2", + params![collection_id.to_string(), media_id.0.to_string()], + )?; + } Ok(()) }) .await @@ -1320,24 +1477,29 @@ impl StorageBackend for SqliteBackend { ) -> Result> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let mut stmt = db.prepare( - "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, \ - m.file_size, m.title, m.artist, m.album, m.genre, m.year, \ - m.duration_secs, m.description, m.thumbnail_path, m.file_mtime, \ - m.date_taken, m.latitude, m.longitude, m.camera_make, \ - m.camera_model, m.rating, m.perceptual_hash, m.storage_mode, \ - m.original_filename, m.uploaded_at, m.storage_key, m.created_at, \ - m.updated_at, m.deleted_at, m.links_extracted_at FROM media_items m \ - JOIN collection_members cm ON cm.media_id = m.id WHERE \ - cm.collection_id = ?1 ORDER BY cm.position", - )?; - let mut rows = stmt - .query_map(params![collection_id.to_string()], row_to_media_item)? 
- .collect::>>()?; - load_custom_fields_batch(&db, &mut rows)?; + let rows = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let mut stmt = db.prepare( + "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, \ + m.file_size, m.title, m.artist, m.album, m.genre, m.year, \ + m.duration_secs, m.description, m.thumbnail_path, m.file_mtime, \ + m.date_taken, m.latitude, m.longitude, m.camera_make, \ + m.camera_model, m.rating, m.perceptual_hash, m.storage_mode, \ + m.original_filename, m.uploaded_at, m.storage_key, m.created_at, \ + m.updated_at, m.deleted_at, m.links_extracted_at FROM media_items \ + m JOIN collection_members cm ON cm.media_id = m.id WHERE \ + cm.collection_id = ?1 ORDER BY cm.position", + )?; + let mut rows = stmt + .query_map(params![collection_id.to_string()], row_to_media_item)? + .collect::>>()?; + drop(stmt); + load_custom_fields_batch(&db, &mut rows)?; + drop(db); + rows + }; Ok(rows) }) .await @@ -1348,99 +1510,107 @@ impl StorageBackend for SqliteBackend { let request = request.clone(); let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; + let results = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; - let (fts_expr, _like_terms, where_clauses, join_clauses, bind_params) = - search_query_to_fts(&request.query); + let (fts_expr, _like_terms, where_clauses, join_clauses, bind_params) = + search_query_to_fts(&request.query); - let use_fts = !fts_expr.is_empty(); - let order_by = sort_order_to_sql(&request.sort); + let use_fts = !fts_expr.is_empty(); + let order_by = sort_order_to_sql(request.sort); - // Build the base query. 
- let mut sql = String::from( - "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, \ - m.file_size, m.title, m.artist, m.album, m.genre, m.year, \ - m.duration_secs, m.description, m.thumbnail_path, m.file_mtime, \ - m.date_taken, m.latitude, m.longitude, m.camera_make, \ - m.camera_model, m.rating, m.perceptual_hash, m.storage_mode, \ - m.original_filename, m.uploaded_at, m.storage_key, m.created_at, \ - m.updated_at, m.deleted_at, m.links_extracted_at FROM media_items m ", - ); + // Build the base query. + let mut sql = String::from( + "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, \ + m.file_size, m.title, m.artist, m.album, m.genre, m.year, \ + m.duration_secs, m.description, m.thumbnail_path, m.file_mtime, \ + m.date_taken, m.latitude, m.longitude, m.camera_make, \ + m.camera_model, m.rating, m.perceptual_hash, m.storage_mode, \ + m.original_filename, m.uploaded_at, m.storage_key, m.created_at, \ + m.updated_at, m.deleted_at, m.links_extracted_at FROM media_items \ + m ", + ); - if use_fts { - sql.push_str("JOIN media_fts ON media_fts.rowid = m.rowid "); - } + if use_fts { + sql.push_str("JOIN media_fts ON media_fts.rowid = m.rowid "); + } - for j in &join_clauses { - sql.push_str(j); - sql.push(' '); - } + for j in &join_clauses { + sql.push_str(j); + sql.push(' '); + } - // Collect all bind parameters: first the filter params, then FTS - // match (if any), then LIMIT and OFFSET. - let mut all_params: Vec = bind_params.clone(); + // Collect all bind parameters: first the filter params, then FTS + // match (if any), then LIMIT and OFFSET. 
+ let mut all_params: Vec = bind_params.clone(); - let mut conditions = where_clauses.clone(); - if use_fts { - conditions.push("media_fts MATCH ?".to_string()); - all_params.push(fts_expr.clone()); - } + let mut conditions = where_clauses; + if use_fts { + conditions.push("media_fts MATCH ?".to_string()); + all_params.push(fts_expr.clone()); + } - if !conditions.is_empty() { - sql.push_str("WHERE "); - sql.push_str(&conditions.join(" AND ")); - sql.push(' '); - } + if !conditions.is_empty() { + sql.push_str("WHERE "); + sql.push_str(&conditions.join(" AND ")); + sql.push(' '); + } - sql.push_str(&format!("ORDER BY {order_by} LIMIT ? OFFSET ?",)); - all_params.push(request.pagination.limit.to_string()); - all_params.push(request.pagination.offset.to_string()); + sql.push_str("ORDER BY "); + sql.push_str(order_by); + sql.push_str(" LIMIT ? OFFSET ?"); + all_params.push(request.pagination.limit.to_string()); + all_params.push(request.pagination.offset.to_string()); - let mut stmt = db.prepare(&sql)?; - let param_refs: Vec<&dyn rusqlite::types::ToSql> = all_params - .iter() - .map(|s| s as &dyn rusqlite::types::ToSql) - .collect(); - let mut items = stmt - .query_map(param_refs.as_slice(), row_to_media_item)? - .collect::>>()?; - load_custom_fields_batch(&db, &mut items)?; + let mut stmt = db.prepare(&sql)?; + let param_refs: Vec<&dyn rusqlite::types::ToSql> = all_params + .iter() + .map(|s| s as &dyn rusqlite::types::ToSql) + .collect(); + let mut items = stmt + .query_map(param_refs.as_slice(), row_to_media_item)? 
+ .collect::>>()?; + drop(stmt); + load_custom_fields_batch(&db, &mut items)?; - // Count query (same filters, no LIMIT/OFFSET) - let mut count_sql = String::from("SELECT COUNT(*) FROM media_items m "); - if use_fts { - count_sql.push_str("JOIN media_fts ON media_fts.rowid = m.rowid "); - } - for j in &join_clauses { - count_sql.push_str(j); - count_sql.push(' '); - } - if !conditions.is_empty() { - count_sql.push_str("WHERE "); - count_sql.push_str(&conditions.join(" AND ")); - } + // Count query (same filters, no LIMIT/OFFSET) + let mut count_sql = String::from("SELECT COUNT(*) FROM media_items m "); + if use_fts { + count_sql.push_str("JOIN media_fts ON media_fts.rowid = m.rowid "); + } + for j in &join_clauses { + count_sql.push_str(j); + count_sql.push(' '); + } + if !conditions.is_empty() { + count_sql.push_str("WHERE "); + count_sql.push_str(&conditions.join(" AND ")); + } - // Count query uses the same filter params (+ FTS match) but no - // LIMIT/OFFSET - let mut count_params: Vec = bind_params; - if use_fts { - count_params.push(fts_expr); - } - let count_param_refs: Vec<&dyn rusqlite::types::ToSql> = count_params - .iter() - .map(|s| s as &dyn rusqlite::types::ToSql) - .collect(); - let total_count: i64 = - db.query_row(&count_sql, count_param_refs.as_slice(), |row| { - row.get(0) - })?; + // Count query uses the same filter params (+ FTS match) but no + // LIMIT/OFFSET + let mut count_params: Vec = bind_params; + if use_fts { + count_params.push(fts_expr); + } + let count_param_refs: Vec<&dyn rusqlite::types::ToSql> = count_params + .iter() + .map(|s| s as &dyn rusqlite::types::ToSql) + .collect(); + let total_count: i64 = + db.query_row(&count_sql, count_param_refs.as_slice(), |row| { + row.get(0) + })?; + drop(db); - Ok(SearchResults { - items, - total_count: total_count as u64, - }) + SearchResults { + items, + total_count: total_count.cast_unsigned(), + } + }; + Ok(results) }) .await .map_err(|e| PinakesError::Database(e.to_string()))? 
@@ -1450,20 +1620,22 @@ impl StorageBackend for SqliteBackend { let entry = entry.clone(); let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - db.execute( - "INSERT INTO audit_log (id, media_id, action, details, timestamp) \ - VALUES (?1, ?2, ?3, ?4, ?5)", - params![ - entry.id.to_string(), - entry.media_id.map(|mid| mid.0.to_string()), - entry.action.to_string(), - entry.details, - entry.timestamp.to_rfc3339(), - ], - )?; + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + db.execute( + "INSERT INTO audit_log (id, media_id, action, details, timestamp) \ + VALUES (?1, ?2, ?3, ?4, ?5)", + params![ + entry.id.to_string(), + entry.media_id.map(|mid| mid.0.to_string()), + entry.action.to_string(), + entry.details, + entry.timestamp.to_rfc3339(), + ], + )?; + } Ok(()) }) .await @@ -1478,41 +1650,56 @@ impl StorageBackend for SqliteBackend { let pagination = pagination.clone(); let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; + let rows = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; - let (sql, bind_media_id) = if let Some(mid) = media_id { - ( - "SELECT id, media_id, action, details, timestamp FROM audit_log \ - WHERE media_id = ?1 ORDER BY timestamp DESC LIMIT ?2 OFFSET ?3" - .to_string(), - Some(mid.0.to_string()), - ) - } else { - ( - "SELECT id, media_id, action, details, timestamp FROM audit_log \ - ORDER BY timestamp DESC LIMIT ?1 OFFSET ?2" - .to_string(), - None, - ) - }; + let (sql, bind_media_id) = media_id.map_or_else( + || { + ( + "SELECT id, media_id, action, details, timestamp FROM audit_log \ + ORDER BY timestamp DESC LIMIT ?1 OFFSET ?2" + .to_string(), + None, + ) + }, + |mid| { + ( + "SELECT id, media_id, action, details, timestamp FROM audit_log \ + WHERE media_id = ?1 
ORDER BY timestamp DESC LIMIT ?2 OFFSET ?3" + .to_string(), + Some(mid.0.to_string()), + ) + }, + ); - let mut stmt = db.prepare(&sql)?; - let rows = if let Some(ref mid_str) = bind_media_id { - stmt - .query_map( - params![mid_str, pagination.limit as i64, pagination.offset as i64], - row_to_audit_entry, - )? - .collect::>>()? - } else { - stmt - .query_map( - params![pagination.limit as i64, pagination.offset as i64], - row_to_audit_entry, - )? - .collect::>>()? + let mut stmt = db.prepare(&sql)?; + let rows = if let Some(ref mid_str) = bind_media_id { + stmt + .query_map( + params![ + mid_str, + pagination.limit.cast_signed(), + pagination.offset.cast_signed() + ], + row_to_audit_entry, + )? + .collect::>>()? + } else { + stmt + .query_map( + params![ + pagination.limit.cast_signed(), + pagination.offset.cast_signed() + ], + row_to_audit_entry, + )? + .collect::>>()? + }; + drop(stmt); + drop(db); + rows }; Ok(rows) @@ -1531,19 +1718,21 @@ impl StorageBackend for SqliteBackend { let field = field.clone(); let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - db.execute( - "INSERT OR REPLACE INTO custom_fields (media_id, field_name, \ - field_type, field_value) VALUES (?1, ?2, ?3, ?4)", - params![ - media_id.0.to_string(), - name, - custom_field_type_to_str(field.field_type), - field.value, - ], - )?; + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + db.execute( + "INSERT OR REPLACE INTO custom_fields (media_id, field_name, \ + field_type, field_value) VALUES (?1, ?2, ?3, ?4)", + params![ + media_id.0.to_string(), + name, + custom_field_type_to_str(field.field_type), + field.value, + ], + )?; + } Ok(()) }) .await @@ -1556,28 +1745,33 @@ impl StorageBackend for SqliteBackend { ) -> Result> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| 
PinakesError::Database(e.to_string()))?; - let mut stmt = db.prepare( - "SELECT field_name, field_type, field_value FROM custom_fields WHERE \ - media_id = ?1", - )?; - let rows = stmt.query_map(params![media_id.0.to_string()], |row| { - let name: String = row.get(0)?; - let ft_str: String = row.get(1)?; - let value: String = row.get(2)?; - Ok((name, CustomField { - field_type: str_to_custom_field_type(&ft_str), - value, - })) - })?; + let map = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let mut stmt = db.prepare( + "SELECT field_name, field_type, field_value FROM custom_fields \ + WHERE media_id = ?1", + )?; + let rows = stmt.query_map(params![media_id.0.to_string()], |row| { + let name: String = row.get(0)?; + let ft_str: String = row.get(1)?; + let value: String = row.get(2)?; + Ok((name, CustomField { + field_type: str_to_custom_field_type(&ft_str), + value, + })) + })?; - let mut map = HashMap::new(); - for r in rows { - let (name, field) = r?; - map.insert(name, field); - } + let mut map = HashMap::new(); + for r in rows { + let (name, field) = r?; + map.insert(name, field); + } + drop(stmt); + drop(db); + map + }; Ok(map) }) .await @@ -1592,13 +1786,15 @@ impl StorageBackend for SqliteBackend { let name = name.to_string(); let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - db.execute( - "DELETE FROM custom_fields WHERE media_id = ?1 AND field_name = ?2", - params![media_id.0.to_string(), name], - )?; + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + db.execute( + "DELETE FROM custom_fields WHERE media_id = ?1 AND field_name = ?2", + params![media_id.0.to_string(), name], + )?; + } Ok(()) }) .await @@ -1613,30 +1809,34 @@ impl StorageBackend for SqliteBackend { let ids: Vec = ids.iter().map(|id| id.0.to_string()).collect(); let conn = Arc::clone(&self.conn); 
tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; const CHUNK_SIZE: usize = 500; - let ctx = format!("{n} items"); - db.execute_batch("BEGIN IMMEDIATE") - .map_err(crate::error::db_ctx("batch_delete_media", &ctx))?; - let mut count = 0u64; - for chunk in ids.chunks(CHUNK_SIZE) { - let placeholders: Vec = - (1..=chunk.len()).map(|i| format!("?{}", i)).collect(); - let sql = format!( - "DELETE FROM media_items WHERE id IN ({})", - placeholders.join(", ") - ); - let params: Vec<&dyn rusqlite::ToSql> = - chunk.iter().map(|s| s as &dyn rusqlite::ToSql).collect(); - let rows = db - .execute(&sql, params.as_slice()) + let count = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let ctx = format!("{n} items"); + let tx = db + .unchecked_transaction() .map_err(crate::error::db_ctx("batch_delete_media", &ctx))?; - count += rows as u64; - } - db.execute_batch("COMMIT") - .map_err(crate::error::db_ctx("batch_delete_media", &ctx))?; + let mut count = 0u64; + for chunk in ids.chunks(CHUNK_SIZE) { + let placeholders: Vec = + (1..=chunk.len()).map(|i| format!("?{i}")).collect(); + let sql = format!( + "DELETE FROM media_items WHERE id IN ({})", + placeholders.join(", ") + ); + let params: Vec<&dyn rusqlite::ToSql> = + chunk.iter().map(|s| s as &dyn rusqlite::ToSql).collect(); + let rows = tx + .execute(&sql, params.as_slice()) + .map_err(crate::error::db_ctx("batch_delete_media", &ctx))?; + count += rows as u64; + } + tx.commit() + .map_err(crate::error::db_ctx("batch_delete_media", &ctx))?; + count + }; Ok(count) }) .await @@ -1653,26 +1853,32 @@ impl StorageBackend for SqliteBackend { } let media_ids: Vec = media_ids.iter().map(|id| id.0.to_string()).collect(); - let tag_ids: Vec = - tag_ids.iter().map(|id| id.to_string()).collect(); + let tag_ids: Vec = tag_ids + .iter() + .map(std::string::ToString::to_string) + .collect(); let conn = Arc::clone(&self.conn); 
tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - db.execute_batch("BEGIN IMMEDIATE")?; - // Prepare statement once for reuse - let mut stmt = db.prepare_cached( - "INSERT OR IGNORE INTO media_tags (media_id, tag_id) VALUES (?1, ?2)", - )?; - let mut count = 0u64; - for mid in &media_ids { - for tid in &tag_ids { - stmt.execute(params![mid, tid])?; - count += 1; + let count = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let tx = db.unchecked_transaction()?; + // Prepare statement once for reuse + let mut stmt = tx.prepare_cached( + "INSERT OR IGNORE INTO media_tags (media_id, tag_id) VALUES (?1, ?2)", + )?; + let mut count = 0u64; + for mid in &media_ids { + for tid in &tag_ids { + let rows = stmt.execute(params![mid, tid])?; + count += rows as u64; // INSERT OR IGNORE: rows=1 if new, 0 if existed + } } - } - db.execute_batch("COMMIT")?; + drop(stmt); + tx.commit()?; + count + }; Ok(count) }) .await @@ -1700,10 +1906,7 @@ impl StorageBackend for SqliteBackend { let description = description.map(String::from); let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - + const CHUNK_SIZE: usize = 500; // Build SET clause dynamically from provided fields let mut set_parts = Vec::new(); let mut params_vec: Vec> = Vec::new(); @@ -1751,35 +1954,42 @@ impl StorageBackend for SqliteBackend { return Ok(0); } - const CHUNK_SIZE: usize = 500; - let ctx = format!("{} items", ids.len()); - db.execute_batch("BEGIN IMMEDIATE") - .map_err(crate::error::db_ctx("batch_update_media", &ctx))?; + let count = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; - let mut count = 0u64; - for chunk in ids.chunks(CHUNK_SIZE) { - let id_placeholders: Vec = - (0..chunk.len()).map(|i| format!("?{}", idx + i)).collect(); - let sql = format!( - "UPDATE 
media_items SET {} WHERE id IN ({})", - set_parts.join(", "), - id_placeholders.join(", ") - ); - - let mut all_params: Vec<&dyn rusqlite::ToSql> = - params_vec.iter().map(|p| p.as_ref()).collect(); - let id_params: Vec<&dyn rusqlite::ToSql> = - chunk.iter().map(|s| s as &dyn rusqlite::ToSql).collect(); - all_params.extend(id_params); - - let rows = db - .execute(&sql, all_params.as_slice()) + let ctx = format!("{} items", ids.len()); + let tx = db + .unchecked_transaction() .map_err(crate::error::db_ctx("batch_update_media", &ctx))?; - count += rows as u64; - } - db.execute_batch("COMMIT") - .map_err(crate::error::db_ctx("batch_update_media", &ctx))?; + let mut count = 0u64; + for chunk in ids.chunks(CHUNK_SIZE) { + let id_placeholders: Vec = + (0..chunk.len()).map(|i| format!("?{}", idx + i)).collect(); + let sql = format!( + "UPDATE media_items SET {} WHERE id IN ({})", + set_parts.join(", "), + id_placeholders.join(", ") + ); + + let mut all_params: Vec<&dyn rusqlite::ToSql> = + params_vec.iter().map(std::convert::AsRef::as_ref).collect(); + let id_params: Vec<&dyn rusqlite::ToSql> = + chunk.iter().map(|s| s as &dyn rusqlite::ToSql).collect(); + all_params.extend(id_params); + + let rows = tx + .execute(&sql, all_params.as_slice()) + .map_err(crate::error::db_ctx("batch_update_media", &ctx))?; + count += rows as u64; + } + + tx.commit() + .map_err(crate::error::db_ctx("batch_update_media", &ctx))?; + count + }; Ok(count) }) .await @@ -1789,34 +1999,38 @@ impl StorageBackend for SqliteBackend { async fn find_duplicates(&self) -> Result>> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let mut stmt = db.prepare( - "SELECT * FROM media_items WHERE content_hash IN ( - SELECT content_hash FROM media_items GROUP BY content_hash \ - HAVING COUNT(*) > 1 - ) ORDER BY content_hash, created_at", - )?; - let mut rows: Vec = stmt - .query_map([], 
row_to_media_item)? - .collect::>>()?; + let groups = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let mut stmt = db.prepare( + "SELECT * FROM media_items WHERE deleted_at IS NULL AND \ + content_hash IN ( + SELECT content_hash FROM media_items WHERE deleted_at IS \ + NULL + GROUP BY content_hash HAVING COUNT(*) > 1 + ) ORDER BY content_hash, created_at", + )?; + let mut rows: Vec = stmt + .query_map([], row_to_media_item)? + .collect::>>()?; - load_custom_fields_batch(&db, &mut rows)?; + load_custom_fields_batch(&db, &mut rows)?; - // Group by content_hash - let mut groups: Vec> = Vec::new(); - let mut current_hash = String::new(); - for item in rows { - if item.content_hash.0 != current_hash { - current_hash = item.content_hash.0.clone(); - groups.push(Vec::new()); + // Group by content_hash + let mut groups: Vec> = Vec::new(); + let mut current_hash = String::new(); + for item in rows { + if item.content_hash.0 != current_hash { + current_hash.clone_from(&item.content_hash.0); + groups.push(Vec::new()); + } + if let Some(group) = groups.last_mut() { + group.push(item); + } } - if let Some(group) = groups.last_mut() { - group.push(item); - } - } - + groups + }; Ok(groups) }) .await @@ -1829,23 +2043,26 @@ impl StorageBackend for SqliteBackend { ) -> Result>> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; + use image_hasher::ImageHash; + let items = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; - // Get all images with perceptual hashes - let mut stmt = db.prepare( - "SELECT * FROM media_items WHERE perceptual_hash IS NOT NULL ORDER BY \ - id", - )?; - let mut items: Vec = stmt - .query_map([], row_to_media_item)? 
- .collect::>>()?; + // Get all images with perceptual hashes + let mut stmt = db.prepare( + "SELECT * FROM media_items WHERE perceptual_hash IS NOT NULL ORDER \ + BY id", + )?; + let mut items: Vec = stmt + .query_map([], row_to_media_item)? + .collect::>>()?; - load_custom_fields_batch(&db, &mut items)?; + load_custom_fields_batch(&db, &mut items)?; + items + }; // Compare each pair and build groups - use image_hasher::ImageHash; let mut groups: Vec> = Vec::new(); let mut grouped_indices: std::collections::HashSet = std::collections::HashSet::new(); @@ -1901,8 +2118,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|e| { PinakesError::Database(format!( - "find_perceptual_duplicates (threshold={}): {}", - threshold, e + "find_perceptual_duplicates (threshold={threshold}): {e}" )) })? } @@ -1910,67 +2126,78 @@ impl StorageBackend for SqliteBackend { async fn database_stats(&self) -> Result { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let media_count: i64 = - db.query_row("SELECT COUNT(*) FROM media_items", [], |row| row.get(0))?; - let tag_count: i64 = - db.query_row("SELECT COUNT(*) FROM tags", [], |row| row.get(0))?; - let collection_count: i64 = - db.query_row("SELECT COUNT(*) FROM collections", [], |row| row.get(0))?; - let audit_count: i64 = - db.query_row("SELECT COUNT(*) FROM audit_log", [], |row| row.get(0))?; - let page_count: i64 = - db.query_row("PRAGMA page_count", [], |row| row.get(0))?; - let page_size: i64 = - db.query_row("PRAGMA page_size", [], |row| row.get(0))?; - let database_size_bytes = (page_count * page_size) as u64; - Ok(crate::storage::DatabaseStats { - media_count: media_count as u64, - tag_count: tag_count as u64, - collection_count: collection_count as u64, - audit_count: audit_count as u64, - database_size_bytes, - backend_name: "sqlite".to_string(), - }) + let stats = { + let db = conn + .lock() + .map_err(|e| 
PinakesError::Database(e.to_string()))?; + let media_count: i64 = + db.query_row("SELECT COUNT(*) FROM media_items", [], |row| { + row.get(0) + })?; + let tag_count: i64 = + db.query_row("SELECT COUNT(*) FROM tags", [], |row| row.get(0))?; + let collection_count: i64 = + db.query_row("SELECT COUNT(*) FROM collections", [], |row| { + row.get(0) + })?; + let audit_count: i64 = + db.query_row("SELECT COUNT(*) FROM audit_log", [], |row| row.get(0))?; + let page_count: i64 = + db.query_row("PRAGMA page_count", [], |row| row.get(0))?; + let page_size: i64 = + db.query_row("PRAGMA page_size", [], |row| row.get(0))?; + let database_size_bytes = (page_count * page_size).cast_unsigned(); + crate::storage::DatabaseStats { + media_count: media_count.cast_unsigned(), + tag_count: tag_count.cast_unsigned(), + collection_count: collection_count.cast_unsigned(), + audit_count: audit_count.cast_unsigned(), + database_size_bytes, + backend_name: "sqlite".to_string(), + } + }; + Ok(stats) }) .await - .map_err(|e| PinakesError::Database(format!("database_stats: {}", e)))? + .map_err(|e| PinakesError::Database(format!("database_stats: {e}")))? } async fn vacuum(&self) -> Result<()> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - db.execute_batch("VACUUM")?; + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + db.execute_batch("VACUUM")?; + } Ok(()) }) .await - .map_err(|e| PinakesError::Database(format!("vacuum: {}", e)))? + .map_err(|e| PinakesError::Database(format!("vacuum: {e}")))? 
} async fn clear_all_data(&self) -> Result<()> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - db.execute_batch( - "DELETE FROM audit_log; - DELETE FROM custom_fields; - DELETE FROM collection_members; - DELETE FROM media_tags; - DELETE FROM media_items; - DELETE FROM tags; - DELETE FROM collections;", - )?; + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + db.execute_batch( + "DELETE FROM audit_log; + DELETE FROM custom_fields; + DELETE FROM collection_members; + DELETE FROM media_tags; + DELETE FROM media_items; + DELETE FROM tags; + DELETE FROM collections;", + )?; + } Ok(()) }) .await - .map_err(|e| PinakesError::Database(format!("clear_all_data: {}", e)))? + .map_err(|e| PinakesError::Database(format!("clear_all_data: {e}")))? } async fn list_media_paths( @@ -1978,30 +2205,35 @@ impl StorageBackend for SqliteBackend { ) -> Result> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let mut stmt = - db.prepare("SELECT id, path, content_hash FROM media_items")?; - let rows = stmt.query_map([], |row| { - let id_str: String = row.get(0)?; - let path_str: String = row.get(1)?; - let hash_str: String = row.get(2)?; - let id = parse_uuid(&id_str)?; - Ok(( - MediaId(id), - PathBuf::from(path_str), - ContentHash::new(hash_str), - )) - })?; - let mut results = Vec::new(); - for row in rows { - results.push(row?); - } + let results = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let mut stmt = db.prepare( + "SELECT id, path, content_hash FROM media_items WHERE deleted_at IS \ + NULL", + )?; + let rows = stmt.query_map([], |row| { + let id_str: String = row.get(0)?; + let path_str: String = row.get(1)?; + let hash_str: String = row.get(2)?; + let id = parse_uuid(&id_str)?; + Ok(( + 
MediaId(id), + PathBuf::from(path_str), + ContentHash::new(hash_str), + )) + })?; + let mut results = Vec::new(); + for row in rows { + results.push(row?); + } + results + }; Ok(results) }) .await - .map_err(|e| PinakesError::Database(format!("list_media_paths: {}", e)))? + .map_err(|e| PinakesError::Database(format!("list_media_paths: {e}")))? } async fn save_search( @@ -2015,21 +2247,23 @@ impl StorageBackend for SqliteBackend { let id_str = id.to_string(); let name = name.to_string(); let query = query.to_string(); - let sort_order = sort_order.map(|s| s.to_string()); + let sort_order = sort_order.map(std::string::ToString::to_string); let now = chrono::Utc::now().to_rfc3339(); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - db.execute( - "INSERT OR REPLACE INTO saved_searches (id, name, query, sort_order, \ - created_at) VALUES (?1, ?2, ?3, ?4, ?5)", - params![id_str, name, query, sort_order, now], - )?; + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + db.execute( + "INSERT OR REPLACE INTO saved_searches (id, name, query, \ + sort_order, created_at) VALUES (?1, ?2, ?3, ?4, ?5)", + params![id_str, name, query, sort_order, now], + )?; + } Ok(()) }) .await - .map_err(|e| PinakesError::Database(format!("save_search {}: {}", id, e)))? + .map_err(|e| PinakesError::Database(format!("save_search {id}: {e}")))? 
} async fn list_saved_searches( @@ -2037,53 +2271,104 @@ impl StorageBackend for SqliteBackend { ) -> Result> { let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - let mut stmt = db.prepare( - "SELECT id, name, query, sort_order, created_at FROM saved_searches \ - ORDER BY created_at DESC", - )?; - let rows = stmt.query_map([], |row| { - let id_str: String = row.get(0)?; - let name: String = row.get(1)?; - let query: String = row.get(2)?; - let sort_order: Option = row.get(3)?; - let created_at_str: String = row.get(4)?; - let id = parse_uuid(&id_str)?; - Ok(crate::model::SavedSearch { - id, - name, - query, - sort_order, - created_at: parse_datetime(&created_at_str), - }) - })?; - let mut results = Vec::new(); - for row in rows { - results.push(row?); - } + let results = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + let mut stmt = db.prepare( + "SELECT id, name, query, sort_order, created_at FROM saved_searches \ + ORDER BY created_at DESC", + )?; + let rows = stmt.query_map([], |row| { + let id_str: String = row.get(0)?; + let name: String = row.get(1)?; + let query: String = row.get(2)?; + let sort_order: Option = row.get(3)?; + let created_at_str: String = row.get(4)?; + let id = parse_uuid(&id_str)?; + Ok(crate::model::SavedSearch { + id, + name, + query, + sort_order, + created_at: parse_datetime(&created_at_str), + }) + })?; + let mut results = Vec::new(); + for row in rows { + results.push(row?); + } + results + }; Ok(results) }) .await - .map_err(|e| { - PinakesError::Database(format!("list_saved_searches: {}", e)) - })? + .map_err(|e| PinakesError::Database(format!("list_saved_searches: {e}")))? 
+ } + + async fn get_saved_search( + &self, + id: Uuid, + ) -> Result { + let conn = Arc::clone(&self.conn); + let id_str = id.to_string(); + tokio::task::spawn_blocking(move || { + let result = { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + db.query_row( + "SELECT id, name, query, sort_order, created_at FROM saved_searches \ + WHERE id = ?1", + params![id_str], + |row| { + let rid: String = row.get(0)?; + let name: String = row.get(1)?; + let query: String = row.get(2)?; + let sort_order: Option = row.get(3)?; + let created_at_str: String = row.get(4)?; + let uid = parse_uuid(&rid)?; + Ok(crate::model::SavedSearch { + id: uid, + name, + query, + sort_order, + created_at: parse_datetime(&created_at_str), + }) + }, + ) + .map_err(|e| { + match e { + rusqlite::Error::QueryReturnedNoRows => { + PinakesError::NotFound(format!("saved search {id}")) + }, + other => PinakesError::Database(other.to_string()), + } + })? + }; + Ok(result) + }) + .await + .map_err(|e| PinakesError::Database(format!("get_saved_search: {e}")))? } async fn delete_saved_search(&self, id: Uuid) -> Result<()> { let conn = Arc::clone(&self.conn); let id_str = id.to_string(); tokio::task::spawn_blocking(move || { - let db = conn - .lock() - .map_err(|e| PinakesError::Database(e.to_string()))?; - db.execute("DELETE FROM saved_searches WHERE id = ?1", params![id_str])?; + { + let db = conn + .lock() + .map_err(|e| PinakesError::Database(e.to_string()))?; + db.execute("DELETE FROM saved_searches WHERE id = ?1", params![ + id_str + ])?; + } Ok(()) }) .await .map_err(|e| { - PinakesError::Database(format!("delete_saved_search {}: {}", id, e)) + PinakesError::Database(format!("delete_saved_search {id}: {e}")) })? } async fn list_media_ids_for_thumbnails( @@ -2107,15 +2392,14 @@ impl StorageBackend for SqliteBackend { let s: String = r.get(0)?; Ok(MediaId(uuid::Uuid::parse_str(&s).unwrap_or_default())) })? 
- .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect(); Ok(ids) }) .await .map_err(|e| { PinakesError::Database(format!( - "list_media_ids_for_thumbnails (only_missing={}): {}", - only_missing, e + "list_media_ids_for_thumbnails (only_missing={only_missing}): {e}" )) })? } @@ -2143,7 +2427,7 @@ impl StorageBackend for SqliteBackend { )?; let media_by_type: Vec<(String, u64)> = stmt .query_map([], |r| Ok((r.get::<_, String>(0)?, r.get::<_, u64>(1)?)))? - .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect(); // Storage by type @@ -2153,7 +2437,7 @@ impl StorageBackend for SqliteBackend { )?; let storage_by_type: Vec<(String, u64)> = stmt .query_map([], |r| Ok((r.get::<_, String>(0)?, r.get::<_, u64>(1)?)))? - .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect(); // Newest / oldest @@ -2179,7 +2463,7 @@ impl StorageBackend for SqliteBackend { )?; let top_tags: Vec<(String, u64)> = stmt .query_map([], |r| Ok((r.get::<_, String>(0)?, r.get::<_, u64>(1)?)))? - .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect(); // Top collections @@ -2190,7 +2474,7 @@ impl StorageBackend for SqliteBackend { )?; let top_collections: Vec<(String, u64)> = stmt .query_map([], |r| Ok((r.get::<_, String>(0)?, r.get::<_, u64>(1)?)))? - .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect(); let total_tags: u64 = @@ -2226,19 +2510,14 @@ impl StorageBackend for SqliteBackend { .map_err(|_| { PinakesError::Database("library_statistics query timed out".to_string()) })? - .map_err(|e| { - PinakesError::Database(format!("library_statistics: {}", e)) - })? + .map_err(|e| PinakesError::Database(format!("library_statistics: {e}")))? 
} async fn list_users(&self) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let mut stmt = db.prepare( "SELECT id, username, password_hash, role, created_at, updated_at \ @@ -2276,7 +2555,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("list_users query timed out".to_string()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("list_users: {}", e)) + PinakesError::Database(format!("list_users: {e}")) })? } @@ -2284,15 +2563,12 @@ impl StorageBackend for SqliteBackend { &self, id: crate::users::UserId, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.0.to_string(); let id_str_for_err = id_str.clone(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let opt = db .query_row( @@ -2323,18 +2599,17 @@ impl StorageBackend for SqliteBackend { }, ) .optional()?; - opt.ok_or_else(|| PinakesError::NotFound(format!("user {}", id_str))) + opt.ok_or_else(|| PinakesError::NotFound(format!("user {id_str}"))) }); tokio::time::timeout(std::time::Duration::from_secs(10), fut) .await .map_err(|_| { PinakesError::Database(format!( - "get_user query timed out for {}", - id_str_for_err + "get_user query timed out for {id_str_for_err}" )) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_user {}: {}", id_str_for_err, e)) + PinakesError::Database(format!("get_user {id_str_for_err}: {e}")) })? 
} @@ -2342,15 +2617,12 @@ impl StorageBackend for SqliteBackend { &self, username: &str, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let username = username.to_string(); let username_for_err = username.clone(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let opt = db .query_row( @@ -2382,21 +2654,19 @@ impl StorageBackend for SqliteBackend { ) .optional()?; opt.ok_or_else(|| { - PinakesError::NotFound(format!("user with username {}", username)) + PinakesError::NotFound(format!("user with username {username}")) }) }); tokio::time::timeout(std::time::Duration::from_secs(10), fut) .await .map_err(|_| { PinakesError::Database(format!( - "get_user_by_username query timed out for {}", - username_for_err + "get_user_by_username query timed out for {username_for_err}" )) })? .map_err(|e: tokio::task::JoinError| { PinakesError::Database(format!( - "get_user_by_username {}: {}", - username_for_err, e + "get_user_by_username {username_for_err}: {e}" )) })? 
} @@ -2408,15 +2678,14 @@ impl StorageBackend for SqliteBackend { role: crate::config::UserRole, profile: Option, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let username = username.to_string(); let password_hash = password_hash.to_string(); let fut = tokio::task::spawn_blocking(move || -> Result { let db = conn.lock().map_err(|e| { PinakesError::Database(format!( - "failed to acquire database lock: {}", - e + "failed to acquire database lock: {e}" )) })?; @@ -2426,7 +2695,7 @@ impl StorageBackend for SqliteBackend { let id_str = id.0.to_string(); let now = chrono::Utc::now(); let role_str = serde_json::to_string(&role).map_err(|e| { - PinakesError::Database(format!("failed to serialize role: {}", e)) + PinakesError::Database(format!("failed to serialize role: {e}")) })?; tx.execute( @@ -2446,8 +2715,7 @@ impl StorageBackend for SqliteBackend { let prefs_json = serde_json::to_string(&prof.preferences).map_err(|e| { PinakesError::Database(format!( - "failed to serialize preferences: {}", - e + "failed to serialize preferences: {e}" )) })?; tx.execute( @@ -2490,7 +2758,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("create_user query timed out".to_string()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("create_user: {}", e)) + PinakesError::Database(format!("create_user: {e}")) })? 
} @@ -2501,15 +2769,14 @@ impl StorageBackend for SqliteBackend { role: Option, profile: Option, ) -> Result { - let conn = self.conn.clone(); - let password_hash = password_hash.map(|s| s.to_string()); + let conn = Arc::clone(&self.conn); + let password_hash = password_hash.map(std::string::ToString::to_string); let id_str = id.0.to_string(); let fut = tokio::task::spawn_blocking(move || -> Result { let db = conn.lock().map_err(|e| { PinakesError::Database(format!( - "failed to acquire database lock: {}", - e + "failed to acquire database lock: {e}" )) })?; @@ -2529,7 +2796,7 @@ impl StorageBackend for SqliteBackend { if let Some(ref r) = role { updates.push("role = ?"); let role_str = serde_json::to_string(r).map_err(|e| { - PinakesError::Database(format!("failed to serialize role: {}", e)) + PinakesError::Database(format!("failed to serialize role: {e}")) })?; params.push(Box::new(role_str)); } @@ -2539,7 +2806,7 @@ impl StorageBackend for SqliteBackend { let sql = format!("UPDATE users SET {} WHERE id = ?", updates.join(", ")); let param_refs: Vec<&dyn rusqlite::ToSql> = - params.iter().map(|p| p.as_ref()).collect(); + params.iter().map(std::convert::AsRef::as_ref).collect(); tx.execute(&sql, param_refs.as_slice())?; } @@ -2548,8 +2815,7 @@ impl StorageBackend for SqliteBackend { let prefs_json = serde_json::to_string(&prof.preferences).map_err(|e| { PinakesError::Database(format!( - "failed to serialize preferences: {}", - e + "failed to serialize preferences: {e}" )) })?; tx.execute( @@ -2606,19 +2872,16 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("update_user query timed out".to_string()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("update_user: {}", e)) + PinakesError::Database(format!("update_user: {e}")) })? 
} async fn delete_user(&self, id: crate::users::UserId) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.0.to_string(); let fut = tokio::task::spawn_blocking(move || -> Result<()> { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let tx = db.unchecked_transaction()?; @@ -2630,7 +2893,7 @@ impl StorageBackend for SqliteBackend { // Delete user let affected = tx.execute("DELETE FROM users WHERE id = ?", [&id_str])?; if affected == 0 { - return Err(PinakesError::NotFound(format!("user {}", id_str))); + return Err(PinakesError::NotFound(format!("user {id_str}"))); } tx.commit()?; @@ -2642,7 +2905,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("delete_user query timed out".to_string()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("delete_user: {}", e)) + PinakesError::Database(format!("delete_user: {e}")) })? } @@ -2650,14 +2913,11 @@ impl StorageBackend for SqliteBackend { &self, user_id: crate::users::UserId, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let user_id_str = user_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let mut stmt = db.prepare( "SELECT user_id, root_path, permission, granted_at FROM \ @@ -2678,7 +2938,7 @@ impl StorageBackend for SqliteBackend { .with_timezone(&chrono::Utc), }) })? 
- .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect::>(); Ok(libraries) }); @@ -2698,18 +2958,15 @@ impl StorageBackend for SqliteBackend { root_path: &str, permission: crate::users::LibraryPermission, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let root_path = root_path.to_string(); let user_id_str = user_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || -> Result<()> { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let perm_str = serde_json::to_string(&permission).map_err(|e| { - PinakesError::Database(format!("failed to serialize permission: {}", e)) + PinakesError::Database(format!("failed to serialize permission: {e}")) })?; let now = chrono::Utc::now(); db.execute( @@ -2732,7 +2989,7 @@ impl StorageBackend for SqliteBackend { ) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("grant_library_access: {}", e)) + PinakesError::Database(format!("grant_library_access: {e}")) })? } @@ -2741,15 +2998,12 @@ impl StorageBackend for SqliteBackend { user_id: crate::users::UserId, root_path: &str, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let root_path = root_path.to_string(); let user_id_str = user_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute( "DELETE FROM user_libraries WHERE user_id = ? AND root_path = ?", @@ -2765,7 +3019,7 @@ impl StorageBackend for SqliteBackend { ) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("revoke_library_access: {}", e)) + PinakesError::Database(format!("revoke_library_access: {e}")) })? 
} @@ -2776,16 +3030,13 @@ impl StorageBackend for SqliteBackend { stars: u8, review: Option<&str>, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let user_id_str = user_id.0.to_string(); let media_id_str = media_id.0.to_string(); let review = review.map(String::from); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let id = Uuid::now_v7(); let id_str = id.to_string(); @@ -2797,7 +3048,7 @@ impl StorageBackend for SqliteBackend { &id_str, &user_id_str, &media_id_str, - stars as i32, + i32::from(stars), &review, now.to_rfc3339() ], @@ -2826,7 +3077,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("rate_media timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("rate_media: {}", e)) + PinakesError::Database(format!("rate_media: {e}")) })? } @@ -2834,14 +3085,11 @@ impl StorageBackend for SqliteBackend { &self, media_id: MediaId, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let media_id_str = media_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let mut stmt = db.prepare( "SELECT id, user_id, media_id, stars, review_text, created_at FROM \ @@ -2857,12 +3105,12 @@ impl StorageBackend for SqliteBackend { id: parse_uuid(&id_str)?, user_id: crate::users::UserId(parse_uuid(&uid_str)?), media_id: MediaId(parse_uuid(&mid_str)?), - stars: row.get::<_, i32>(3)? as u8, + stars: u8::try_from(row.get::<_, i32>(3)?).unwrap_or(0), review_text: row.get(4)?, created_at: parse_datetime(&created_str), }) })? 
- .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect(); Ok(ratings) }); @@ -2872,7 +3120,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("get_media_ratings timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_media_ratings: {}", e)) + PinakesError::Database(format!("get_media_ratings: {e}")) })? } @@ -2881,15 +3129,12 @@ impl StorageBackend for SqliteBackend { user_id: crate::users::UserId, media_id: MediaId, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let user_id_str = user_id.0.to_string(); let media_id_str = media_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let result = db .query_row( @@ -2905,7 +3150,7 @@ impl StorageBackend for SqliteBackend { id: parse_uuid(&id_str)?, user_id: crate::users::UserId(parse_uuid(&uid_str)?), media_id: MediaId(parse_uuid(&mid_str)?), - stars: row.get::<_, i32>(3)? as u8, + stars: u8::try_from(row.get::<_, i32>(3)?).unwrap_or(0), review_text: row.get(4)?, created_at: parse_datetime(&created_str), }) @@ -2918,19 +3163,16 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("get_user_rating timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_user_rating: {}", e)) + PinakesError::Database(format!("get_user_rating: {e}")) })? 
} async fn delete_rating(&self, id: Uuid) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute("DELETE FROM ratings WHERE id = ?", [&id_str])?; Ok(()) @@ -2939,7 +3181,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("delete_rating timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("delete_rating: {}", e)) + PinakesError::Database(format!("delete_rating: {e}")) })? } @@ -2950,17 +3192,14 @@ impl StorageBackend for SqliteBackend { text: &str, parent_id: Option, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let user_id_str = user_id.0.to_string(); let media_id_str = media_id.0.to_string(); let text = text.to_string(); let parent_str = parent_id.map(|p| p.to_string()); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let id = Uuid::now_v7(); let id_str = id.to_string(); @@ -2990,7 +3229,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("add_comment timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("add_comment: {}", e)) + PinakesError::Database(format!("add_comment: {e}")) })? 
} @@ -2998,14 +3237,11 @@ impl StorageBackend for SqliteBackend { &self, media_id: MediaId, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let media_id_str = media_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let mut stmt = db.prepare( "SELECT id, user_id, media_id, parent_comment_id, text, created_at \ @@ -3028,7 +3264,7 @@ impl StorageBackend for SqliteBackend { created_at: parse_datetime(&created_str), }) })? - .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect(); Ok(comments) }); @@ -3038,19 +3274,16 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("get_media_comments timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_media_comments: {}", e)) + PinakesError::Database(format!("get_media_comments: {e}")) })? } async fn delete_comment(&self, id: Uuid) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute("DELETE FROM comments WHERE id = ?", [&id_str])?; Ok(()) @@ -3059,7 +3292,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("delete_comment timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("delete_comment: {}", e)) + PinakesError::Database(format!("delete_comment: {e}")) })? 
} @@ -3068,15 +3301,12 @@ impl StorageBackend for SqliteBackend { user_id: crate::users::UserId, media_id: MediaId, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let user_id_str = user_id.0.to_string(); let media_id_str = media_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let now = chrono::Utc::now(); db.execute( @@ -3090,7 +3320,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("add_favorite timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("add_favorite: {}", e)) + PinakesError::Database(format!("add_favorite: {e}")) })? } @@ -3099,15 +3329,12 @@ impl StorageBackend for SqliteBackend { user_id: crate::users::UserId, media_id: MediaId, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let user_id_str = user_id.0.to_string(); let media_id_str = media_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute( "DELETE FROM favorites WHERE user_id = ? AND media_id = ?", @@ -3119,7 +3346,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("remove_favorite timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("remove_favorite: {}", e)) + PinakesError::Database(format!("remove_favorite: {e}")) })? 
} @@ -3128,16 +3355,13 @@ impl StorageBackend for SqliteBackend { user_id: crate::users::UserId, pagination: &Pagination, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let user_id_str = user_id.0.to_string(); - let limit = pagination.limit as i64; - let offset = pagination.offset as i64; + let limit = pagination.limit.cast_signed(); + let offset = pagination.offset.cast_signed(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let mut stmt = db.prepare( "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, \ @@ -3149,7 +3373,7 @@ impl StorageBackend for SqliteBackend { )?; let mut items: Vec = stmt .query_map(params![&user_id_str, limit, offset], row_to_media_item)? - .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect(); load_custom_fields_batch(&db, &mut items)?; Ok(items) @@ -3160,7 +3384,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("get_user_favorites timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_user_favorites: {}", e)) + PinakesError::Database(format!("get_user_favorites: {e}")) })? } @@ -3169,15 +3393,12 @@ impl StorageBackend for SqliteBackend { user_id: crate::users::UserId, media_id: MediaId, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let user_id_str = user_id.0.to_string(); let media_id_str = media_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let count: i64 = db.query_row( "SELECT COUNT(*) FROM favorites WHERE user_id = ? 
AND media_id = ?", @@ -3190,7 +3411,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("is_favorite timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("is_favorite: {}", e)) + PinakesError::Database(format!("is_favorite: {e}")) })? } @@ -3202,7 +3423,7 @@ impl StorageBackend for SqliteBackend { password_hash: Option<&str>, expires_at: Option>, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let media_id_str = media_id.0.to_string(); let created_by_str = created_by.0.to_string(); let token = token.to_string(); @@ -3210,10 +3431,7 @@ impl StorageBackend for SqliteBackend { let expires_str = expires_at.map(|dt| dt.to_rfc3339()); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let id = Uuid::now_v7(); let id_str = id.to_string(); @@ -3249,7 +3467,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("create_share_link timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("create_share_link: {}", e)) + PinakesError::Database(format!("create_share_link: {e}")) })? 
} @@ -3257,14 +3475,11 @@ impl StorageBackend for SqliteBackend { &self, token: &str, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let token = token.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.query_row( "SELECT id, media_id, created_by, token, password_hash, expires_at, \ @@ -3283,7 +3498,7 @@ impl StorageBackend for SqliteBackend { token: row.get(3)?, password_hash: row.get(4)?, expires_at: expires_str.map(|s| parse_datetime(&s)), - view_count: row.get::<_, i64>(6)? as u64, + view_count: row.get::<_, i64>(6)?.cast_unsigned(), created_at: parse_datetime(&created_str), }) }, @@ -3301,19 +3516,16 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("get_share_link timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_share_link: {}", e)) + PinakesError::Database(format!("get_share_link: {e}")) })? } async fn increment_share_views(&self, token: &str) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let token = token.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute( "UPDATE share_links SET view_count = view_count + 1 WHERE token = ?", @@ -3327,19 +3539,16 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("increment_share_views timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("increment_share_views: {}", e)) + PinakesError::Database(format!("increment_share_views: {e}")) })? 
} async fn delete_share_link(&self, id: Uuid) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute("DELETE FROM share_links WHERE id = ?", [&id_str])?; Ok(()) @@ -3350,7 +3559,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("delete_share_link timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("delete_share_link: {}", e)) + PinakesError::Database(format!("delete_share_link: {e}")) })? } @@ -3363,17 +3572,14 @@ impl StorageBackend for SqliteBackend { is_smart: bool, filter_query: Option<&str>, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let owner_id_str = owner_id.0.to_string(); let name = name.to_string(); let description = description.map(String::from); let filter_query = filter_query.map(String::from); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let id = Uuid::now_v7(); let id_str = id.to_string(); @@ -3387,8 +3593,8 @@ impl StorageBackend for SqliteBackend { &owner_id_str, &name, &description, - is_public as i32, - is_smart as i32, + i32::from(is_public), + i32::from(is_smart), &filter_query, now.to_rfc3339(), now.to_rfc3339() @@ -3410,19 +3616,16 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("create_playlist timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("create_playlist: {}", e)) + PinakesError::Database(format!("create_playlist: {e}")) })? 
} async fn get_playlist(&self, id: Uuid) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.query_row( "SELECT id, owner_id, name, description, is_public, is_smart, \ @@ -3459,7 +3662,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("get_playlist timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_playlist: {}", e)) + PinakesError::Database(format!("get_playlist: {e}")) })? } @@ -3467,25 +3670,13 @@ impl StorageBackend for SqliteBackend { &self, owner_id: Option, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; - let (sql, param): (String, Option) = match owner_id { - Some(uid) => { - ( - "SELECT id, owner_id, name, description, is_public, is_smart, \ - filter_query, created_at, updated_at FROM playlists WHERE \ - owner_id = ? OR is_public = 1 ORDER BY updated_at DESC" - .to_string(), - Some(uid.0.to_string()), - ) - }, - None => { + let (sql, param): (String, Option) = owner_id.map_or_else( + || { ( "SELECT id, owner_id, name, description, is_public, is_smart, \ filter_query, created_at, updated_at FROM playlists ORDER BY \ @@ -3494,7 +3685,16 @@ impl StorageBackend for SqliteBackend { None, ) }, - }; + |uid| { + ( + "SELECT id, owner_id, name, description, is_public, is_smart, \ + filter_query, created_at, updated_at FROM playlists WHERE \ + owner_id = ? 
OR is_public = 1 ORDER BY updated_at DESC" + .to_string(), + Some(uid.0.to_string()), + ) + }, + ); let mut stmt = db.prepare(&sql)?; let rows = if let Some(ref p) = param { stmt @@ -3515,7 +3715,7 @@ impl StorageBackend for SqliteBackend { updated_at: parse_datetime(&updated_str), }) })? - .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect() } else { stmt @@ -3536,7 +3736,7 @@ impl StorageBackend for SqliteBackend { updated_at: parse_datetime(&updated_str), }) })? - .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect() }; Ok(rows) @@ -3545,7 +3745,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("list_playlists timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("list_playlists: {}", e)) + PinakesError::Database(format!("list_playlists: {e}")) })? } @@ -3556,16 +3756,13 @@ impl StorageBackend for SqliteBackend { description: Option<&str>, is_public: Option, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.to_string(); let name = name.map(String::from); let description = description.map(String::from); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let now = chrono::Utc::now(); let mut updates = vec!["updated_at = ?".to_string()]; @@ -3581,13 +3778,13 @@ impl StorageBackend for SqliteBackend { } if let Some(p) = is_public { updates.push("is_public = ?".to_string()); - sql_params.push(Box::new(p as i32)); + sql_params.push(Box::new(i32::from(p))); } sql_params.push(Box::new(id_str.clone())); let sql = format!("UPDATE playlists SET {} WHERE id = ?", updates.join(", ")); let param_refs: Vec<&dyn rusqlite::ToSql> = - sql_params.iter().map(|p| p.as_ref()).collect(); + 
sql_params.iter().map(std::convert::AsRef::as_ref).collect(); db.execute(&sql, param_refs.as_slice())?; // Fetch updated db.query_row( @@ -3615,7 +3812,7 @@ impl StorageBackend for SqliteBackend { .map_err(|e| { match e { rusqlite::Error::QueryReturnedNoRows => { - PinakesError::NotFound(format!("playlist {}", id_str)) + PinakesError::NotFound(format!("playlist {id_str}")) }, _ => PinakesError::Database(e.to_string()), } @@ -3625,19 +3822,16 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("update_playlist timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("update_playlist: {}", e)) + PinakesError::Database(format!("update_playlist: {e}")) })? } async fn delete_playlist(&self, id: Uuid) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute("DELETE FROM playlists WHERE id = ?", [&id_str])?; Ok(()) @@ -3646,7 +3840,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("delete_playlist timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("delete_playlist: {}", e)) + PinakesError::Database(format!("delete_playlist: {e}")) })? 
} @@ -3656,15 +3850,12 @@ impl StorageBackend for SqliteBackend { media_id: MediaId, position: i32, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let playlist_id_str = playlist_id.to_string(); let media_id_str = media_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let now = chrono::Utc::now(); db.execute( @@ -3678,7 +3869,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("add_to_playlist timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("add_to_playlist: {}", e)) + PinakesError::Database(format!("add_to_playlist: {e}")) })? } @@ -3687,15 +3878,12 @@ impl StorageBackend for SqliteBackend { playlist_id: Uuid, media_id: MediaId, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let playlist_id_str = playlist_id.to_string(); let media_id_str = media_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute( "DELETE FROM playlist_items WHERE playlist_id = ? AND media_id = ?", @@ -3709,7 +3897,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("remove_from_playlist timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("remove_from_playlist: {}", e)) + PinakesError::Database(format!("remove_from_playlist: {e}")) })? 
} @@ -3717,14 +3905,11 @@ impl StorageBackend for SqliteBackend { &self, playlist_id: Uuid, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let playlist_id_str = playlist_id.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let mut stmt = db.prepare( "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, \ @@ -3735,7 +3920,7 @@ impl StorageBackend for SqliteBackend { )?; let mut items: Vec = stmt .query_map([&playlist_id_str], row_to_media_item)? - .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect(); load_custom_fields_batch(&db, &mut items)?; Ok(items) @@ -3746,7 +3931,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("get_playlist_items timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_playlist_items: {}", e)) + PinakesError::Database(format!("get_playlist_items: {e}")) })? } @@ -3756,15 +3941,12 @@ impl StorageBackend for SqliteBackend { media_id: MediaId, new_position: i32, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let playlist_id_str = playlist_id.to_string(); let media_id_str = media_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute( "UPDATE playlist_items SET position = ? WHERE playlist_id = ? AND \ @@ -3777,7 +3959,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("reorder_playlist timed out".into()))? 
.map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("reorder_playlist: {}", e)) + PinakesError::Database(format!("reorder_playlist: {e}")) })? } @@ -3785,7 +3967,7 @@ impl StorageBackend for SqliteBackend { &self, event: &crate::analytics::UsageEvent, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = event.id.to_string(); let media_id_str = event.media_id.map(|m| m.0.to_string()); let user_id_str = event.user_id.map(|u| u.0.to_string()); @@ -3795,10 +3977,7 @@ impl StorageBackend for SqliteBackend { let context = event.context_json.clone(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute( "INSERT INTO usage_events (id, media_id, user_id, event_type, \ @@ -3821,7 +4000,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("record_usage_event timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("record_usage_event: {}", e)) + PinakesError::Database(format!("record_usage_event: {e}")) })? 
} @@ -3831,13 +4010,10 @@ impl StorageBackend for SqliteBackend { user_id: Option, limit: u64, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let mut conditions = Vec::new(); let mut sql_params: Vec> = Vec::new(); @@ -3854,15 +4030,15 @@ impl StorageBackend for SqliteBackend { } else { format!("WHERE {}", conditions.join(" AND ")) }; - sql_params.push(Box::new(limit as i64)); + sql_params.push(Box::new(limit.cast_signed())); let sql = format!( "SELECT id, media_id, user_id, event_type, timestamp, duration_secs, \ - context_json FROM usage_events {} ORDER BY timestamp DESC LIMIT ?", - where_clause + context_json FROM usage_events {where_clause} ORDER BY timestamp \ + DESC LIMIT ?" ); let mut stmt = db.prepare(&sql)?; let param_refs: Vec<&dyn rusqlite::ToSql> = - sql_params.iter().map(|p| p.as_ref()).collect(); + sql_params.iter().map(std::convert::AsRef::as_ref).collect(); let events = stmt .query_map(param_refs.as_slice(), |row| { let id_str: String = row.get(0)?; @@ -3886,7 +4062,7 @@ impl StorageBackend for SqliteBackend { context_json: row.get(6)?, }) })? - .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect(); Ok(events) }); @@ -3894,18 +4070,15 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("get_usage_events timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_usage_events: {}", e)) + PinakesError::Database(format!("get_usage_events: {e}")) })? 
} async fn get_most_viewed(&self, limit: u64) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let mut stmt = db.prepare( "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, \ @@ -3916,12 +4089,12 @@ impl StorageBackend for SqliteBackend { ('view', 'play') GROUP BY m.id ORDER BY view_count DESC LIMIT ?", )?; let mut items: Vec<(MediaItem, u64)> = stmt - .query_map([limit as i64], |row| { + .query_map([limit.cast_signed()], |row| { let item = row_to_media_item(row)?; let count: i64 = row.get(16)?; - Ok((item, count as u64)) + Ok((item, count.cast_unsigned())) })? - .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect(); // Load custom fields for each item let mut media_items: Vec = @@ -3936,7 +4109,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("get_most_viewed timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_most_viewed: {}", e)) + PinakesError::Database(format!("get_most_viewed: {e}")) })? 
} @@ -3945,14 +4118,11 @@ impl StorageBackend for SqliteBackend { user_id: crate::users::UserId, limit: u64, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let user_id_str = user_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let mut stmt = db.prepare( "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, \ @@ -3963,8 +4133,11 @@ impl StorageBackend for SqliteBackend { 'play') GROUP BY m.id ORDER BY MAX(ue.timestamp) DESC LIMIT ?", )?; let mut items: Vec = stmt - .query_map(params![&user_id_str, limit as i64], row_to_media_item)? - .filter_map(|r| r.ok()) + .query_map( + params![&user_id_str, limit.cast_signed()], + row_to_media_item, + )? + .filter_map(std::result::Result::ok) .collect(); load_custom_fields_batch(&db, &mut items)?; Ok(items) @@ -3975,7 +4148,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("get_recently_viewed timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_recently_viewed: {}", e)) + PinakesError::Database(format!("get_recently_viewed: {e}")) })? 
} @@ -3985,15 +4158,12 @@ impl StorageBackend for SqliteBackend { media_id: MediaId, progress_secs: f64, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let user_id_str = user_id.0.to_string(); let media_id_str = media_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let id = Uuid::now_v7().to_string(); let now = chrono::Utc::now(); @@ -4018,7 +4188,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("update_watch_progress timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("update_watch_progress: {}", e)) + PinakesError::Database(format!("update_watch_progress: {e}")) })? } @@ -4027,15 +4197,12 @@ impl StorageBackend for SqliteBackend { user_id: crate::users::UserId, media_id: MediaId, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let user_id_str = user_id.0.to_string(); let media_id_str = media_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let result = db .query_row( @@ -4053,7 +4220,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("get_watch_progress timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_watch_progress: {}", e)) + PinakesError::Database(format!("get_watch_progress: {e}")) })? 
} @@ -4061,14 +4228,11 @@ impl StorageBackend for SqliteBackend { &self, before: chrono::DateTime, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let before_str = before.to_rfc3339(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let affected = db .execute("DELETE FROM usage_events WHERE timestamp < ?", [ @@ -4082,7 +4246,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("cleanup_old_events timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("cleanup_old_events: {}", e)) + PinakesError::Database(format!("cleanup_old_events: {e}")) })? } @@ -4090,7 +4254,7 @@ impl StorageBackend for SqliteBackend { &self, subtitle: &crate::subtitles::Subtitle, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = subtitle.id.to_string(); let media_id_str = subtitle.media_id.0.to_string(); let language = subtitle.language.clone(); @@ -4100,15 +4264,14 @@ impl StorageBackend for SqliteBackend { .as_ref() .map(|p| p.to_string_lossy().to_string()); let is_embedded = subtitle.is_embedded; - let track_index = subtitle.track_index.map(|i| i as i64); + let track_index = subtitle + .track_index + .map(|i| i64::try_from(i).unwrap_or(i64::MAX)); let offset_ms = subtitle.offset_ms; let now = subtitle.created_at.to_rfc3339(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute( "INSERT INTO subtitles (id, media_id, language, format, file_path, \ @@ -4120,7 +4283,7 @@ impl StorageBackend for SqliteBackend { &language, &format, &file_path, - is_embedded as 
i32, + i32::from(is_embedded), &track_index, offset_ms, &now @@ -4132,7 +4295,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("add_subtitle timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("add_subtitle: {}", e)) + PinakesError::Database(format!("add_subtitle: {e}")) })? } @@ -4140,14 +4303,11 @@ impl StorageBackend for SqliteBackend { &self, media_id: MediaId, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let media_id_str = media_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let mut stmt = db.prepare( "SELECT id, media_id, language, format, file_path, is_embedded, \ @@ -4170,12 +4330,14 @@ impl StorageBackend for SqliteBackend { .get::<_, Option>(4)? .map(std::path::PathBuf::from), is_embedded: row.get::<_, i32>(5)? != 0, - track_index: row.get::<_, Option>(6)?.map(|i| i as usize), + track_index: row + .get::<_, Option>(6)? + .map(|i| usize::try_from(i).unwrap_or(0)), offset_ms: row.get(7)?, created_at: parse_datetime(&created_str), }) })? - .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect(); Ok(subtitles) }); @@ -4185,19 +4347,16 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("get_media_subtitles timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_media_subtitles: {}", e)) + PinakesError::Database(format!("get_media_subtitles: {e}")) })? 
} async fn delete_subtitle(&self, id: Uuid) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute("DELETE FROM subtitles WHERE id = ?", [&id_str])?; Ok(()) @@ -4206,7 +4365,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("delete_subtitle timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("delete_subtitle: {}", e)) + PinakesError::Database(format!("delete_subtitle: {e}")) })? } @@ -4215,14 +4374,11 @@ impl StorageBackend for SqliteBackend { id: Uuid, offset_ms: i64, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute("UPDATE subtitles SET offset_ms = ? WHERE id = ?", params![ offset_ms, &id_str @@ -4235,7 +4391,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("update_subtitle_offset timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("update_subtitle_offset: {}", e)) + PinakesError::Database(format!("update_subtitle_offset: {e}")) })? 
} @@ -4243,7 +4399,7 @@ impl StorageBackend for SqliteBackend { &self, meta: &crate::enrichment::ExternalMetadata, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = meta.id.to_string(); let media_id_str = meta.media_id.0.to_string(); let source = meta.source.to_string(); @@ -4253,10 +4409,7 @@ impl StorageBackend for SqliteBackend { let last_updated = meta.last_updated.to_rfc3339(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute( "INSERT OR REPLACE INTO external_metadata (id, media_id, source, \ @@ -4280,7 +4433,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("store_external_metadata timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("store_external_metadata: {}", e)) + PinakesError::Database(format!("store_external_metadata: {e}")) })? } @@ -4288,14 +4441,11 @@ impl StorageBackend for SqliteBackend { &self, media_id: MediaId, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let media_id_str = media_id.0.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let mut stmt = db.prepare( "SELECT id, media_id, source, external_id, metadata_json, confidence, \ @@ -4319,7 +4469,7 @@ impl StorageBackend for SqliteBackend { last_updated: parse_datetime(&updated_str), }) })? - .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect(); Ok(metas) }); @@ -4329,19 +4479,16 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("get_external_metadata timed out".into()) })? 
.map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_external_metadata: {}", e)) + PinakesError::Database(format!("get_external_metadata: {e}")) })? } async fn delete_external_metadata(&self, id: Uuid) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute("DELETE FROM external_metadata WHERE id = ?", [&id_str])?; Ok(()) @@ -4352,7 +4499,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("delete_external_metadata timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("delete_external_metadata: {}", e)) + PinakesError::Database(format!("delete_external_metadata: {e}")) })? } @@ -4360,7 +4507,7 @@ impl StorageBackend for SqliteBackend { &self, session: &crate::transcode::TranscodeSession, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = session.id.to_string(); let media_id_str = session.media_id.0.to_string(); let user_id_str = session.user_id.map(|u| u.0.to_string()); @@ -4373,10 +4520,7 @@ impl StorageBackend for SqliteBackend { let expires_at = session.expires_at.map(|dt| dt.to_rfc3339()); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute( "INSERT INTO transcode_sessions (id, media_id, user_id, profile, \ @@ -4403,7 +4547,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("create_transcode_session timed out".into()) })? 
.map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("create_transcode_session: {}", e)) + PinakesError::Database(format!("create_transcode_session: {e}")) })? } @@ -4411,14 +4555,11 @@ impl StorageBackend for SqliteBackend { &self, id: Uuid, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.query_row( "SELECT id, media_id, user_id, profile, cache_path, status, progress, \ @@ -4468,7 +4609,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("get_transcode_session timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_transcode_session: {}", e)) + PinakesError::Database(format!("get_transcode_session: {e}")) })? } @@ -4476,25 +4617,13 @@ impl StorageBackend for SqliteBackend { &self, media_id: Option, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; - let (sql, param) = match media_id { - Some(mid) => { - ( - "SELECT id, media_id, user_id, profile, cache_path, status, \ - progress, error_message, created_at, expires_at FROM \ - transcode_sessions WHERE media_id = ? 
ORDER BY created_at DESC" - .to_string(), - Some(mid.0.to_string()), - ) - }, - None => { + let (sql, param) = media_id.map_or_else( + || { ( "SELECT id, media_id, user_id, profile, cache_path, status, \ progress, error_message, created_at, expires_at FROM \ @@ -4503,7 +4632,16 @@ impl StorageBackend for SqliteBackend { None, ) }, - }; + |mid| { + ( + "SELECT id, media_id, user_id, profile, cache_path, status, \ + progress, error_message, created_at, expires_at FROM \ + transcode_sessions WHERE media_id = ? ORDER BY created_at DESC" + .to_string(), + Some(mid.0.to_string()), + ) + }, + ); let mut stmt = db.prepare(&sql)?; let parse_row = |row: &Row| -> rusqlite::Result { @@ -4536,12 +4674,12 @@ impl StorageBackend for SqliteBackend { let sessions: Vec<_> = if let Some(ref p) = param { stmt .query_map([p], parse_row)? - .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect() } else { stmt .query_map([], parse_row)? - .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect() }; Ok(sessions) @@ -4552,7 +4690,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("list_transcode_sessions timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("list_transcode_sessions: {}", e)) + PinakesError::Database(format!("list_transcode_sessions: {e}")) })? 
} @@ -4562,16 +4700,13 @@ impl StorageBackend for SqliteBackend { status: crate::transcode::TranscodeStatus, progress: f32, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.to_string(); let status_str = status.as_str().to_string(); let error_message = status.error_message().map(String::from); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute( "UPDATE transcode_sessions SET status = ?, progress = ?, \ @@ -4586,7 +4721,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("update_transcode_status timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("update_transcode_status: {}", e)) + PinakesError::Database(format!("update_transcode_status: {e}")) })? } @@ -4594,14 +4729,11 @@ impl StorageBackend for SqliteBackend { &self, before: chrono::DateTime, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let before_str = before.to_rfc3339(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let affected = db.execute( "DELETE FROM transcode_sessions WHERE expires_at IS NOT NULL AND \ @@ -4616,7 +4748,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("cleanup_expired_transcodes timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("cleanup_expired_transcodes: {}", e)) + PinakesError::Database(format!("cleanup_expired_transcodes: {e}")) })? 
} @@ -4624,7 +4756,7 @@ impl StorageBackend for SqliteBackend { &self, session: &crate::storage::SessionData, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let session_token = session.session_token.clone(); let user_id = session.user_id.clone(); let username = session.username.clone(); @@ -4635,10 +4767,7 @@ impl StorageBackend for SqliteBackend { let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute( "INSERT INTO sessions (session_token, user_id, username, role, \ @@ -4660,7 +4789,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("create_session timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("create_session: {}", e)) + PinakesError::Database(format!("create_session: {e}")) })? } @@ -4668,15 +4797,12 @@ impl StorageBackend for SqliteBackend { &self, session_token: &str, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let token = session_token.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let result = db @@ -4727,21 +4853,18 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("get_session timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_session: {}", e)) + PinakesError::Database(format!("get_session: {e}")) })? 
} async fn touch_session(&self, session_token: &str) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let token = session_token.to_string(); let now = chrono::Utc::now().to_rfc3339(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute( "UPDATE sessions SET last_accessed = ? WHERE session_token = ?", @@ -4753,20 +4876,50 @@ .await .map_err(|_| PinakesError::Database("touch_session timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("touch_session: {}", e)) + PinakesError::Database(format!("touch_session: {e}")) + })? + } + + async fn extend_session( + &self, + session_token: &str, + new_expires_at: chrono::DateTime<chrono::Utc>, + ) -> Result<Option<chrono::DateTime<chrono::Utc>>> { + let conn = Arc::clone(&self.conn); + let token = session_token.to_string(); + let expires = new_expires_at.to_rfc3339(); + let now = chrono::Utc::now().to_rfc3339(); + + let fut = tokio::task::spawn_blocking(move || { + let db = conn.lock().map_err(|e| { + PinakesError::Database(format!("failed to acquire database lock: {e}")) + })?; + // NOTE(review): expires_at is stored as an RFC 3339 string (with a 'T'
+ // separator), so it must be compared against another RFC 3339 string,
+ // not SQLite's datetime('now') (space separator) — mixed formats make
+ // the lexicographic comparison always succeed for same-day expiries.
+ let rows = db.execute( + "UPDATE sessions SET expires_at = ?, last_accessed = ? WHERE \ + session_token = ? AND expires_at > ?", + params![&expires, &now, &token, &now], + )?; + if rows > 0 { + Ok(Some(new_expires_at)) + } else { + Ok(None) + } + }); + tokio::time::timeout(std::time::Duration::from_secs(10), fut) + .await + .map_err(|_| PinakesError::Database("extend_session timed out".into()))? + .map_err(|e: tokio::task::JoinError| { + PinakesError::Database(format!("extend_session: {e}")) })? 
} async fn delete_session(&self, session_token: &str) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let token = session_token.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; db.execute("DELETE FROM sessions WHERE session_token = ?", [&token])?; Ok(()) @@ -4775,20 +4928,17 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("delete_session timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("delete_session: {}", e)) + PinakesError::Database(format!("delete_session: {e}")) })? } async fn delete_user_sessions(&self, username: &str) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let user = username.to_string(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let affected = db.execute("DELETE FROM sessions WHERE username = ?", [&user])?; @@ -4800,20 +4950,17 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("delete_user_sessions timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("delete_user_sessions: {}", e)) + PinakesError::Database(format!("delete_user_sessions: {e}")) })? 
} async fn delete_expired_sessions(&self) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let now = chrono::Utc::now().to_rfc3339(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let affected = db.execute("DELETE FROM sessions WHERE expires_at < ?", [&now])?; @@ -4825,7 +4972,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("delete_expired_sessions timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("delete_expired_sessions: {}", e)) + PinakesError::Database(format!("delete_expired_sessions: {e}")) })? } @@ -4833,16 +4980,13 @@ impl StorageBackend for SqliteBackend { &self, username: Option<&str>, ) -> Result> { - let conn = self.conn.clone(); - let user_filter = username.map(|s| s.to_string()); + let conn = Arc::clone(&self.conn); + let user_filter = username.map(std::string::ToString::to_string); let now = chrono::Utc::now().to_rfc3339(); let fut = tokio::task::spawn_blocking(move || { let db = conn.lock().map_err(|e| { - PinakesError::Database(format!( - "failed to acquire database lock: {}", - e - )) + PinakesError::Database(format!("failed to acquire database lock: {e}")) })?; let (query, params): (&str, Vec) = if let Some(user) = user_filter @@ -4893,7 +5037,7 @@ impl StorageBackend for SqliteBackend { rows .collect::, _>>() - .map_err(|e| e.into()) + .map_err(std::convert::Into::into) }); tokio::time::timeout(std::time::Duration::from_secs(10), fut) .await @@ -4901,7 +5045,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("list_active_sessions timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("list_active_sessions: {}", e)) + PinakesError::Database(format!("list_active_sessions: {e}")) })? 
} @@ -4911,7 +5055,7 @@ impl StorageBackend for SqliteBackend { &self, metadata: &crate::model::BookMetadata, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let media_id_str = metadata.media_id.to_string(); let isbn = metadata.isbn.clone(); let isbn13 = metadata.isbn13.clone(); @@ -4926,7 +5070,9 @@ impl StorageBackend for SqliteBackend { let identifiers = metadata.identifiers.clone(); let fut = tokio::task::spawn_blocking(move || { - let mut conn = conn.lock().expect("connection mutex not poisoned"); + let mut conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let tx = conn.transaction()?; // Upsert book_metadata @@ -5000,7 +5146,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("upsert_book_metadata timed out".into()) })? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("upsert_book_metadata: {}", e)) + PinakesError::Database(format!("upsert_book_metadata: {e}")) })??; Ok(()) } @@ -5009,11 +5155,13 @@ impl StorageBackend for SqliteBackend { &self, media_id: MediaId, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let media_id_str = media_id.to_string(); let fut = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; // Get base book metadata let metadata_row = conn @@ -5124,7 +5272,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("get_book_metadata timed out".into()) })? 
.map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_book_metadata: {}", e)) + PinakesError::Database(format!("get_book_metadata: {e}")) })??, ) } @@ -5134,12 +5282,14 @@ impl StorageBackend for SqliteBackend { media_id: MediaId, author: &crate::model::AuthorInfo, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let media_id_str = media_id.to_string(); let author_clone = author.clone(); let fut = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "INSERT INTO book_authors (media_id, author_name, author_sort, role, \ position) @@ -5161,7 +5311,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("add_book_author timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("add_book_author: {}", e)) + PinakesError::Database(format!("add_book_author: {e}")) })??; Ok(()) } @@ -5170,11 +5320,13 @@ impl StorageBackend for SqliteBackend { &self, media_id: MediaId, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let media_id_str = media_id.to_string(); let fut = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT author_name, author_sort, role, position FROM book_authors WHERE media_id = ?1 ORDER BY position", @@ -5199,7 +5351,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("get_book_authors timed out".into()) })? 
.map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_book_authors: {}", e)) + PinakesError::Database(format!("get_book_authors: {e}")) })??, ) } @@ -5208,12 +5360,14 @@ impl StorageBackend for SqliteBackend { &self, pagination: &Pagination, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let offset = pagination.offset; let limit = pagination.limit; let fut = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT author_name, COUNT(DISTINCT media_id) as book_count FROM book_authors @@ -5222,8 +5376,8 @@ impl StorageBackend for SqliteBackend { LIMIT ?1 OFFSET ?2", )?; let authors: Vec<(String, u64)> = stmt - .query_map([limit as i64, offset as i64], |row| { - Ok((row.get(0)?, row.get::<_, i64>(1)? as u64)) + .query_map([limit.cast_signed(), offset.cast_signed()], |row| { + Ok((row.get(0)?, row.get::<_, i64>(1)?.cast_unsigned())) })? .collect::>>()?; Ok::<_, PinakesError>(authors) @@ -5236,16 +5390,18 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("list_all_authors timed out".into()) })? 
.map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("list_all_authors: {}", e)) + PinakesError::Database(format!("list_all_authors: {e}")) })??, ) } async fn list_series(&self) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let fut = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT series_name, COUNT(*) as book_count FROM book_metadata @@ -5254,7 +5410,9 @@ impl StorageBackend for SqliteBackend { ORDER BY series_name", )?; let series: Vec<(String, u64)> = stmt - .query_map([], |row| Ok((row.get(0)?, row.get::<_, i64>(1)? as u64)))? + .query_map([], |row| { + Ok((row.get(0)?, row.get::<_, i64>(1)?.cast_unsigned())) + })? .collect::>>()?; Ok::<_, PinakesError>(series) }); @@ -5264,7 +5422,7 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("list_series timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("list_series: {}", e)) + PinakesError::Database(format!("list_series: {e}")) })??, ) } @@ -5273,11 +5431,13 @@ impl StorageBackend for SqliteBackend { &self, series_name: &str, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let series = series_name.to_string(); let fut = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT m.id, m.path, m.file_name, m.media_type, m.content_hash, m.file_size, m.title, m.artist, m.album, m.genre, \ @@ -5303,7 +5463,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("get_series_books timed out".into()) })? 
.map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_series_books: {}", e)) + PinakesError::Database(format!("get_series_books: {e}")) })??, ) } @@ -5315,19 +5475,21 @@ impl StorageBackend for SqliteBackend { current_page: i32, ) -> Result<()> { // Reuse watch_history table: progress_secs stores current page for books - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let user_id_str = user_id.to_string(); let media_id_str = media_id.to_string(); let fut = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "INSERT INTO watch_history (user_id, media_id, progress_secs, \ - last_watched_at) + last_watched) VALUES (?1, ?2, ?3, datetime('now')) ON CONFLICT(user_id, media_id) DO UPDATE SET - progress_secs = ?3, last_watched_at = datetime('now')", - rusqlite::params![user_id_str, media_id_str, current_page as f64], + progress_secs = ?3, last_watched = datetime('now')", + rusqlite::params![user_id_str, media_id_str, f64::from(current_page)], )?; Ok::<_, PinakesError>(()) }); @@ -5338,7 +5500,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("update_reading_progress timed out".into()) })? 
.map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("update_reading_progress: {}", e)) + PinakesError::Database(format!("update_reading_progress: {e}")) })??; Ok(()) } @@ -5348,21 +5510,25 @@ impl StorageBackend for SqliteBackend { user_id: uuid::Uuid, media_id: MediaId, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let user_id_str = user_id.to_string(); let media_id_str = media_id.to_string(); let fut = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let result = conn .query_row( - "SELECT wh.progress_secs, bm.page_count, wh.last_watched_at + "SELECT wh.progress_secs, bm.page_count, wh.last_watched FROM watch_history wh LEFT JOIN book_metadata bm ON wh.media_id = bm.media_id WHERE wh.user_id = ?1 AND wh.media_id = ?2", [&user_id_str, &media_id_str], |row| { - let current_page = row.get::<_, f64>(0)? as i32; + let current_page = row + .get::<_, i64>(0) + .map(|v| i32::try_from(v).unwrap_or(0))?; let total_pages = row.get::<_, Option>(1)?; let last_read_str = row.get::<_, String>(2)?; Ok((current_page, total_pages, last_read_str)) @@ -5385,15 +5551,13 @@ impl StorageBackend for SqliteBackend { user_id, current_page, total_pages, - progress_percent: if let Some(total) = total_pages { + progress_percent: total_pages.map_or(0.0, |total| { if total > 0 { - (current_page as f64 / total as f64 * 100.0).min(100.0) + (f64::from(current_page) / f64::from(total) * 100.0).min(100.0) } else { 0.0 } - } else { - 0.0 - }, + }), last_read_at, }) }, @@ -5409,7 +5573,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("get_reading_progress timed out".into()) })? 
.map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_reading_progress: {}", e)) + PinakesError::Database(format!("get_reading_progress: {e}")) })??, ) } @@ -5419,11 +5583,13 @@ impl StorageBackend for SqliteBackend { user_id: uuid::Uuid, status: Option, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let user_id_str = user_id.to_string(); let fut = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; // Query books with reading progress for this user // Join with book_metadata to get page counts and media_items for the @@ -5434,16 +5600,18 @@ impl StorageBackend for SqliteBackend { INNER JOIN watch_history wh ON m.id = wh.media_id LEFT JOIN book_metadata bm ON m.id = bm.media_id WHERE wh.user_id = ?1 - ORDER BY wh.last_watched_at DESC", + ORDER BY wh.last_watched DESC", )?; let rows = stmt.query_map([&user_id_str], |row| { // Parse the media item let item = row_to_media_item(row)?; - // Get progress info (after all MediaItem columns) - let col_offset = 27; // MediaItem has ~27 columns - let current_page = row.get::<_, f64>(col_offset)? as i32; - let total_pages = row.get::<_, Option>(col_offset + 1)?; + // Read the extra columns by name, this is safe *regardless* of column + // count. + let current_page = row + .get::<_, Option>("progress_secs")? 
+ .map_or(0, |v| i32::try_from(v).unwrap_or(0)); + let total_pages = row.get::<_, Option>("page_count")?; Ok((item, current_page, total_pages)) })?; @@ -5452,24 +5620,26 @@ impl StorageBackend for SqliteBackend { match row { Ok((item, current_page, total_pages)) => { // Calculate status based on progress - let calculated_status = if let Some(total) = total_pages { - if total > 0 { - let percent = - (current_page as f64 / total as f64 * 100.0).min(100.0); - if percent >= 100.0 { - crate::model::ReadingStatus::Completed - } else if percent > 0.0 { - crate::model::ReadingStatus::Reading - } else { - crate::model::ReadingStatus::ToRead - } - } else { - crate::model::ReadingStatus::Reading - } - } else { + let calculated_status = total_pages.map_or( // No total pages known, assume reading - crate::model::ReadingStatus::Reading - }; + crate::model::ReadingStatus::Reading, + |total| { + if total > 0 { + let percent = (f64::from(current_page) / f64::from(total) + * 100.0) + .min(100.0); + if percent >= 100.0 { + crate::model::ReadingStatus::Completed + } else if percent > 0.0 { + crate::model::ReadingStatus::Reading + } else { + crate::model::ReadingStatus::ToRead + } + } else { + crate::model::ReadingStatus::Reading + } + }, + ); // Filter by status if specified match status { @@ -5491,7 +5661,7 @@ impl StorageBackend for SqliteBackend { PinakesError::Database("get_reading_list timed out".into()) })? 
.map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("get_reading_list: {}", e)) + PinakesError::Database(format!("get_reading_list: {e}")) })??, ) } @@ -5506,7 +5676,7 @@ impl StorageBackend for SqliteBackend { language: Option<&str>, pagination: &Pagination, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let isbn = isbn.map(String::from); let author = author.map(String::from); let series = series.map(String::from); @@ -5516,7 +5686,9 @@ impl StorageBackend for SqliteBackend { let limit = pagination.limit; let fut = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut query = String::from( "SELECT DISTINCT m.id, m.path, m.file_name, m.media_type, \ @@ -5541,15 +5713,15 @@ impl StorageBackend for SqliteBackend { if let Some(ref a) = author { query.push_str(" INNER JOIN book_authors ba ON m.id = ba.media_id"); conditions.push("ba.author_name LIKE ?"); - params.push(Box::new(format!("%{}%", a))); + params.push(Box::new(format!("%{a}%"))); } if let Some(ref s) = series { conditions.push("bm.series_name LIKE ?"); - params.push(Box::new(format!("%{}%", s))); + params.push(Box::new(format!("%{s}%"))); } if let Some(ref p) = publisher { conditions.push("bm.publisher LIKE ?"); - params.push(Box::new(format!("%{}%", p))); + params.push(Box::new(format!("%{p}%"))); } if let Some(ref l) = language { conditions.push("bm.language = ?"); @@ -5562,11 +5734,11 @@ impl StorageBackend for SqliteBackend { } query.push_str(" ORDER BY m.title LIMIT ? 
OFFSET ?"); - params.push(Box::new(limit as i64)); - params.push(Box::new(offset as i64)); + params.push(Box::new(limit.cast_signed())); + params.push(Box::new(offset.cast_signed())); let params_refs: Vec<&dyn rusqlite::ToSql> = - params.iter().map(|p| p.as_ref()).collect(); + params.iter().map(std::convert::AsRef::as_ref).collect(); let mut stmt = conn.prepare(&query)?; let items = stmt @@ -5580,16 +5752,18 @@ impl StorageBackend for SqliteBackend { .await .map_err(|_| PinakesError::Database("search_books timed out".into()))? .map_err(|e: tokio::task::JoinError| { - PinakesError::Database(format!("search_books: {}", e)) + PinakesError::Database(format!("search_books: {e}")) })??, ) } async fn insert_managed_media(&self, item: &MediaItem) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let item = item.clone(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "INSERT INTO media_items (id, path, file_name, media_type, \ content_hash, file_size, @@ -5605,7 +5779,7 @@ impl StorageBackend for SqliteBackend { item.file_name, media_type_to_str(&item.media_type), item.content_hash.0, - item.file_size as i64, + item.file_size.cast_signed(), item.title, item.artist, item.album, @@ -5629,7 +5803,7 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("insert_managed_media: {}", e)) + PinakesError::Database(format!("insert_managed_media: {e}")) })??; Ok(()) } @@ -5640,13 +5814,15 @@ impl StorageBackend for SqliteBackend { size: u64, mime_type: &str, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let hash_str = hash.0.clone(); let mime = mime_type.to_string(); let now = chrono::Utc::now().to_rfc3339(); tokio::task::spawn_blocking(move || { - let conn = 
conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; // Try to get existing blob let existing = conn @@ -5658,9 +5834,9 @@ impl StorageBackend for SqliteBackend { |row| { Ok(ManagedBlob { content_hash: ContentHash(row.get::<_, String>(0)?), - file_size: row.get::<_, i64>(1)? as u64, + file_size: row.get::<_, i64>(1)?.cast_unsigned(), mime_type: row.get(2)?, - reference_count: row.get::<_, i32>(3)? as u32, + reference_count: row.get::<_, i32>(3)?.cast_unsigned(), stored_at: parse_datetime(&row.get::<_, String>(4)?), last_verified: row .get::<_, Option>(5)? @@ -5679,7 +5855,7 @@ impl StorageBackend for SqliteBackend { "INSERT INTO managed_blobs (content_hash, file_size, mime_type, \ reference_count, stored_at) VALUES (?1, ?2, ?3, 1, ?4)", - params![&hash_str, size as i64, &mime, &now], + params![&hash_str, size.cast_signed(), &mime, &now], )?; Ok(ManagedBlob { @@ -5692,15 +5868,17 @@ impl StorageBackend for SqliteBackend { }) }) .await - .map_err(|e| PinakesError::Database(format!("get_or_create_blob: {}", e)))? + .map_err(|e| PinakesError::Database(format!("get_or_create_blob: {e}")))? } async fn get_blob(&self, hash: &ContentHash) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let hash_str = hash.0.clone(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn .query_row( "SELECT content_hash, file_size, mime_type, reference_count, \ @@ -5710,9 +5888,9 @@ impl StorageBackend for SqliteBackend { |row| { Ok(ManagedBlob { content_hash: ContentHash(row.get::<_, String>(0)?), - file_size: row.get::<_, i64>(1)? as u64, + file_size: row.get::<_, i64>(1)?.cast_unsigned(), mime_type: row.get(2)?, - reference_count: row.get::<_, i32>(3)? 
as u32, + reference_count: row.get::<_, i32>(3)?.cast_unsigned(), stored_at: parse_datetime(&row.get::<_, String>(4)?), last_verified: row .get::<_, Option>(5)? @@ -5721,18 +5899,20 @@ impl StorageBackend for SqliteBackend { }, ) .optional() + .map_err(|e| PinakesError::Database(format!("get_blob query: {e}"))) }) .await - .map_err(|e| PinakesError::Database(format!("get_blob: {}", e)))? - .map_err(|e| PinakesError::Database(format!("get_blob query: {}", e))) + .map_err(|e| PinakesError::Database(format!("get_blob: {e}")))? } async fn increment_blob_ref(&self, hash: &ContentHash) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let hash_str = hash.0.clone(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "UPDATE managed_blobs SET reference_count = reference_count + 1 WHERE \ content_hash = ?1", @@ -5742,17 +5922,19 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("increment_blob_ref: {}", e)) + PinakesError::Database(format!("increment_blob_ref: {e}")) })??; Ok(()) } async fn decrement_blob_ref(&self, hash: &ContentHash) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let hash_str = hash.0.clone(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "UPDATE managed_blobs SET reference_count = reference_count - 1 WHERE \ content_hash = ?1", @@ -5766,24 +5948,28 @@ impl StorageBackend for SqliteBackend { params![&hash_str], |row| row.get(0), ) - .unwrap_or(0); + .map_err(|e| { + PinakesError::Database(format!("decrement_blob_ref read: {e}")) + })?; Ok::<_, PinakesError>(count <= 0) }) 
.await - .map_err(|e| PinakesError::Database(format!("decrement_blob_ref: {}", e)))? + .map_err(|e| PinakesError::Database(format!("decrement_blob_ref: {e}")))? .map_err(|e| { - PinakesError::Database(format!("decrement_blob_ref query: {}", e)) + PinakesError::Database(format!("decrement_blob_ref query: {e}")) }) } async fn update_blob_verified(&self, hash: &ContentHash) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let hash_str = hash.0.clone(); let now = chrono::Utc::now().to_rfc3339(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "UPDATE managed_blobs SET last_verified = ?1 WHERE content_hash = ?2", params![&now, &hash_str], @@ -5792,16 +5978,18 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("update_blob_verified: {}", e)) + PinakesError::Database(format!("update_blob_verified: {e}")) })??; Ok(()) } async fn list_orphaned_blobs(&self) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT content_hash, file_size, mime_type, reference_count, \ stored_at, last_verified @@ -5811,9 +5999,9 @@ impl StorageBackend for SqliteBackend { .query_map([], |row| { Ok(ManagedBlob { content_hash: ContentHash(row.get::<_, String>(0)?), - file_size: row.get::<_, i64>(1)? as u64, + file_size: row.get::<_, i64>(1)?.cast_unsigned(), mime_type: row.get(2)?, - reference_count: row.get::<_, i32>(3)? 
as u32, + reference_count: row.get::<_, i32>(3)?.cast_unsigned(), stored_at: parse_datetime(&row.get::<_, String>(4)?), last_verified: row .get::<_, Option>(5)? @@ -5824,18 +6012,20 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(blobs) }) .await - .map_err(|e| PinakesError::Database(format!("list_orphaned_blobs: {}", e)))? + .map_err(|e| PinakesError::Database(format!("list_orphaned_blobs: {e}")))? .map_err(|e| { - PinakesError::Database(format!("list_orphaned_blobs query: {}", e)) + PinakesError::Database(format!("list_orphaned_blobs query: {e}")) }) } async fn delete_blob(&self, hash: &ContentHash) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let hash_str = hash.0.clone(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "DELETE FROM managed_blobs WHERE content_hash = ?1", params![&hash_str], @@ -5843,48 +6033,64 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(()) }) .await - .map_err(|e| PinakesError::Database(format!("delete_blob: {}", e)))??; + .map_err(|e| PinakesError::Database(format!("delete_blob: {e}")))??; Ok(()) } async fn managed_storage_stats(&self) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; - let total_blobs: u64 = - conn.query_row("SELECT COUNT(*) FROM managed_blobs", [], |row| { + let total_blobs: u64 = conn + .query_row("SELECT COUNT(*) FROM managed_blobs", [], |row| { row.get::<_, i64>(0) - })? as u64; + })? 
+ .cast_unsigned(); - let total_size: u64 = conn.query_row( - "SELECT COALESCE(SUM(file_size), 0) FROM managed_blobs", - [], - |row| row.get::<_, i64>(0), - )? as u64; + let total_size: u64 = conn + .query_row( + "SELECT COALESCE(SUM(file_size), 0) FROM managed_blobs", + [], + |row| row.get::<_, i64>(0), + )? + .cast_unsigned(); - let unique_size: u64 = conn.query_row( - "SELECT COALESCE(SUM(file_size), 0) FROM managed_blobs WHERE \ - reference_count = 1", - [], - |row| row.get::<_, i64>(0), - )? as u64; + let unique_size: u64 = conn + .query_row( + "SELECT COALESCE(SUM(file_size), 0) FROM managed_blobs WHERE \ + reference_count = 1", + [], + |row| row.get::<_, i64>(0), + )? + .cast_unsigned(); - let managed_media_count: u64 = conn.query_row( - "SELECT COUNT(*) FROM media_items WHERE storage_mode = 'managed'", - [], - |row| row.get::<_, i64>(0), - )? as u64; + let managed_media_count: u64 = conn + .query_row( + "SELECT COUNT(*) FROM media_items WHERE storage_mode = 'managed'", + [], + |row| row.get::<_, i64>(0), + )? + .cast_unsigned(); - let orphaned_blobs: u64 = conn.query_row( - "SELECT COUNT(*) FROM managed_blobs WHERE reference_count <= 0", - [], - |row| row.get::<_, i64>(0), - )? as u64; + let orphaned_blobs: u64 = conn + .query_row( + "SELECT COUNT(*) FROM managed_blobs WHERE reference_count <= 0", + [], + |row| row.get::<_, i64>(0), + )? + .cast_unsigned(); let dedup_ratio = if total_size > 0 { - unique_size as f64 / total_size as f64 + // Compute ratio via fixed-point arithmetic to avoid u64->f64 precision + // loss. Uses u128 intermediate to avoid overflow. + let ratio_fixed = + u128::from(unique_size) * (1u128 << 20) / u128::from(total_size); + f64::from(u32::try_from(ratio_fixed).unwrap_or(u32::MAX)) + / f64::from(1u32 << 20) } else { 1.0 }; @@ -5899,11 +6105,9 @@ impl StorageBackend for SqliteBackend { }) }) .await + .map_err(|e| PinakesError::Database(format!("managed_storage_stats: {e}")))? 
.map_err(|e| { - PinakesError::Database(format!("managed_storage_stats: {}", e)) - })? - .map_err(|e| { - PinakesError::Database(format!("managed_storage_stats query: {}", e)) + PinakesError::Database(format!("managed_storage_stats query: {e}")) }) } @@ -5912,12 +6116,14 @@ impl StorageBackend for SqliteBackend { device: &crate::sync::SyncDevice, token_hash: &str, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let device = device.clone(); let token_hash = token_hash.to_string(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "INSERT INTO sync_devices (id, user_id, name, device_type, \ client_version, os_info, @@ -5942,67 +6148,70 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(device) }) .await - .map_err(|e| PinakesError::Database(format!("register_device: {}", e)))? - .map_err(|e| { - PinakesError::Database(format!("register_device query: {}", e)) - }) + .map_err(|e| PinakesError::Database(format!("register_device: {e}")))? 
+ .map_err(|e| PinakesError::Database(format!("register_device query: {e}"))) } async fn get_device( &self, id: crate::sync::DeviceId, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); - conn.query_row( - "SELECT id, user_id, name, device_type, client_version, os_info, + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; + conn + .query_row( + "SELECT id, user_id, name, device_type, client_version, os_info, last_sync_at, last_seen_at, sync_cursor, enabled, \ - created_at, updated_at + created_at, updated_at FROM sync_devices WHERE id = ?1", - params![id.0.to_string()], - |row| { - Ok(crate::sync::SyncDevice { - id: crate::sync::DeviceId(parse_uuid( - &row.get::<_, String>(0)?, - )?), - user_id: crate::users::UserId(parse_uuid( - &row.get::<_, String>(1)?, - )?), - name: row.get(2)?, - device_type: row - .get::<_, String>(3)? - .parse() - .unwrap_or_default(), - client_version: row.get(4)?, - os_info: row.get(5)?, - last_sync_at: row - .get::<_, Option>(6)? - .map(|s| parse_datetime(&s)), - last_seen_at: parse_datetime(&row.get::<_, String>(7)?), - sync_cursor: row.get(8)?, - enabled: row.get(9)?, - created_at: parse_datetime(&row.get::<_, String>(10)?), - updated_at: parse_datetime(&row.get::<_, String>(11)?), - }) - }, - ) + params![id.0.to_string()], + |row| { + Ok(crate::sync::SyncDevice { + id: crate::sync::DeviceId(parse_uuid( + &row.get::<_, String>(0)?, + )?), + user_id: crate::users::UserId(parse_uuid( + &row.get::<_, String>(1)?, + )?), + name: row.get(2)?, + device_type: row + .get::<_, String>(3)? + .parse() + .unwrap_or_default(), + client_version: row.get(4)?, + os_info: row.get(5)?, + last_sync_at: row + .get::<_, Option>(6)? 
+ .map(|s| parse_datetime(&s)), + last_seen_at: parse_datetime(&row.get::<_, String>(7)?), + sync_cursor: row.get(8)?, + enabled: row.get(9)?, + created_at: parse_datetime(&row.get::<_, String>(10)?), + updated_at: parse_datetime(&row.get::<_, String>(11)?), + }) + }, + ) + .map_err(|e| PinakesError::Database(format!("get_device query: {e}"))) }) .await - .map_err(|e| PinakesError::Database(format!("get_device: {}", e)))? - .map_err(|e| PinakesError::Database(format!("get_device query: {}", e))) + .map_err(|e| PinakesError::Database(format!("get_device: {e}")))? } async fn get_device_by_token( &self, token_hash: &str, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let token_hash = token_hash.to_string(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn .query_row( "SELECT id, user_id, name, device_type, client_version, os_info, @@ -6037,22 +6246,24 @@ impl StorageBackend for SqliteBackend { }, ) .optional() + .map_err(|e| { + PinakesError::Database(format!("get_device_by_token query: {e}")) + }) }) .await - .map_err(|e| PinakesError::Database(format!("get_device_by_token: {}", e)))? - .map_err(|e| { - PinakesError::Database(format!("get_device_by_token query: {}", e)) - }) + .map_err(|e| PinakesError::Database(format!("get_device_by_token: {e}")))? 
} async fn list_user_devices( &self, user_id: crate::users::UserId, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT id, user_id, name, device_type, client_version, os_info, last_sync_at, last_seen_at, sync_cursor, enabled, \ @@ -6090,9 +6301,9 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(devices) }) .await - .map_err(|e| PinakesError::Database(format!("list_user_devices: {}", e)))? + .map_err(|e| PinakesError::Database(format!("list_user_devices: {e}")))? .map_err(|e| { - PinakesError::Database(format!("list_user_devices query: {}", e)) + PinakesError::Database(format!("list_user_devices query: {e}")) }) } @@ -6100,11 +6311,13 @@ impl StorageBackend for SqliteBackend { &self, device: &crate::sync::SyncDevice, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let device = device.clone(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "UPDATE sync_devices SET name = ?1, device_type = ?2, client_version \ = ?3, @@ -6127,31 +6340,35 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(()) }) .await - .map_err(|e| PinakesError::Database(format!("update_device: {}", e)))??; + .map_err(|e| PinakesError::Database(format!("update_device: {e}")))??; Ok(()) } async fn delete_device(&self, id: crate::sync::DeviceId) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + 
PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute("DELETE FROM sync_devices WHERE id = ?1", params![ id.0.to_string() ])?; Ok::<_, PinakesError>(()) }) .await - .map_err(|e| PinakesError::Database(format!("delete_device: {}", e)))??; + .map_err(|e| PinakesError::Database(format!("delete_device: {e}")))??; Ok(()) } async fn touch_device(&self, id: crate::sync::DeviceId) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let now = chrono::Utc::now().to_rfc3339(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "UPDATE sync_devices SET last_seen_at = ?1, updated_at = ?1 WHERE id \ = ?2", @@ -6160,7 +6377,7 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(()) }) .await - .map_err(|e| PinakesError::Database(format!("touch_device: {}", e)))??; + .map_err(|e| PinakesError::Database(format!("touch_device: {e}")))??; Ok(()) } @@ -6168,11 +6385,13 @@ impl StorageBackend for SqliteBackend { &self, change: &crate::sync::SyncLogEntry, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let change = change.clone(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; // Get and increment sequence let seq: i64 = conn.query_row( @@ -6194,7 +6413,7 @@ impl StorageBackend for SqliteBackend { change.media_id.map(|m| m.0.to_string()), change.path, change.content_hash.as_ref().map(|h| h.0.clone()), - change.file_size.map(|s| s as i64), + change.file_size.map(u64::cast_signed), change.metadata_json, change.changed_by_device.map(|d| d.0.to_string()), change.timestamp.to_rfc3339(), @@ -6204,7 +6423,7 @@ impl StorageBackend for 
SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("record_sync_change: {}", e)) + PinakesError::Database(format!("record_sync_change: {e}")) })??; Ok(()) } @@ -6214,17 +6433,19 @@ impl StorageBackend for SqliteBackend { cursor: i64, limit: u64, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT id, sequence, change_type, media_id, path, content_hash, file_size, metadata_json, changed_by_device, timestamp FROM sync_log WHERE sequence > ?1 ORDER BY sequence LIMIT ?2", )?; let entries = stmt - .query_map(params![cursor, limit as i64], |row| { + .query_map(params![cursor, limit.cast_signed()], |row| { Ok(crate::sync::SyncLogEntry { id: parse_uuid(&row.get::<_, String>(0)?)?, sequence: row.get(1)?, @@ -6239,7 +6460,9 @@ impl StorageBackend for SqliteBackend { content_hash: row .get::<_, Option>(5)? .map(ContentHash), - file_size: row.get::<_, Option>(6)?.map(|s| s as u64), + file_size: row + .get::<_, Option>(6)? + .map(i64::cast_unsigned), metadata_json: row.get(7)?, changed_by_device: row.get::<_, Option>(8)?.and_then(|s| { Uuid::parse_str(&s).ok().map(crate::sync::DeviceId) @@ -6251,50 +6474,54 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(entries) }) .await - .map_err(|e| PinakesError::Database(format!("get_changes_since: {}", e)))? + .map_err(|e| PinakesError::Database(format!("get_changes_since: {e}")))? 
.map_err(|e| { - PinakesError::Database(format!("get_changes_since query: {}", e)) + PinakesError::Database(format!("get_changes_since query: {e}")) }) } async fn get_current_sync_cursor(&self) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); - conn.query_row( - "SELECT current_value FROM sync_sequence WHERE id = 1", - [], - |row| row.get(0), - ) + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; + conn + .query_row( + "SELECT current_value FROM sync_sequence WHERE id = 1", + [], + |row| row.get(0), + ) + .map_err(|e| { + PinakesError::Database(format!("get_current_sync_cursor query: {e}")) + }) }) .await .map_err(|e| { - PinakesError::Database(format!("get_current_sync_cursor: {}", e)) + PinakesError::Database(format!("get_current_sync_cursor: {e}")) })? - .map_err(|e| { - PinakesError::Database(format!("get_current_sync_cursor query: {}", e)) - }) } async fn cleanup_old_sync_log(&self, before: DateTime) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let before_str = before.to_rfc3339(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); - conn.execute("DELETE FROM sync_log WHERE timestamp < ?1", params![ - &before_str - ]) + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; + conn + .execute("DELETE FROM sync_log WHERE timestamp < ?1", params![ + &before_str + ]) + .map(|n| n as u64) + .map_err(|e| { + PinakesError::Database(format!("cleanup_old_sync_log query: {e}")) + }) }) .await - .map_err(|e| { - PinakesError::Database(format!("cleanup_old_sync_log: {}", e)) - })? 
- .map(|n| n as u64) - .map_err(|e| { - PinakesError::Database(format!("cleanup_old_sync_log query: {}", e)) - }) + .map_err(|e| PinakesError::Database(format!("cleanup_old_sync_log: {e}")))? } async fn get_device_sync_state( @@ -6302,11 +6529,13 @@ impl StorageBackend for SqliteBackend { device_id: crate::sync::DeviceId, path: &str, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let path = path.to_string(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn .query_row( "SELECT device_id, path, local_hash, server_hash, local_mtime, \ @@ -6336,25 +6565,27 @@ impl StorageBackend for SqliteBackend { }, ) .optional() + .map_err(|e| { + PinakesError::Database(format!("get_device_sync_state query: {e}")) + }) }) .await .map_err(|e| { - PinakesError::Database(format!("get_device_sync_state: {}", e)) + PinakesError::Database(format!("get_device_sync_state: {e}")) })? 
- .map_err(|e| { - PinakesError::Database(format!("get_device_sync_state query: {}", e)) - }) } async fn upsert_device_sync_state( &self, state: &crate::sync::DeviceSyncState, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let state = state.clone(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "INSERT INTO device_sync_state (device_id, path, local_hash, \ server_hash, @@ -6385,7 +6616,7 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("upsert_device_sync_state: {}", e)) + PinakesError::Database(format!("upsert_device_sync_state: {e}")) })??; Ok(()) } @@ -6394,10 +6625,12 @@ impl StorageBackend for SqliteBackend { &self, device_id: crate::sync::DeviceId, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT device_id, path, local_hash, server_hash, local_mtime, \ server_mtime, @@ -6431,9 +6664,9 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(states) }) .await - .map_err(|e| PinakesError::Database(format!("list_pending_sync: {}", e)))? + .map_err(|e| PinakesError::Database(format!("list_pending_sync: {e}")))? 
.map_err(|e| { - PinakesError::Database(format!("list_pending_sync query: {}", e)) + PinakesError::Database(format!("list_pending_sync query: {e}")) }) } @@ -6441,11 +6674,13 @@ impl StorageBackend for SqliteBackend { &self, session: &crate::sync::UploadSession, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let session = session.clone(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "INSERT INTO upload_sessions (id, device_id, target_path, \ expected_hash, @@ -6457,9 +6692,9 @@ impl StorageBackend for SqliteBackend { session.device_id.0.to_string(), session.target_path, session.expected_hash.0, - session.expected_size as i64, - session.chunk_size as i64, - session.chunk_count as i64, + session.expected_size.cast_signed(), + session.chunk_size.cast_signed(), + session.chunk_count.cast_signed(), session.status.to_string(), session.created_at.to_rfc3339(), session.expires_at.to_rfc3339(), @@ -6470,7 +6705,7 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("create_upload_session: {}", e)) + PinakesError::Database(format!("create_upload_session: {e}")) })??; Ok(()) } @@ -6479,55 +6714,60 @@ impl StorageBackend for SqliteBackend { &self, id: Uuid, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); - conn.query_row( - "SELECT id, device_id, target_path, expected_hash, expected_size, \ - chunk_size, + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; + conn + .query_row( + "SELECT id, device_id, target_path, expected_hash, expected_size, \ + chunk_size, chunk_count, status, created_at, expires_at, \ - 
last_activity + last_activity FROM upload_sessions WHERE id = ?1", - params![id.to_string()], - |row| { - Ok(crate::sync::UploadSession { - id: parse_uuid(&row.get::<_, String>(0)?)?, - device_id: crate::sync::DeviceId(parse_uuid( - &row.get::<_, String>(1)?, - )?), - target_path: row.get(2)?, - expected_hash: ContentHash(row.get(3)?), - expected_size: row.get::<_, i64>(4)? as u64, - chunk_size: row.get::<_, i64>(5)? as u64, - chunk_count: row.get::<_, i64>(6)? as u64, - status: row - .get::<_, String>(7)? - .parse() - .unwrap_or(crate::sync::UploadStatus::Pending), - created_at: parse_datetime(&row.get::<_, String>(8)?), - expires_at: parse_datetime(&row.get::<_, String>(9)?), - last_activity: parse_datetime(&row.get::<_, String>(10)?), - }) - }, - ) + params![id.to_string()], + |row| { + Ok(crate::sync::UploadSession { + id: parse_uuid(&row.get::<_, String>(0)?)?, + device_id: crate::sync::DeviceId(parse_uuid( + &row.get::<_, String>(1)?, + )?), + target_path: row.get(2)?, + expected_hash: ContentHash(row.get(3)?), + expected_size: row.get::<_, i64>(4)?.cast_unsigned(), + chunk_size: row.get::<_, i64>(5)?.cast_unsigned(), + chunk_count: row.get::<_, i64>(6)?.cast_unsigned(), + status: row + .get::<_, String>(7)? + .parse() + .unwrap_or(crate::sync::UploadStatus::Pending), + created_at: parse_datetime(&row.get::<_, String>(8)?), + expires_at: parse_datetime(&row.get::<_, String>(9)?), + last_activity: parse_datetime(&row.get::<_, String>(10)?), + }) + }, + ) + .map_err(|e| { + PinakesError::Database(format!("get_upload_session query: {e}")) + }) }) .await - .map_err(|e| PinakesError::Database(format!("get_upload_session: {}", e)))? - .map_err(|e| { - PinakesError::Database(format!("get_upload_session query: {}", e)) - }) + .map_err(|e| PinakesError::Database(format!("get_upload_session: {e}")))? 
} async fn update_upload_session( &self, session: &crate::sync::UploadSession, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let session = session.clone(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "UPDATE upload_sessions SET status = ?1, last_activity = ?2 WHERE id \ = ?3", @@ -6541,7 +6781,7 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("update_upload_session: {}", e)) + PinakesError::Database(format!("update_upload_session: {e}")) })??; Ok(()) } @@ -6551,11 +6791,13 @@ impl StorageBackend for SqliteBackend { upload_id: Uuid, chunk: &crate::sync::ChunkInfo, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let chunk = chunk.clone(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "INSERT INTO upload_chunks (upload_id, chunk_index, offset, size, \ hash, received_at) @@ -6565,9 +6807,9 @@ impl StorageBackend for SqliteBackend { hash = excluded.hash, received_at = excluded.received_at", params![ upload_id.to_string(), - chunk.chunk_index as i64, - chunk.offset as i64, - chunk.size as i64, + chunk.chunk_index.cast_signed(), + chunk.offset.cast_signed(), + chunk.size.cast_signed(), chunk.hash, chunk.received_at.to_rfc3339(), ], @@ -6575,7 +6817,7 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(()) }) .await - .map_err(|e| PinakesError::Database(format!("record_chunk: {}", e)))??; + .map_err(|e| PinakesError::Database(format!("record_chunk: {e}")))??; Ok(()) } @@ -6583,10 +6825,12 @@ impl StorageBackend for SqliteBackend { &self, upload_id: Uuid, ) -> 
Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT upload_id, chunk_index, offset, size, hash, received_at FROM upload_chunks WHERE upload_id = ?1 ORDER BY chunk_index", @@ -6595,9 +6839,9 @@ impl StorageBackend for SqliteBackend { .query_map(params![upload_id.to_string()], |row| { Ok(crate::sync::ChunkInfo { upload_id: parse_uuid(&row.get::<_, String>(0)?)?, - chunk_index: row.get::<_, i64>(1)? as u64, - offset: row.get::<_, i64>(2)? as u64, - size: row.get::<_, i64>(3)? as u64, + chunk_index: row.get::<_, i64>(1)?.cast_unsigned(), + offset: row.get::<_, i64>(2)?.cast_unsigned(), + size: row.get::<_, i64>(3)?.cast_unsigned(), hash: row.get(4)?, received_at: parse_datetime(&row.get::<_, String>(5)?), }) @@ -6606,42 +6850,47 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(chunks) }) .await - .map_err(|e| PinakesError::Database(format!("get_upload_chunks: {}", e)))? + .map_err(|e| PinakesError::Database(format!("get_upload_chunks: {e}")))? 
.map_err(|e| { - PinakesError::Database(format!("get_upload_chunks query: {}", e)) + PinakesError::Database(format!("get_upload_chunks query: {e}")) }) } async fn cleanup_expired_uploads(&self) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let now = chrono::Utc::now().to_rfc3339(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); - conn.execute( - "DELETE FROM upload_sessions WHERE expires_at < ?1", - params![&now], - ) + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; + conn + .execute( + "DELETE FROM upload_sessions WHERE expires_at < ?1", + params![&now], + ) + .map(|n| n as u64) + .map_err(|e| { + PinakesError::Database(format!("cleanup_expired_uploads query: {e}")) + }) }) .await .map_err(|e| { - PinakesError::Database(format!("cleanup_expired_uploads: {}", e)) + PinakesError::Database(format!("cleanup_expired_uploads: {e}")) })? - .map(|n| n as u64) - .map_err(|e| { - PinakesError::Database(format!("cleanup_expired_uploads query: {}", e)) - }) } async fn record_conflict( &self, conflict: &crate::sync::SyncConflict, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let conflict = conflict.clone(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "INSERT INTO sync_conflicts (id, device_id, path, local_hash, \ local_mtime, @@ -6661,7 +6910,7 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(()) }) .await - .map_err(|e| PinakesError::Database(format!("record_conflict: {}", e)))??; + .map_err(|e| PinakesError::Database(format!("record_conflict: {e}")))??; Ok(()) } @@ -6669,10 +6918,12 @@ impl StorageBackend for SqliteBackend { &self, device_id: crate::sync::DeviceId, ) -> Result> { 
- let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT id, device_id, path, local_hash, local_mtime, server_hash, \ server_mtime, @@ -6718,10 +6969,10 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("get_unresolved_conflicts: {}", e)) + PinakesError::Database(format!("get_unresolved_conflicts: {e}")) })? .map_err(|e| { - PinakesError::Database(format!("get_unresolved_conflicts query: {}", e)) + PinakesError::Database(format!("get_unresolved_conflicts query: {e}")) }) } @@ -6730,7 +6981,7 @@ impl StorageBackend for SqliteBackend { id: Uuid, resolution: crate::config::ConflictResolution, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let now = chrono::Utc::now().to_rfc3339(); let resolution_str = match resolution { crate::config::ConflictResolution::ServerWins => "server_wins", @@ -6740,7 +6991,9 @@ impl StorageBackend for SqliteBackend { }; tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "UPDATE sync_conflicts SET resolved_at = ?1, resolution = ?2 WHERE id \ = ?3", @@ -6749,9 +7002,7 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(()) }) .await - .map_err(|e| { - PinakesError::Database(format!("resolve_conflict: {}", e)) - })??; + .map_err(|e| PinakesError::Database(format!("resolve_conflict: {e}")))??; Ok(()) } @@ -6759,11 +7010,13 @@ impl StorageBackend for SqliteBackend { &self, share: &crate::sharing::Share, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let share = 
share.clone(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let (recipient_type, recipient_user_id, public_token, password_hash) = match &share.recipient { @@ -6809,15 +7062,15 @@ impl StorageBackend for SqliteBackend { recipient_user_id, public_token, password_hash, - share.permissions.can_view, - share.permissions.can_download, - share.permissions.can_edit, - share.permissions.can_delete, - share.permissions.can_reshare, - share.permissions.can_add, + share.permissions.view.can_view, + share.permissions.view.can_download, + share.permissions.mutate.can_edit, + share.permissions.mutate.can_delete, + share.permissions.view.can_reshare, + share.permissions.mutate.can_add, share.note, share.expires_at.map(|dt| dt.to_rfc3339()), - share.access_count as i64, + share.access_count.cast_signed(), share.inherit_to_children, share.parent_share_id.map(|s| s.0.to_string()), share.created_at.to_rfc3339(), @@ -6827,65 +7080,71 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(share) }) .await - .map_err(|e| PinakesError::Database(format!("create_share: {}", e)))? - .map_err(|e| PinakesError::Database(format!("create_share query: {}", e))) + .map_err(|e| PinakesError::Database(format!("create_share: {e}")))? 
+ .map_err(|e| PinakesError::Database(format!("create_share query: {e}"))) } async fn get_share( &self, id: crate::sharing::ShareId, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); - conn.query_row( - "SELECT id, target_type, target_id, owner_id, recipient_type, \ - recipient_user_id, + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; + conn + .query_row( + "SELECT id, target_type, target_id, owner_id, recipient_type, \ + recipient_user_id, public_token, public_password_hash, perm_view, \ - perm_download, perm_edit, + perm_download, perm_edit, perm_delete, perm_reshare, perm_add, note, expires_at, \ - access_count, + access_count, last_accessed, inherit_to_children, parent_share_id, \ - created_at, updated_at + created_at, updated_at FROM shares WHERE id = ?1", - params![id.0.to_string()], - row_to_share, - ) + params![id.0.to_string()], + row_to_share, + ) + .map_err(|e| PinakesError::Database(format!("get_share query: {e}"))) }) .await - .map_err(|e| PinakesError::Database(format!("get_share: {}", e)))? - .map_err(|e| PinakesError::Database(format!("get_share query: {}", e))) + .map_err(|e| PinakesError::Database(format!("get_share: {e}")))? 
} async fn get_share_by_token( &self, token: &str, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let token = token.to_string(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); - conn.query_row( - "SELECT id, target_type, target_id, owner_id, recipient_type, \ - recipient_user_id, + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; + conn + .query_row( + "SELECT id, target_type, target_id, owner_id, recipient_type, \ + recipient_user_id, public_token, public_password_hash, perm_view, \ - perm_download, perm_edit, + perm_download, perm_edit, perm_delete, perm_reshare, perm_add, note, expires_at, \ - access_count, + access_count, last_accessed, inherit_to_children, parent_share_id, \ - created_at, updated_at + created_at, updated_at FROM shares WHERE public_token = ?1", - params![&token], - row_to_share, - ) + params![&token], + row_to_share, + ) + .map_err(|e| { + PinakesError::Database(format!("get_share_by_token query: {e}")) + }) }) .await - .map_err(|e| PinakesError::Database(format!("get_share_by_token: {}", e)))? - .map_err(|e| { - PinakesError::Database(format!("get_share_by_token query: {}", e)) - }) + .map_err(|e| PinakesError::Database(format!("get_share_by_token: {e}")))? 
} async fn list_shares_by_owner( @@ -6893,12 +7152,14 @@ impl StorageBackend for SqliteBackend { owner_id: crate::users::UserId, pagination: &Pagination, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let offset = pagination.offset; let limit = pagination.limit; tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT id, target_type, target_id, owner_id, recipient_type, \ recipient_user_id, @@ -6913,18 +7174,20 @@ impl StorageBackend for SqliteBackend { )?; let shares = stmt .query_map( - params![owner_id.0.to_string(), limit as i64, offset as i64], + params![ + owner_id.0.to_string(), + limit.cast_signed(), + offset.cast_signed() + ], row_to_share, )? .collect::>>()?; Ok::<_, PinakesError>(shares) }) .await + .map_err(|e| PinakesError::Database(format!("list_shares_by_owner: {e}")))? .map_err(|e| { - PinakesError::Database(format!("list_shares_by_owner: {}", e)) - })? 
- .map_err(|e| { - PinakesError::Database(format!("list_shares_by_owner query: {}", e)) + PinakesError::Database(format!("list_shares_by_owner query: {e}")) }) } @@ -6933,12 +7196,14 @@ impl StorageBackend for SqliteBackend { user_id: crate::users::UserId, pagination: &Pagination, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let offset = pagination.offset; let limit = pagination.limit; tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT id, target_type, target_id, owner_id, recipient_type, \ recipient_user_id, @@ -6953,18 +7218,20 @@ impl StorageBackend for SqliteBackend { )?; let shares = stmt .query_map( - params![user_id.0.to_string(), limit as i64, offset as i64], + params![ + user_id.0.to_string(), + limit.cast_signed(), + offset.cast_signed() + ], row_to_share, )? .collect::>>()?; Ok::<_, PinakesError>(shares) }) .await + .map_err(|e| PinakesError::Database(format!("list_shares_for_user: {e}")))? .map_err(|e| { - PinakesError::Database(format!("list_shares_for_user: {}", e)) - })? 
- .map_err(|e| { - PinakesError::Database(format!("list_shares_for_user query: {}", e)) + PinakesError::Database(format!("list_shares_for_user query: {e}")) }) } @@ -6972,12 +7239,14 @@ impl StorageBackend for SqliteBackend { &self, target: &crate::sharing::ShareTarget, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let target_type = target.target_type().to_string(); let target_id = target.target_id().to_string(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT id, target_type, target_id, owner_id, recipient_type, \ recipient_user_id, @@ -6996,10 +7265,10 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("list_shares_for_target: {}", e)) + PinakesError::Database(format!("list_shares_for_target: {e}")) })? 
.map_err(|e| { - PinakesError::Database(format!("list_shares_for_target query: {}", e)) + PinakesError::Database(format!("list_shares_for_target query: {e}")) }) } @@ -7007,11 +7276,13 @@ impl StorageBackend for SqliteBackend { &self, share: &crate::sharing::Share, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let share = share.clone(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "UPDATE shares SET perm_view = ?1, perm_download = ?2, perm_edit = ?3, \ @@ -7021,12 +7292,12 @@ impl StorageBackend for SqliteBackend { inherit_to_children = ?9, updated_at = ?10 WHERE id = ?11", params![ - share.permissions.can_view, - share.permissions.can_download, - share.permissions.can_edit, - share.permissions.can_delete, - share.permissions.can_reshare, - share.permissions.can_add, + share.permissions.view.can_view, + share.permissions.view.can_download, + share.permissions.mutate.can_edit, + share.permissions.mutate.can_delete, + share.permissions.view.can_reshare, + share.permissions.mutate.can_add, share.note, share.expires_at.map(|dt| dt.to_rfc3339()), share.inherit_to_children, @@ -7037,22 +7308,24 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(share) }) .await - .map_err(|e| PinakesError::Database(format!("update_share: {}", e)))? - .map_err(|e| PinakesError::Database(format!("update_share query: {}", e))) + .map_err(|e| PinakesError::Database(format!("update_share: {e}")))? 
+ .map_err(|e| PinakesError::Database(format!("update_share query: {e}"))) } async fn delete_share(&self, id: crate::sharing::ShareId) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute("DELETE FROM shares WHERE id = ?1", params![ id.0.to_string() ])?; Ok::<_, PinakesError>(()) }) .await - .map_err(|e| PinakesError::Database(format!("delete_share: {}", e)))??; + .map_err(|e| PinakesError::Database(format!("delete_share: {e}")))??; Ok(()) } @@ -7060,11 +7333,13 @@ impl StorageBackend for SqliteBackend { &self, id: crate::sharing::ShareId, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let now = chrono::Utc::now().to_rfc3339(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "UPDATE shares SET access_count = access_count + 1, last_accessed = \ ?1 WHERE id = ?2", @@ -7074,7 +7349,7 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("record_share_access: {}", e)) + PinakesError::Database(format!("record_share_access: {e}")) })??; Ok(()) } @@ -7128,11 +7403,13 @@ impl StorageBackend for SqliteBackend { } // Check collection shares (inheritance) - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let media_id_str = media_id.0.to_string(); let collection_ids: Vec = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT 
collection_id FROM collection_items WHERE media_id = ?1", )?; @@ -7148,14 +7425,12 @@ impl StorageBackend for SqliteBackend { .await .map_err(|e| { PinakesError::Database(format!( - "get_effective_share_permissions (collections): {}", - e + "get_effective_share_permissions (collections): {e}" )) })? .map_err(|e| { PinakesError::Database(format!( - "get_effective_share_permissions (collections) query: {}", - e + "get_effective_share_permissions (collections) query: {e}" )) })?; @@ -7167,11 +7442,13 @@ impl StorageBackend for SqliteBackend { } // Check tag shares (inheritance) - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let media_id_str = media_id.0.to_string(); let tag_ids: Vec = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare("SELECT tag_id FROM media_tags WHERE media_id = ?1")?; let ids = stmt @@ -7186,14 +7463,12 @@ impl StorageBackend for SqliteBackend { .await .map_err(|e| { PinakesError::Database(format!( - "get_effective_share_permissions (tags): {}", - e + "get_effective_share_permissions (tags): {e}" )) })? 
.map_err(|e| { PinakesError::Database(format!( - "get_effective_share_permissions (tags) query: {}", - e + "get_effective_share_permissions (tags) query: {e}" )) })?; @@ -7211,7 +7486,7 @@ impl StorageBackend for SqliteBackend { &self, ids: &[crate::sharing::ShareId], ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_strings: Vec = ids.iter().map(|id| id.0.to_string()).collect(); @@ -7220,9 +7495,11 @@ impl StorageBackend for SqliteBackend { } tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let placeholders: Vec = - (1..=id_strings.len()).map(|i| format!("?{}", i)).collect(); + (1..=id_strings.len()).map(|i| format!("?{i}")).collect(); let sql = format!( "DELETE FROM shares WHERE id IN ({})", placeholders.join(", ") @@ -7231,46 +7508,49 @@ impl StorageBackend for SqliteBackend { .iter() .map(|s| s as &dyn rusqlite::types::ToSql) .collect(); - conn.execute(&sql, &*params) + conn.execute(&sql, &*params).map(|n| n as u64).map_err(|e| { + PinakesError::Database(format!("batch_delete_shares query: {e}")) + }) }) .await - .map_err(|e| PinakesError::Database(format!("batch_delete_shares: {}", e)))? - .map(|n| n as u64) - .map_err(|e| { - PinakesError::Database(format!("batch_delete_shares query: {}", e)) - }) + .map_err(|e| PinakesError::Database(format!("batch_delete_shares: {e}")))? 
} async fn cleanup_expired_shares(&self) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let now = chrono::Utc::now().to_rfc3339(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); - conn.execute( - "DELETE FROM shares WHERE expires_at IS NOT NULL AND expires_at < ?1", - params![&now], - ) + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; + conn + .execute( + "DELETE FROM shares WHERE expires_at IS NOT NULL AND expires_at < ?1", + params![&now], + ) + .map(|n| n as u64) + .map_err(|e| { + PinakesError::Database(format!("cleanup_expired_shares query: {e}")) + }) }) .await .map_err(|e| { - PinakesError::Database(format!("cleanup_expired_shares: {}", e)) + PinakesError::Database(format!("cleanup_expired_shares: {e}")) })? - .map(|n| n as u64) - .map_err(|e| { - PinakesError::Database(format!("cleanup_expired_shares query: {}", e)) - }) } async fn record_share_activity( &self, activity: &crate::sharing::ShareActivity, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let activity = activity.clone(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "INSERT INTO share_activity (id, share_id, actor_id, actor_ip, \ action, details, timestamp) @@ -7289,7 +7569,7 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("record_share_activity: {}", e)) + PinakesError::Database(format!("record_share_activity: {e}")) })??; Ok(()) } @@ -7299,12 +7579,14 @@ impl StorageBackend for SqliteBackend { share_id: crate::sharing::ShareId, pagination: &Pagination, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let offset = pagination.offset; 
let limit = pagination.limit; tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT id, share_id, actor_id, actor_ip, action, details, timestamp FROM share_activity WHERE share_id = ?1 ORDER BY timestamp \ @@ -7312,7 +7594,11 @@ impl StorageBackend for SqliteBackend { )?; let activities = stmt .query_map( - params![share_id.0.to_string(), limit as i64, offset as i64], + params![ + share_id.0.to_string(), + limit.cast_signed(), + offset.cast_signed() + ], |row| { Ok(crate::sharing::ShareActivity { id: parse_uuid(&row.get::<_, String>(0)?)?, @@ -7336,9 +7622,9 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(activities) }) .await - .map_err(|e| PinakesError::Database(format!("get_share_activity: {}", e)))? + .map_err(|e| PinakesError::Database(format!("get_share_activity: {e}")))? 
.map_err(|e| { - PinakesError::Database(format!("get_share_activity query: {}", e)) + PinakesError::Database(format!("get_share_activity query: {e}")) }) } @@ -7346,11 +7632,13 @@ impl StorageBackend for SqliteBackend { &self, notification: &crate::sharing::ShareNotification, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let notification = notification.clone(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "INSERT INTO share_notifications (id, user_id, share_id, \ notification_type, is_read, created_at) @@ -7368,7 +7656,7 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("create_share_notification: {}", e)) + PinakesError::Database(format!("create_share_notification: {e}")) })??; Ok(()) } @@ -7377,10 +7665,12 @@ impl StorageBackend for SqliteBackend { &self, user_id: crate::users::UserId, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT id, user_id, share_id, notification_type, is_read, created_at FROM share_notifications WHERE user_id = ?1 AND is_read = 0 \ @@ -7409,27 +7699,34 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("get_unread_notifications: {}", e)) + PinakesError::Database(format!("get_unread_notifications: {e}")) })? 
.map_err(|e| { - PinakesError::Database(format!("get_unread_notifications query: {}", e)) + PinakesError::Database(format!("get_unread_notifications query: {e}")) }) } - async fn mark_notification_read(&self, id: Uuid) -> Result<()> { - let conn = self.conn.clone(); + async fn mark_notification_read( + &self, + id: Uuid, + user_id: crate::users::UserId, + ) -> Result<()> { + let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( - "UPDATE share_notifications SET is_read = 1 WHERE id = ?1", - params![id.to_string()], + "UPDATE share_notifications SET is_read = 1 WHERE id = ?1 AND user_id \ + = ?2", + params![id.to_string(), user_id.0.to_string()], )?; Ok::<_, PinakesError>(()) }) .await .map_err(|e| { - PinakesError::Database(format!("mark_notification_read: {}", e)) + PinakesError::Database(format!("mark_notification_read: {e}")) })??; Ok(()) } @@ -7438,10 +7735,12 @@ impl StorageBackend for SqliteBackend { &self, user_id: crate::users::UserId, ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "UPDATE share_notifications SET is_read = 1 WHERE user_id = ?1", params![user_id.0.to_string()], @@ -7450,7 +7749,7 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("mark_all_notifications_read: {}", e)) + PinakesError::Database(format!("mark_all_notifications_read: {e}")) })??; Ok(()) } @@ -7465,7 +7764,7 @@ impl StorageBackend for SqliteBackend { )); } - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.0.to_string(); 
let new_name = new_name.to_string(); @@ -7473,7 +7772,9 @@ impl StorageBackend for SqliteBackend { let conn = conn.clone(); let id_str = id_str.clone(); move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let row: (String, String) = conn.query_row( "SELECT path, storage_mode FROM media_items WHERE id = ?1 AND \ deleted_at IS NULL", @@ -7485,7 +7786,7 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("rename_media (get info): {}", e)) + PinakesError::Database(format!("rename_media (get info): {e}")) })??; let old_path_buf = std::path::PathBuf::from(&old_path); @@ -7500,7 +7801,7 @@ impl StorageBackend for SqliteBackend { .map_err(|e| { PinakesError::Io(std::io::Error::new( e.kind(), - format!("Failed to rename file: {}", e), + format!("Failed to rename file: {e}"), )) })?; } @@ -7508,7 +7809,9 @@ impl StorageBackend for SqliteBackend { // Update the database let now = chrono::Utc::now().to_rfc3339(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "UPDATE media_items SET file_name = ?1, path = ?2, updated_at = ?3 \ WHERE id = ?4", @@ -7518,7 +7821,7 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("rename_media (update db): {}", e)) + PinakesError::Database(format!("rename_media (update db): {e}")) })??; Ok(old_path) @@ -7529,7 +7832,7 @@ impl StorageBackend for SqliteBackend { id: MediaId, new_directory: &std::path::Path, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.0.to_string(); let new_dir = new_directory.to_path_buf(); @@ -7537,7 +7840,9 @@ impl StorageBackend for SqliteBackend { let conn = 
conn.clone(); let id_str = id_str.clone(); move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let row: (String, String, String) = conn.query_row( "SELECT path, file_name, storage_mode FROM media_items WHERE id = \ ?1 AND deleted_at IS NULL", @@ -7549,7 +7854,7 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("move_media (get info): {}", e)) + PinakesError::Database(format!("move_media (get info): {e}")) })??; let old_path_buf = std::path::PathBuf::from(&old_path); @@ -7568,7 +7873,7 @@ impl StorageBackend for SqliteBackend { .map_err(|e| { PinakesError::Io(std::io::Error::new( e.kind(), - format!("Failed to move file: {}", e), + format!("Failed to move file: {e}"), )) })?; } @@ -7576,7 +7881,9 @@ impl StorageBackend for SqliteBackend { // Update the database let now = chrono::Utc::now().to_rfc3339(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "UPDATE media_items SET path = ?1, updated_at = ?2 WHERE id = ?3", params![new_path_str, now, id_str], @@ -7585,34 +7892,37 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("move_media (update db): {}", e)) + PinakesError::Database(format!("move_media (update db): {e}")) })??; Ok(old_path) } async fn soft_delete_media(&self, id: MediaId) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.0.to_string(); let now = chrono::Utc::now().to_rfc3339(); let rows_affected = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); - conn.execute( - "UPDATE media_items SET deleted_at = ?1, updated_at = ?1 WHERE id = \ - 
?2 AND deleted_at IS NULL", - params![now, id_str], - ) + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; + conn + .execute( + "UPDATE media_items SET deleted_at = ?1, updated_at = ?1 WHERE id = \ + ?2 AND deleted_at IS NULL", + params![now, id_str], + ) + .map_err(|e| { + PinakesError::Database(format!("soft_delete_media query: {e}")) + }) }) .await - .map_err(|e| { - PinakesError::Database(format!("soft_delete_media: {}", e)) - })??; + .map_err(|e| PinakesError::Database(format!("soft_delete_media: {e}")))??; if rows_affected == 0 { return Err(PinakesError::NotFound(format!( - "Media item {} not found or already deleted", - id + "Media item {id} not found or already deleted" ))); } @@ -7620,25 +7930,30 @@ impl StorageBackend for SqliteBackend { } async fn restore_media(&self, id: MediaId) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let id_str = id.0.to_string(); let now = chrono::Utc::now().to_rfc3339(); let rows_affected = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); - conn.execute( - "UPDATE media_items SET deleted_at = NULL, updated_at = ?1 WHERE id = \ - ?2 AND deleted_at IS NOT NULL", - params![now, id_str], - ) + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; + conn + .execute( + "UPDATE media_items SET deleted_at = NULL, updated_at = ?1 WHERE id \ + = ?2 AND deleted_at IS NOT NULL", + params![now, id_str], + ) + .map_err(|e| { + PinakesError::Database(format!("restore_media query: {e}")) + }) }) .await - .map_err(|e| PinakesError::Database(format!("restore_media: {}", e)))??; + .map_err(|e| PinakesError::Database(format!("restore_media: {e}")))??; if rows_affected == 0 { return Err(PinakesError::NotFound(format!( - "Media item {} not found in trash", - id + "Media item {id} not found in trash" ))); } @@ -7649,12 +7964,14 @@ impl 
StorageBackend for SqliteBackend { &self, pagination: &Pagination, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let offset = pagination.offset; let limit = pagination.limit; let items = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT id, path, file_name, media_type, content_hash, file_size, title, artist, album, genre, year, duration_secs, \ @@ -7670,8 +7987,10 @@ impl StorageBackend for SqliteBackend { ORDER BY deleted_at DESC LIMIT ?1 OFFSET ?2", )?; - let rows = stmt - .query_map(params![limit as i64, offset as i64], row_to_media_item)?; + let rows = stmt.query_map( + params![limit.cast_signed(), offset.cast_signed()], + row_to_media_item, + )?; let mut items = Vec::new(); for row in rows { items.push(row?); @@ -7679,23 +7998,25 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(items) }) .await - .map_err(|e| PinakesError::Database(format!("list_trash: {}", e)))??; + .map_err(|e| PinakesError::Database(format!("list_trash: {e}")))??; Ok(items) } async fn empty_trash(&self) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let count = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; // First, get the IDs to clean up related data let mut stmt = conn .prepare("SELECT id FROM media_items WHERE deleted_at IS NOT NULL")?; let ids: Vec = stmt .query_map([], |row| row.get(0))? 
- .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect(); // Delete related data @@ -7703,7 +8024,7 @@ impl StorageBackend for SqliteBackend { conn .execute("DELETE FROM media_tags WHERE media_id = ?1", params![id])?; conn.execute( - "DELETE FROM collection_items WHERE media_id = ?1", + "DELETE FROM collection_members WHERE media_id = ?1", params![id], )?; conn @@ -7718,7 +8039,7 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(count as u64) }) .await - .map_err(|e| PinakesError::Database(format!("empty_trash: {}", e)))??; + .map_err(|e| PinakesError::Database(format!("empty_trash: {e}")))??; Ok(count) } @@ -7727,11 +8048,13 @@ impl StorageBackend for SqliteBackend { &self, before: chrono::DateTime, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let before_str = before.to_rfc3339(); let count = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; // First, get the IDs to clean up related data let mut stmt = conn.prepare( @@ -7740,7 +8063,7 @@ impl StorageBackend for SqliteBackend { )?; let ids: Vec = stmt .query_map(params![before_str], |row| row.get(0))? 
- .filter_map(|r| r.ok()) + .filter_map(std::result::Result::ok) .collect(); // Delete related data @@ -7748,7 +8071,7 @@ impl StorageBackend for SqliteBackend { conn .execute("DELETE FROM media_tags WHERE media_id = ?1", params![id])?; conn.execute( - "DELETE FROM collection_items WHERE media_id = ?1", + "DELETE FROM collection_members WHERE media_id = ?1", params![id], )?; conn @@ -7766,25 +8089,27 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(count as u64) }) .await - .map_err(|e| PinakesError::Database(format!("purge_old_trash: {}", e)))??; + .map_err(|e| PinakesError::Database(format!("purge_old_trash: {e}")))??; Ok(count) } async fn count_trash(&self) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let count = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let count: i64 = conn.query_row( "SELECT COUNT(*) FROM media_items WHERE deleted_at IS NOT NULL", [], |row| row.get(0), )?; - Ok::<_, PinakesError>(count as u64) + Ok::<_, PinakesError>(count.cast_unsigned()) }) .await - .map_err(|e| PinakesError::Database(format!("count_trash: {}", e)))??; + .map_err(|e| PinakesError::Database(format!("count_trash: {e}")))??; Ok(count) } @@ -7794,12 +8119,14 @@ impl StorageBackend for SqliteBackend { media_id: MediaId, links: &[crate::model::MarkdownLink], ) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let media_id_str = media_id.0.to_string(); let links: Vec<_> = links.to_vec(); tokio::task::spawn_blocking(move || { - let mut conn = conn.lock().expect("connection mutex not poisoned"); + let mut conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; // Wrap DELETE + INSERT in transaction to ensure atomicity let tx = conn.transaction()?; @@ -7839,7 +8166,7 @@ impl 
StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("save_markdown_links: {}", e)) + PinakesError::Database(format!("save_markdown_links: {e}")) })??; Ok(()) @@ -7849,11 +8176,13 @@ impl StorageBackend for SqliteBackend { &self, media_id: MediaId, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let media_id_str = media_id.0.to_string(); let links = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT id, source_media_id, target_path, target_media_id, link_type, link_text, line_number, context, created_at @@ -7872,7 +8201,7 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("get_outgoing_links: {}", e)) + PinakesError::Database(format!("get_outgoing_links: {e}")) })??; Ok(links) @@ -7882,11 +8211,13 @@ impl StorageBackend for SqliteBackend { &self, media_id: MediaId, ) -> Result> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let media_id_str = media_id.0.to_string(); let backlinks = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; let mut stmt = conn.prepare( "SELECT l.id, l.source_media_id, m.title, m.path, l.link_text, l.line_number, l.context, l.link_type @@ -7927,17 +8258,19 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(backlinks) }) .await - .map_err(|e| PinakesError::Database(format!("get_backlinks: {}", e)))??; + .map_err(|e| PinakesError::Database(format!("get_backlinks: {e}")))??; Ok(backlinks) } async fn clear_links_for_media(&self, media_id: MediaId) -> Result<()> { - let conn = self.conn.clone(); + let conn = 
Arc::clone(&self.conn); let media_id_str = media_id.0.to_string(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn .execute("DELETE FROM markdown_links WHERE source_media_id = ?1", [ &media_id_str, @@ -7946,7 +8279,7 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("clear_links_for_media: {}", e)) + PinakesError::Database(format!("clear_links_for_media: {e}")) })??; Ok(()) @@ -7957,12 +8290,12 @@ impl StorageBackend for SqliteBackend { center_id: Option, depth: u32, ) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let center_id_str = center_id.map(|id| id.0.to_string()); let depth = depth.min(5); // Limit depth to prevent huge queries let graph_data = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| PinakesError::Database(format!("connection mutex poisoned: {e}")))?; let mut nodes = Vec::new(); let mut edges = Vec::new(); let mut node_ids = std::collections::HashSet::new(); @@ -7972,7 +8305,7 @@ impl StorageBackend for SqliteBackend { // BFS to find connected nodes within depth let mut frontier = vec![center_id.clone()]; let mut visited = std::collections::HashSet::new(); - visited.insert(center_id.clone()); + visited.insert(center_id); for _ in 0..depth { let mut next_frontier = Vec::new(); @@ -8066,8 +8399,8 @@ impl StorageBackend for SqliteBackend { label, title, media_type, - link_count: link_count as u32, - backlink_count: backlink_count as u32, + link_count: u32::try_from(link_count).unwrap_or(0), + backlink_count: u32::try_from(backlink_count).unwrap_or(0), }); } } @@ -8102,16 +8435,18 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>(crate::model::GraphData { nodes, edges }) }) .await - 
.map_err(|e| PinakesError::Database(format!("get_graph_data: {}", e)))??; + .map_err(|e| PinakesError::Database(format!("get_graph_data: {e}")))??; Ok(graph_data) } async fn resolve_links(&self) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let count = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; // Find unresolved links and try to resolve them // Strategy 1: Exact path match @@ -8160,18 +8495,20 @@ impl StorageBackend for SqliteBackend { Ok::<_, PinakesError>((updated1 + updated2) as u64) }) .await - .map_err(|e| PinakesError::Database(format!("resolve_links: {}", e)))??; + .map_err(|e| PinakesError::Database(format!("resolve_links: {e}")))??; Ok(count) } async fn mark_links_extracted(&self, media_id: MediaId) -> Result<()> { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let media_id_str = media_id.0.to_string(); let now = chrono::Utc::now().to_rfc3339(); tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) + })?; conn.execute( "UPDATE media_items SET links_extracted_at = ?1 WHERE id = ?2", params![now, media_id_str], @@ -8180,31 +8517,52 @@ impl StorageBackend for SqliteBackend { }) .await .map_err(|e| { - PinakesError::Database(format!("mark_links_extracted: {}", e)) + PinakesError::Database(format!("mark_links_extracted: {e}")) })??; Ok(()) } async fn count_unresolved_links(&self) -> Result { - let conn = self.conn.clone(); + let conn = Arc::clone(&self.conn); let count = tokio::task::spawn_blocking(move || { - let conn = conn.lock().expect("connection mutex not poisoned"); + let conn = conn.lock().map_err(|e| { + PinakesError::Database(format!("connection mutex poisoned: {e}")) 
+ })?; let count: i64 = conn.query_row( "SELECT COUNT(*) FROM markdown_links WHERE target_media_id IS NULL", [], |row| row.get(0), )?; - Ok::<_, PinakesError>(count as u64) + Ok::<_, PinakesError>(count.cast_unsigned()) }) .await .map_err(|e| { - PinakesError::Database(format!("count_unresolved_links: {}", e)) + PinakesError::Database(format!("count_unresolved_links: {e}")) })??; Ok(count) } + + async fn backup(&self, dest: &std::path::Path) -> Result<()> { + let conn = Arc::clone(&self.conn); + let dest = dest.to_path_buf(); + + let fut = tokio::task::spawn_blocking(move || { + let db = conn.lock().map_err(|e| { + PinakesError::Database(format!("failed to acquire database lock: {e}")) + })?; + db.execute("VACUUM INTO ?1", params![dest.to_string_lossy()])?; + Ok(()) + }); + tokio::time::timeout(std::time::Duration::from_mins(5), fut) + .await + .map_err(|_| PinakesError::Database("backup timed out".into()))? + .map_err(|e: tokio::task::JoinError| { + PinakesError::Database(format!("backup: {e}")) + })? + } } // Helper function to parse a markdown link row @@ -8310,18 +8668,22 @@ fn row_to_share(row: &Row) -> rusqlite::Result { owner_id: crate::users::UserId(parse_uuid(&owner_id_str)?), recipient, permissions: crate::sharing::SharePermissions { - can_view: row.get(8)?, - can_download: row.get(9)?, - can_edit: row.get(10)?, - can_delete: row.get(11)?, - can_reshare: row.get(12)?, - can_add: row.get(13)?, + view: crate::sharing::ShareViewPermissions { + can_view: row.get(8)?, + can_download: row.get(9)?, + can_reshare: row.get(12)?, + }, + mutate: crate::sharing::ShareMutatePermissions { + can_edit: row.get(10)?, + can_delete: row.get(11)?, + can_add: row.get(13)?, + }, }, note: row.get(14)?, expires_at: row .get::<_, Option>(15)? .map(|s| parse_datetime(&s)), - access_count: row.get::<_, i64>(16)? as u64, + access_count: row.get::<_, i64>(16)?.cast_unsigned(), last_accessed: row .get::<_, Option>(17)? .map(|s| parse_datetime(&s)),