From 3d9f8933d22afe4b8fb16d4beb1a34fb417e31a3 Mon Sep 17 00:00:00 2001 From: NotAShelf Date: Sun, 8 Mar 2026 00:42:29 +0300 Subject: [PATCH] pinakes-core: update remaining modules and tests Signed-off-by: NotAShelf Change-Id: I9e0ff5ea33a5cf697473423e88f167ce6a6a6964 --- crates/pinakes-core/src/cache.rs | 67 ++++--- crates/pinakes-core/src/collections.rs | 29 ++- crates/pinakes-core/src/enrichment/books.rs | 30 ++- .../src/enrichment/googlebooks.rs | 64 ++++--- crates/pinakes-core/src/enrichment/lastfm.rs | 18 +- .../src/enrichment/openlibrary.rs | 69 ++++--- crates/pinakes-core/src/enrichment/tmdb.rs | 16 +- crates/pinakes-core/src/error.rs | 6 +- crates/pinakes-core/src/events.rs | 11 +- crates/pinakes-core/src/hash.rs | 1 + crates/pinakes-core/src/import.rs | 48 ++++- crates/pinakes-core/src/integrity.rs | 17 ++ crates/pinakes-core/src/links.rs | 80 +++++--- crates/pinakes-core/src/managed_storage.rs | 90 +++++++-- crates/pinakes-core/src/media_type/builtin.rs | 17 +- crates/pinakes-core/src/media_type/mod.rs | 33 +++- .../pinakes-core/src/media_type/registry.rs | 20 +- crates/pinakes-core/src/metadata/audio.rs | 2 +- crates/pinakes-core/src/metadata/image.rs | 14 +- crates/pinakes-core/src/metadata/mod.rs | 16 +- crates/pinakes-core/src/metadata/video.rs | 6 +- crates/pinakes-core/src/opener.rs | 1 + crates/pinakes-core/src/path_validation.rs | 27 ++- crates/pinakes-core/src/plugin/loader.rs | 105 ++++++----- crates/pinakes-core/src/plugin/mod.rs | 102 +++++++--- crates/pinakes-core/src/plugin/registry.rs | 35 +++- crates/pinakes-core/src/plugin/runtime.rs | 178 +++++++++++------- crates/pinakes-core/src/plugin/security.rs | 44 +++-- crates/pinakes-core/src/scan.rs | 49 ++++- crates/pinakes-core/src/scheduler.rs | 84 +++++---- crates/pinakes-core/src/search.rs | 25 +-- crates/pinakes-core/src/sync/chunked.rs | 41 +++- crates/pinakes-core/src/sync/conflict.rs | 13 +- crates/pinakes-core/src/sync/models.rs | 13 +- crates/pinakes-core/src/sync/protocol.rs | 24 ++- 
crates/pinakes-core/src/tags.rs | 16 ++ crates/pinakes-core/src/thumbnail.rs | 144 ++++++++------ crates/pinakes-core/src/transcode.rs | 125 ++++++------ crates/pinakes-core/src/upload.rs | 35 +++- crates/pinakes-core/src/users.rs | 15 +- crates/pinakes-core/tests/book_metadata.rs | 28 ++- crates/pinakes-core/tests/integrity.rs | 11 +- .../tests/markdown_links_atomicity.rs | 2 +- .../pinakes-core/tests/session_persistence.rs | 14 +- 44 files changed, 1207 insertions(+), 578 deletions(-) diff --git a/crates/pinakes-core/src/cache.rs b/crates/pinakes-core/src/cache.rs index ab1bd75..ee37f69 100644 --- a/crates/pinakes-core/src/cache.rs +++ b/crates/pinakes-core/src/cache.rs @@ -30,13 +30,18 @@ pub struct CacheStats { } impl CacheStats { + #[must_use] pub fn hit_rate(&self) -> f64 { let total = self.hits + self.misses; - if total == 0 { - 0.0 - } else { - self.hits as f64 / total as f64 - } + // Compute ratio using integer arithmetic: hits * 10000 / total gives basis + // points (0..=10000), then scale back to [0.0, 1.0]. Returns 0.0 if total + // is zero. + let basis_points = self + .hits + .saturating_mul(10_000) + .checked_div(total) + .unwrap_or(0); + f64::from(u32::try_from(basis_points).unwrap_or(u32::MAX)) / 10_000.0 } } @@ -88,6 +93,7 @@ where V: Clone + Send + Sync + 'static, { /// Create a new cache with the specified TTL and maximum capacity. + #[must_use] pub fn new(ttl: Duration, max_capacity: u64) -> Self { let inner = MokaCache::builder() .time_to_live(ttl) @@ -101,6 +107,7 @@ where } /// Create a new cache with TTL, max capacity, and time-to-idle. + #[must_use] pub fn new_with_idle( ttl: Duration, tti: Duration, @@ -120,16 +127,16 @@ where /// Get a value from the cache. 
pub async fn get(&self, key: &K) -> Option { - match self.inner.get(key).await { - Some(value) => { - self.metrics.record_hit(); - Some(value) - }, - None => { + self.inner.get(key).await.map_or_else( + || { self.metrics.record_miss(); None }, - } + |value| { + self.metrics.record_hit(); + Some(value) + }, + ) } /// Insert a value into the cache. @@ -150,11 +157,13 @@ where } /// Get the current number of entries in the cache. + #[must_use] pub fn entry_count(&self) -> u64 { self.inner.entry_count() } /// Get cache statistics. + #[must_use] pub fn stats(&self) -> CacheStats { let (hits, misses) = self.metrics.stats(); CacheStats { @@ -168,11 +177,12 @@ where /// Specialized cache for search query results. pub struct QueryCache { - /// Cache keyed by (query_hash, offset, limit) + /// Cache keyed by (`query_hash`, offset, limit) inner: Cache, } impl QueryCache { + #[must_use] pub fn new(ttl: Duration, max_capacity: u64) -> Self { Self { inner: Cache::new(ttl, max_capacity), @@ -224,6 +234,7 @@ impl QueryCache { self.inner.invalidate_all().await; } + #[must_use] pub fn stats(&self) -> CacheStats { self.inner.stats() } @@ -236,6 +247,7 @@ pub struct MetadataCache { } impl MetadataCache { + #[must_use] pub fn new(ttl: Duration, max_capacity: u64) -> Self { Self { inner: Cache::new(ttl, max_capacity), @@ -257,6 +269,7 @@ impl MetadataCache { self.inner.invalidate(&content_hash.to_string()).await; } + #[must_use] pub fn stats(&self) -> CacheStats { self.inner.stats() } @@ -268,6 +281,7 @@ pub struct MediaCache { } impl MediaCache { + #[must_use] pub fn new(ttl: Duration, max_capacity: u64) -> Self { Self { inner: Cache::new(ttl, max_capacity), @@ -290,6 +304,7 @@ impl MediaCache { self.inner.invalidate_all().await; } + #[must_use] pub fn stats(&self) -> CacheStats { self.inner.stats() } @@ -348,6 +363,7 @@ pub struct CacheLayer { impl CacheLayer { /// Create a new cache layer with the specified TTL (using defaults for other /// settings). 
+ #[must_use] pub fn new(ttl_secs: u64) -> Self { let config = CacheConfig { response_ttl_secs: ttl_secs, @@ -357,6 +373,7 @@ impl CacheLayer { } /// Create a new cache layer with full configuration. + #[must_use] pub fn with_config(config: CacheConfig) -> Self { Self { responses: Cache::new( @@ -401,6 +418,7 @@ impl CacheLayer { } /// Get aggregated statistics for all caches. + #[must_use] pub fn stats(&self) -> CacheLayerStats { CacheLayerStats { responses: self.responses.stats(), @@ -411,7 +429,8 @@ impl CacheLayer { } /// Get the current configuration. - pub fn config(&self) -> &CacheConfig { + #[must_use] + pub const fn config(&self) -> &CacheConfig { &self.config } } @@ -427,6 +446,7 @@ pub struct CacheLayerStats { impl CacheLayerStats { /// Get the overall hit rate across all caches. + #[must_use] pub fn overall_hit_rate(&self) -> f64 { let total_hits = self.responses.hits + self.queries.hits @@ -438,15 +458,16 @@ impl CacheLayerStats { + self.metadata.misses + self.media.misses; - if total_requests == 0 { - 0.0 - } else { - total_hits as f64 / total_requests as f64 - } + let basis_points = total_hits + .saturating_mul(10_000) + .checked_div(total_requests) + .unwrap_or(0); + f64::from(u32::try_from(basis_points).unwrap_or(u32::MAX)) / 10_000.0 } /// Get the total number of entries across all caches. 
- pub fn total_entries(&self) -> u64 { + #[must_use] + pub const fn total_entries(&self) -> u64 { self.responses.size + self.queries.size + self.metadata.size @@ -460,7 +481,7 @@ mod tests { #[tokio::test] async fn test_cache_basic_operations() { - let cache: Cache = Cache::new(Duration::from_secs(60), 100); + let cache: Cache = Cache::new(Duration::from_mins(1), 100); // Insert and get cache.insert("key1".to_string(), "value1".to_string()).await; @@ -479,7 +500,7 @@ mod tests { #[tokio::test] async fn test_cache_stats() { - let cache: Cache = Cache::new(Duration::from_secs(60), 100); + let cache: Cache = Cache::new(Duration::from_mins(1), 100); cache.insert("key1".to_string(), "value1".to_string()).await; let _ = cache.get(&"key1".to_string()).await; // hit @@ -493,7 +514,7 @@ mod tests { #[tokio::test] async fn test_query_cache() { - let cache = QueryCache::new(Duration::from_secs(60), 100); + let cache = QueryCache::new(Duration::from_mins(1), 100); cache .insert("test query", 0, 10, Some("name"), "results".to_string()) diff --git a/crates/pinakes-core/src/collections.rs b/crates/pinakes-core/src/collections.rs index 789c853..00dada5 100644 --- a/crates/pinakes-core/src/collections.rs +++ b/crates/pinakes-core/src/collections.rs @@ -1,6 +1,17 @@ use uuid::Uuid; -use crate::{error::Result, model::*, storage::DynStorageBackend}; +use crate::{ + error::Result, + model::{ + AuditAction, + Collection, + CollectionKind, + MediaId, + MediaItem, + Pagination, + }, + storage::DynStorageBackend, +}; /// Creates a new collection. /// @@ -15,6 +26,10 @@ use crate::{error::Result, model::*, storage::DynStorageBackend}; /// # Returns /// /// The created collection +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the storage operation fails. 
pub async fn create_collection( storage: &DynStorageBackend, name: &str, @@ -39,6 +54,10 @@ pub async fn create_collection( /// # Returns /// /// `Ok(())` on success +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the storage operation fails. pub async fn add_member( storage: &DynStorageBackend, collection_id: Uuid, @@ -68,6 +87,10 @@ pub async fn add_member( /// # Returns /// /// `Ok(())` on success +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the storage operation fails. pub async fn remove_member( storage: &DynStorageBackend, collection_id: Uuid, @@ -98,6 +121,10 @@ pub async fn remove_member( /// # Returns /// /// List of media items in the collection +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the storage operation fails. pub async fn get_members( storage: &DynStorageBackend, collection_id: Uuid, diff --git a/crates/pinakes-core/src/enrichment/books.rs b/crates/pinakes-core/src/enrichment/books.rs index 5882b2d..02226f5 100644 --- a/crates/pinakes-core/src/enrichment/books.rs +++ b/crates/pinakes-core/src/enrichment/books.rs @@ -13,13 +13,15 @@ use crate::{ model::MediaItem, }; -/// Book enricher that tries OpenLibrary first, then falls back to Google Books +/// Book enricher that tries `OpenLibrary` first, then falls back to Google +/// Books pub struct BookEnricher { openlibrary: OpenLibraryClient, googlebooks: GoogleBooksClient, } impl BookEnricher { + #[must_use] pub fn new(google_api_key: Option) -> Self { Self { openlibrary: OpenLibraryClient::new(), @@ -27,7 +29,11 @@ impl BookEnricher { } } - /// Try to enrich from OpenLibrary first + /// Try to enrich from `OpenLibrary` first + /// + /// # Errors + /// + /// Returns an error if the metadata cannot be serialized. 
pub async fn try_openlibrary( &self, isbn: &str, @@ -35,7 +41,7 @@ impl BookEnricher { match self.openlibrary.fetch_by_isbn(isbn).await { Ok(book) => { let metadata_json = serde_json::to_string(&book).map_err(|e| { - PinakesError::External(format!("Failed to serialize metadata: {}", e)) + PinakesError::External(format!("Failed to serialize metadata: {e}")) })?; Ok(Some(ExternalMetadata { @@ -53,6 +59,10 @@ impl BookEnricher { } /// Try to enrich from Google Books + /// + /// # Errors + /// + /// Returns an error if the metadata cannot be serialized. pub async fn try_googlebooks( &self, isbn: &str, @@ -61,7 +71,7 @@ impl BookEnricher { Ok(books) if !books.is_empty() => { let book = &books[0]; let metadata_json = serde_json::to_string(book).map_err(|e| { - PinakesError::External(format!("Failed to serialize metadata: {}", e)) + PinakesError::External(format!("Failed to serialize metadata: {e}")) })?; Ok(Some(ExternalMetadata { @@ -79,6 +89,10 @@ impl BookEnricher { } /// Try to enrich by searching with title and author + /// + /// # Errors + /// + /// Returns an error if the metadata cannot be serialized. 
pub async fn enrich_by_search( &self, title: &str, @@ -89,7 +103,7 @@ impl BookEnricher { && let Some(result) = results.first() { let metadata_json = serde_json::to_string(result).map_err(|e| { - PinakesError::External(format!("Failed to serialize metadata: {}", e)) + PinakesError::External(format!("Failed to serialize metadata: {e}")) })?; return Ok(Some(ExternalMetadata { @@ -108,7 +122,7 @@ impl BookEnricher { && let Some(book) = results.first() { let metadata_json = serde_json::to_string(book).map_err(|e| { - PinakesError::External(format!("Failed to serialize metadata: {}", e)) + PinakesError::External(format!("Failed to serialize metadata: {e}")) })?; return Ok(Some(ExternalMetadata { @@ -158,7 +172,8 @@ impl MetadataEnricher for BookEnricher { } } -/// Calculate confidence score for OpenLibrary metadata +/// Calculate confidence score for `OpenLibrary` metadata +#[must_use] pub fn calculate_openlibrary_confidence( book: &super::openlibrary::OpenLibraryBook, ) -> f64 { @@ -187,6 +202,7 @@ pub fn calculate_openlibrary_confidence( } /// Calculate confidence score for Google Books metadata +#[must_use] pub fn calculate_googlebooks_confidence( info: &super::googlebooks::VolumeInfo, ) -> f64 { diff --git a/crates/pinakes-core/src/enrichment/googlebooks.rs b/crates/pinakes-core/src/enrichment/googlebooks.rs index 0e9bc01..abfb118 100644 --- a/crates/pinakes-core/src/enrichment/googlebooks.rs +++ b/crates/pinakes-core/src/enrichment/googlebooks.rs @@ -1,3 +1,5 @@ +use std::fmt::Write as _; + use serde::{Deserialize, Serialize}; use crate::error::{PinakesError, Result}; @@ -9,30 +11,33 @@ pub struct GoogleBooksClient { } impl GoogleBooksClient { + /// Create a new `GoogleBooksClient`. 
+ #[must_use] pub fn new(api_key: Option) -> Self { - Self { - client: reqwest::Client::builder() - .user_agent("Pinakes/1.0") - .timeout(std::time::Duration::from_secs(10)) - .build() - .expect("Failed to build HTTP client"), - api_key, - } + let client = reqwest::Client::builder() + .user_agent("Pinakes/1.0") + .timeout(std::time::Duration::from_secs(10)) + .build() + .unwrap_or_else(|_| reqwest::Client::new()); + Self { client, api_key } } /// Fetch book metadata by ISBN + /// + /// # Errors + /// + /// Returns an error if the HTTP request fails or the response cannot be + /// parsed. pub async fn fetch_by_isbn(&self, isbn: &str) -> Result> { - let mut url = format!( - "https://www.googleapis.com/books/v1/volumes?q=isbn:{}", - isbn - ); + let mut url = + format!("https://www.googleapis.com/books/v1/volumes?q=isbn:{isbn}"); if let Some(ref key) = self.api_key { - url.push_str(&format!("&key={}", key)); + let _ = write!(url, "&key={key}"); } let response = self.client.get(&url).send().await.map_err(|e| { - PinakesError::External(format!("Google Books request failed: {}", e)) + PinakesError::External(format!("Google Books request failed: {e}")) })?; if !response.status().is_success() { @@ -44,8 +49,7 @@ impl GoogleBooksClient { let volumes: GoogleBooksResponse = response.json().await.map_err(|e| { PinakesError::External(format!( - "Failed to parse Google Books response: {}", - e + "Failed to parse Google Books response: {e}" )) })?; @@ -53,6 +57,11 @@ impl GoogleBooksClient { } /// Search for books by title and author + /// + /// # Errors + /// + /// Returns an error if the HTTP request fails or the response cannot be + /// parsed. 
pub async fn search( &self, title: &str, @@ -61,20 +70,19 @@ impl GoogleBooksClient { let mut query = format!("intitle:{}", urlencoding::encode(title)); if let Some(author) = author { - query.push_str(&format!("+inauthor:{}", urlencoding::encode(author))); + let _ = write!(query, "+inauthor:{}", urlencoding::encode(author)); } let mut url = format!( - "https://www.googleapis.com/books/v1/volumes?q={}&maxResults=5", - query + "https://www.googleapis.com/books/v1/volumes?q={query}&maxResults=5" ); if let Some(ref key) = self.api_key { - url.push_str(&format!("&key={}", key)); + let _ = write!(url, "&key={key}"); } let response = self.client.get(&url).send().await.map_err(|e| { - PinakesError::External(format!("Google Books search failed: {}", e)) + PinakesError::External(format!("Google Books search failed: {e}")) })?; if !response.status().is_success() { @@ -85,13 +93,18 @@ impl GoogleBooksClient { } let volumes: GoogleBooksResponse = response.json().await.map_err(|e| { - PinakesError::External(format!("Failed to parse search results: {}", e)) + PinakesError::External(format!("Failed to parse search results: {e}")) })?; Ok(volumes.items) } /// Download cover image from Google Books + /// + /// # Errors + /// + /// Returns an error if the HTTP request fails or the response cannot be + /// read. 
pub async fn fetch_cover(&self, image_link: &str) -> Result> { // Replace thumbnail link with higher resolution if possible let high_res_link = image_link @@ -100,7 +113,7 @@ impl GoogleBooksClient { let response = self.client.get(&high_res_link).send().await.map_err(|e| { - PinakesError::External(format!("Cover download failed: {}", e)) + PinakesError::External(format!("Cover download failed: {e}")) })?; if !response.status().is_success() { @@ -111,7 +124,7 @@ impl GoogleBooksClient { } response.bytes().await.map(|b| b.to_vec()).map_err(|e| { - PinakesError::External(format!("Failed to read cover data: {}", e)) + PinakesError::External(format!("Failed to read cover data: {e}")) }) } } @@ -201,6 +214,7 @@ pub struct ImageLinks { impl ImageLinks { /// Get the best available image link (highest resolution) + #[must_use] pub fn best_link(&self) -> Option<&String> { self .extra_large @@ -223,11 +237,13 @@ pub struct IndustryIdentifier { impl IndustryIdentifier { /// Check if this is an ISBN-13 + #[must_use] pub fn is_isbn13(&self) -> bool { self.identifier_type == "ISBN_13" } /// Check if this is an ISBN-10 + #[must_use] pub fn is_isbn10(&self) -> bool { self.identifier_type == "ISBN_10" } diff --git a/crates/pinakes-core/src/enrichment/lastfm.rs b/crates/pinakes-core/src/enrichment/lastfm.rs index 63c7d60..9bde8c5 100644 --- a/crates/pinakes-core/src/enrichment/lastfm.rs +++ b/crates/pinakes-core/src/enrichment/lastfm.rs @@ -18,13 +18,16 @@ pub struct LastFmEnricher { } impl LastFmEnricher { + /// Create a new `LastFmEnricher`. 
+ #[must_use] pub fn new(api_key: String) -> Self { + let client = reqwest::Client::builder() + .timeout(Duration::from_secs(10)) + .connect_timeout(Duration::from_secs(5)) + .build() + .unwrap_or_else(|_| reqwest::Client::new()); Self { - client: reqwest::Client::builder() - .timeout(Duration::from_secs(10)) - .connect_timeout(Duration::from_secs(5)) - .build() - .expect("failed to build HTTP client with configured timeouts"), + client, api_key, base_url: "https://ws.audioscrobbler.com/2.0".to_string(), } @@ -87,9 +90,8 @@ impl MetadataEnricher for LastFmEnricher { return Ok(None); } - let track = match json.get("track") { - Some(t) => t, - None => return Ok(None), + let Some(track) = json.get("track") else { + return Ok(None); }; let mbid = track.get("mbid").and_then(|m| m.as_str()).map(String::from); diff --git a/crates/pinakes-core/src/enrichment/openlibrary.rs b/crates/pinakes-core/src/enrichment/openlibrary.rs index df1c517..02ca965 100644 --- a/crates/pinakes-core/src/enrichment/openlibrary.rs +++ b/crates/pinakes-core/src/enrichment/openlibrary.rs @@ -1,8 +1,10 @@ +use std::fmt::Write as _; + use serde::{Deserialize, Serialize}; use crate::error::{PinakesError, Result}; -/// OpenLibrary API client for book metadata enrichment +/// `OpenLibrary` API client for book metadata enrichment pub struct OpenLibraryClient { client: reqwest::Client, base_url: String, @@ -15,23 +17,31 @@ impl Default for OpenLibraryClient { } impl OpenLibraryClient { + /// Create a new `OpenLibraryClient`. 
+ #[must_use] pub fn new() -> Self { + let client = reqwest::Client::builder() + .user_agent("Pinakes/1.0") + .timeout(std::time::Duration::from_secs(10)) + .build() + .unwrap_or_else(|_| reqwest::Client::new()); Self { - client: reqwest::Client::builder() - .user_agent("Pinakes/1.0") - .timeout(std::time::Duration::from_secs(10)) - .build() - .expect("Failed to build HTTP client"), + client, base_url: "https://openlibrary.org".to_string(), } } /// Fetch book metadata by ISBN + /// + /// # Errors + /// + /// Returns an error if the HTTP request fails or the response cannot be + /// parsed. pub async fn fetch_by_isbn(&self, isbn: &str) -> Result { let url = format!("{}/isbn/{}.json", self.base_url, isbn); let response = self.client.get(&url).send().await.map_err(|e| { - PinakesError::External(format!("OpenLibrary request failed: {}", e)) + PinakesError::External(format!("OpenLibrary request failed: {e}")) })?; if !response.status().is_success() { @@ -43,13 +53,17 @@ impl OpenLibraryClient { response.json::().await.map_err(|e| { PinakesError::External(format!( - "Failed to parse OpenLibrary response: {}", - e + "Failed to parse OpenLibrary response: {e}" )) }) } /// Search for books by title and author + /// + /// # Errors + /// + /// Returns an error if the HTTP request fails or the response cannot be + /// parsed. 
pub async fn search( &self, title: &str, @@ -62,13 +76,13 @@ impl OpenLibraryClient { ); if let Some(author) = author { - url.push_str(&format!("&author={}", urlencoding::encode(author))); + let _ = write!(url, "&author={}", urlencoding::encode(author)); } url.push_str("&limit=5"); let response = self.client.get(&url).send().await.map_err(|e| { - PinakesError::External(format!("OpenLibrary search failed: {}", e)) + PinakesError::External(format!("OpenLibrary search failed: {e}")) })?; if !response.status().is_success() { @@ -80,13 +94,18 @@ impl OpenLibraryClient { let search_response: OpenLibrarySearchResponse = response.json().await.map_err(|e| { - PinakesError::External(format!("Failed to parse search results: {}", e)) + PinakesError::External(format!("Failed to parse search results: {e}")) })?; Ok(search_response.docs) } /// Fetch cover image by cover ID + /// + /// # Errors + /// + /// Returns an error if the HTTP request fails or the response cannot be + /// read. pub async fn fetch_cover( &self, cover_id: i64, @@ -98,13 +117,11 @@ impl OpenLibraryClient { CoverSize::Large => "L", }; - let url = format!( - "https://covers.openlibrary.org/b/id/{}-{}.jpg", - cover_id, size_str - ); + let url = + format!("https://covers.openlibrary.org/b/id/{cover_id}-{size_str}.jpg"); let response = self.client.get(&url).send().await.map_err(|e| { - PinakesError::External(format!("Cover download failed: {}", e)) + PinakesError::External(format!("Cover download failed: {e}")) })?; if !response.status().is_success() { @@ -115,11 +132,16 @@ impl OpenLibraryClient { } response.bytes().await.map(|b| b.to_vec()).map_err(|e| { - PinakesError::External(format!("Failed to read cover data: {}", e)) + PinakesError::External(format!("Failed to read cover data: {e}")) }) } /// Fetch cover by ISBN + /// + /// # Errors + /// + /// Returns an error if the HTTP request fails or the response cannot be + /// read. 
pub async fn fetch_cover_by_isbn( &self, isbn: &str, @@ -131,13 +153,11 @@ impl OpenLibraryClient { CoverSize::Large => "L", }; - let url = format!( - "https://covers.openlibrary.org/b/isbn/{}-{}.jpg", - isbn, size_str - ); + let url = + format!("https://covers.openlibrary.org/b/isbn/{isbn}-{size_str}.jpg"); let response = self.client.get(&url).send().await.map_err(|e| { - PinakesError::External(format!("Cover download failed: {}", e)) + PinakesError::External(format!("Cover download failed: {e}")) })?; if !response.status().is_success() { @@ -148,7 +168,7 @@ impl OpenLibraryClient { } response.bytes().await.map(|b| b.to_vec()).map_err(|e| { - PinakesError::External(format!("Failed to read cover data: {}", e)) + PinakesError::External(format!("Failed to read cover data: {e}")) }) } } @@ -220,6 +240,7 @@ pub enum StringOrObject { } impl StringOrObject { + #[must_use] pub fn as_str(&self) -> &str { match self { Self::String(s) => s, diff --git a/crates/pinakes-core/src/enrichment/tmdb.rs b/crates/pinakes-core/src/enrichment/tmdb.rs index 3a5575e..146deb8 100644 --- a/crates/pinakes-core/src/enrichment/tmdb.rs +++ b/crates/pinakes-core/src/enrichment/tmdb.rs @@ -18,6 +18,13 @@ pub struct TmdbEnricher { } impl TmdbEnricher { + /// Create a new `TMDb` enricher. + /// + /// # Panics + /// + /// Panics if the HTTP client cannot be built (programming error in client + /// configuration). 
+ #[must_use] pub fn new(api_key: String) -> Self { Self { client: reqwest::Client::builder() @@ -50,7 +57,7 @@ impl MetadataEnricher for TmdbEnricher { .get(&url) .query(&[ ("api_key", &self.api_key), - ("query", &title.to_string()), + ("query", &title.clone()), ("page", &"1".to_string()), ]) .send() @@ -85,7 +92,7 @@ impl MetadataEnricher for TmdbEnricher { })?; let results = json.get("results").and_then(|r| r.as_array()); - if results.is_none_or(|r| r.is_empty()) { + if results.is_none_or(std::vec::Vec::is_empty) { return Ok(None); } @@ -93,13 +100,14 @@ impl MetadataEnricher for TmdbEnricher { return Ok(None); }; let movie = &results[0]; - let external_id = match movie.get("id").and_then(|id| id.as_i64()) { + let external_id = match movie.get("id").and_then(serde_json::Value::as_i64) + { Some(id) => id.to_string(), None => return Ok(None), }; let popularity = movie .get("popularity") - .and_then(|p| p.as_f64()) + .and_then(serde_json::Value::as_f64) .unwrap_or(0.0); // Normalize popularity to 0-1 range (TMDB popularity can be very high) let confidence = (popularity / 100.0).min(1.0); diff --git a/crates/pinakes-core/src/error.rs b/crates/pinakes-core/src/error.rs index 404bd73..941a23d 100644 --- a/crates/pinakes-core/src/error.rs +++ b/crates/pinakes-core/src/error.rs @@ -112,19 +112,19 @@ pub enum PinakesError { impl From for PinakesError { fn from(e: rusqlite::Error) -> Self { - PinakesError::Database(e.to_string()) + Self::Database(e.to_string()) } } impl From for PinakesError { fn from(e: tokio_postgres::Error) -> Self { - PinakesError::Database(e.to_string()) + Self::Database(e.to_string()) } } impl From for PinakesError { fn from(e: serde_json::Error) -> Self { - PinakesError::Serialization(e.to_string()) + Self::Serialization(e.to_string()) } } diff --git a/crates/pinakes-core/src/events.rs b/crates/pinakes-core/src/events.rs index 9fd52d8..cbe90ae 100644 --- a/crates/pinakes-core/src/events.rs +++ b/crates/pinakes-core/src/events.rs @@ -55,10 +55,12 @@ 
fn haversine_distance(lat1: f64, lon1: f64, lat2: f64, lon2: f64) -> f64 { let dlat = (lat2 - lat1).to_radians(); let dlon = (lon2 - lon1).to_radians(); - let a = (dlat / 2.0).sin().powi(2) - + lat1.to_radians().cos() + let a = (dlat / 2.0).sin().mul_add( + (dlat / 2.0).sin(), + lat1.to_radians().cos() * lat2.to_radians().cos() - * (dlon / 2.0).sin().powi(2); + * (dlon / 2.0).sin().powi(2), + ); let c = 2.0 * a.sqrt().atan2((1.0 - a).sqrt()); @@ -127,7 +129,8 @@ pub fn detect_events( if let (Some((lat1, lon1)), Some((lat2, lon2))) = (current_location, item.latitude.zip(item.longitude)) { - current_location = Some(((lat1 + lat2) / 2.0, (lon1 + lon2) / 2.0)); + current_location = + Some((f64::midpoint(lat1, lat2), f64::midpoint(lon1, lon2))); } else if item.latitude.is_some() && item.longitude.is_some() { current_location = item.latitude.zip(item.longitude); } diff --git a/crates/pinakes-core/src/hash.rs b/crates/pinakes-core/src/hash.rs index fc0ebdd..348f2db 100644 --- a/crates/pinakes-core/src/hash.rs +++ b/crates/pinakes-core/src/hash.rs @@ -38,6 +38,7 @@ pub async fn compute_file_hash(path: &Path) -> Result { } /// Computes the BLAKE3 hash of a byte slice synchronously. 
+#[must_use] pub fn compute_hash_sync(data: &[u8]) -> ContentHash { let hash = blake3::hash(data); ContentHash::new(hash.to_hex().to_string()) diff --git a/crates/pinakes-core/src/import.rs b/crates/pinakes-core/src/import.rs index 9d12a49..5d4935d 100644 --- a/crates/pinakes-core/src/import.rs +++ b/crates/pinakes-core/src/import.rs @@ -1,5 +1,6 @@ use std::{ path::{Path, PathBuf}, + sync::Arc, time::SystemTime, }; @@ -12,7 +13,14 @@ use crate::{ links, media_type::{BuiltinMediaType, MediaType}, metadata, - model::*, + model::{ + AuditAction, + CustomField, + CustomFieldType, + MediaId, + MediaItem, + StorageMode, + }, storage::DynStorageBackend, thumbnail, }; @@ -43,7 +51,7 @@ fn get_file_mtime(path: &Path) -> Option { .ok() .and_then(|m| m.modified().ok()) .and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok()) - .map(|d| d.as_secs() as i64) + .map(|d| i64::try_from(d.as_secs()).unwrap_or(i64::MAX)) } /// Validates that a path is within configured root directories. @@ -103,6 +111,10 @@ pub async fn import_file( } /// Import a file with configurable options for incremental scanning +/// +/// # Errors +/// +/// Returns [`PinakesError`] if the file cannot be read, hashed, or stored. pub async fn import_file_with_options( storage: &DynStorageBackend, path: &Path, @@ -161,7 +173,7 @@ pub async fn import_file_with_options( let path_clone = path.clone(); let media_type_clone = media_type.clone(); tokio::task::spawn_blocking(move || { - metadata::extract_metadata(&path_clone, media_type_clone) + metadata::extract_metadata(&path_clone, &media_type_clone) }) .await .map_err(|e| PinakesError::MetadataExtraction(e.to_string()))?? 
@@ -185,7 +197,7 @@ pub async fn import_file_with_options( thumbnail::generate_thumbnail( media_id, &source, - media_type_clone, + &media_type_clone, &thumb_dir, ) }) @@ -194,7 +206,7 @@ pub async fn import_file_with_options( }; // Generate perceptual hash for image files (if enabled in config) - let perceptual_hash = if options.photo_config.generate_perceptual_hash + let perceptual_hash = if options.photo_config.generate_perceptual_hash() && media_type.category() == crate::media_type::MediaCategory::Image { crate::metadata::image::generate_perceptual_hash(&path) @@ -327,6 +339,12 @@ pub(crate) fn should_ignore( /// Default number of concurrent import tasks. const DEFAULT_IMPORT_CONCURRENCY: usize = 8; +/// Import all supported files in a directory with default options. +/// +/// # Errors +/// +/// Returns [`PinakesError`] if the directory cannot be read or spawned tasks +/// fail. pub async fn import_directory( storage: &DynStorageBackend, dir: &Path, @@ -342,6 +360,13 @@ pub async fn import_directory( .await } +/// Import all supported files in a directory with a specified concurrency +/// limit. +/// +/// # Errors +/// +/// Returns [`PinakesError`] if the directory cannot be read or spawned tasks +/// fail. pub async fn import_directory_with_concurrency( storage: &DynStorageBackend, dir: &Path, @@ -358,7 +383,12 @@ pub async fn import_directory_with_concurrency( .await } -/// Import a directory with full options including incremental scanning support +/// Import a directory with full options including incremental scanning support. +/// +/// # Errors +/// +/// Returns [`PinakesError`] if the directory cannot be read or spawned tasks +/// fail. 
pub async fn import_directory_with_options( storage: &DynStorageBackend, dir: &Path, @@ -377,8 +407,8 @@ pub async fn import_directory_with_options( walkdir::WalkDir::new(&dir) .follow_links(true) .into_iter() - .filter_map(|e| e.ok()) - .filter(|e| e.file_type().is_file()) + .filter_map(std::result::Result::ok) + .filter(|e| !e.file_type().is_dir()) .filter(|e| MediaType::from_path(e.path()).is_some()) .filter(|e| !should_ignore(e.path(), &patterns)) .map(|e| e.path().to_path_buf()) @@ -392,7 +422,7 @@ pub async fn import_directory_with_options( let mut join_set = tokio::task::JoinSet::new(); for entry_path in entries { - let storage = storage.clone(); + let storage = Arc::clone(storage); let path = entry_path.clone(); let opts = options.clone(); diff --git a/crates/pinakes-core/src/integrity.rs b/crates/pinakes-core/src/integrity.rs index 6f3b856..625bbe2 100644 --- a/crates/pinakes-core/src/integrity.rs +++ b/crates/pinakes-core/src/integrity.rs @@ -85,6 +85,10 @@ impl std::str::FromStr for IntegrityStatus { /// # Returns /// /// Report containing orphaned items, untracked files, and moved files +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the storage operation fails. pub async fn detect_orphans( storage: &DynStorageBackend, ) -> Result { @@ -283,6 +287,10 @@ async fn detect_untracked_files( } /// Resolve orphaned media items by deleting them from the database. +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the storage operation fails. pub async fn resolve_orphans( storage: &DynStorageBackend, action: OrphanAction, @@ -302,6 +310,10 @@ pub async fn resolve_orphans( } /// Verify integrity of media files by recomputing hashes and comparing. +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the storage operation fails. 
pub async fn verify_integrity( storage: &DynStorageBackend, media_ids: Option<&[MediaId]>, @@ -361,6 +373,11 @@ pub async fn verify_integrity( } /// Clean up orphaned thumbnail files that don't correspond to any media item. +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the storage operation or +/// filesystem access fails. pub async fn cleanup_orphaned_thumbnails( storage: &DynStorageBackend, thumbnail_dir: &Path, diff --git a/crates/pinakes-core/src/links.rs b/crates/pinakes-core/src/links.rs index bdcdca7..851521b 100644 --- a/crates/pinakes-core/src/links.rs +++ b/crates/pinakes-core/src/links.rs @@ -15,18 +15,17 @@ use uuid::Uuid; use crate::model::{LinkType, MarkdownLink, MediaId}; -// Compile regexes once at startup to avoid recompilation on every call -static WIKILINK_RE: LazyLock = LazyLock::new(|| { - Regex::new(r"\[\[([^\]|]+)(?:\|([^\]]+))?\]\]").expect("valid wikilink regex") -}); +// Compile regexes once at startup to avoid recompilation on every call. +// Stored as Option so that initialization failure is handled gracefully +// rather than panicking. 
+static WIKILINK_RE: LazyLock> = + LazyLock::new(|| Regex::new(r"\[\[([^\]|]+)(?:\|([^\]]+))?\]\]").ok()); -static EMBED_RE: LazyLock = LazyLock::new(|| { - Regex::new(r"!\[\[([^\]|]+)(?:\|([^\]]+))?\]\]").expect("valid embed regex") -}); +static EMBED_RE: LazyLock> = + LazyLock::new(|| Regex::new(r"!\[\[([^\]|]+)(?:\|([^\]]+))?\]\]").ok()); -static MARKDOWN_LINK_RE: LazyLock = LazyLock::new(|| { - Regex::new(r"\[([^\]]+)\]\(([^)]+)\)").expect("valid markdown link regex") -}); +static MARKDOWN_LINK_RE: LazyLock> = + LazyLock::new(|| Regex::new(r"\[([^\]]+)\]\(([^)]+)\)").ok()); /// Configuration for context extraction around links const CONTEXT_CHARS_BEFORE: usize = 50; @@ -38,6 +37,7 @@ const CONTEXT_CHARS_AFTER: usize = 50; /// - Wikilinks: `[[target]]` and `[[target|display text]]` /// - Embeds: `![[target]]` /// - Markdown links: `[text](path)` (internal paths only, no http/https) +#[must_use] pub fn extract_links( source_media_id: MediaId, content: &str, @@ -63,10 +63,13 @@ fn extract_wikilinks( source_media_id: MediaId, content: &str, ) -> Vec { + let Some(re) = WIKILINK_RE.as_ref() else { + return Vec::new(); + }; let mut links = Vec::new(); for (line_num, line) in content.lines().enumerate() { - for cap in WIKILINK_RE.captures_iter(line) { + for cap in re.captures_iter(line) { let Some(full_match) = cap.get(0) else { continue; }; @@ -100,7 +103,11 @@ fn extract_wikilinks( target_media_id: None, // Will be resolved later link_type: LinkType::Wikilink, link_text: display_text.or_else(|| Some(target.to_string())), - line_number: Some(line_num as i32 + 1), // 1-indexed + line_number: Some( + i32::try_from(line_num) + .unwrap_or(i32::MAX) + .saturating_add(1), + ), // 1-indexed context: Some(context), created_at: chrono::Utc::now(), }); @@ -116,10 +123,13 @@ fn extract_embeds( source_media_id: MediaId, content: &str, ) -> Vec { + let Some(re) = EMBED_RE.as_ref() else { + return Vec::new(); + }; let mut links = Vec::new(); for (line_num, line) in 
content.lines().enumerate() { - for cap in EMBED_RE.captures_iter(line) { + for cap in re.captures_iter(line) { let Some(full_match) = cap.get(0) else { continue; }; @@ -143,7 +153,11 @@ fn extract_embeds( target_media_id: None, link_type: LinkType::Embed, link_text: display_text.or_else(|| Some(target.to_string())), - line_number: Some(line_num as i32 + 1), + line_number: Some( + i32::try_from(line_num) + .unwrap_or(i32::MAX) + .saturating_add(1), + ), context: Some(context), created_at: chrono::Utc::now(), }); @@ -159,10 +173,13 @@ fn extract_markdown_links( source_media_id: MediaId, content: &str, ) -> Vec { + let Some(re) = MARKDOWN_LINK_RE.as_ref() else { + return Vec::new(); + }; let mut links = Vec::new(); for (line_num, line) in content.lines().enumerate() { - for cap in MARKDOWN_LINK_RE.captures_iter(line) { + for cap in re.captures_iter(line) { let Some(full_match) = cap.get(0) else { continue; }; @@ -215,7 +232,11 @@ fn extract_markdown_links( target_media_id: None, link_type: LinkType::MarkdownLink, link_text: Some(text.to_string()), - line_number: Some(line_num as i32 + 1), + line_number: Some( + i32::try_from(line_num) + .unwrap_or(i32::MAX) + .saturating_add(1), + ), context: Some(context), created_at: chrono::Utc::now(), }); @@ -278,6 +299,7 @@ pub enum ResolutionStrategy { /// Resolve a link target to possible file paths. /// /// Returns a list of candidate paths to check, in order of preference. +#[must_use] pub fn resolve_link_candidates( target: &str, source_path: &Path, @@ -307,7 +329,7 @@ pub fn resolve_link_candidates( candidates.push(relative.clone()); // Also try with .md extension - if !target.ends_with(".md") { + if !target.to_ascii_lowercase().ends_with(".md") { candidates.push(relative.with_extension("md")); let mut with_md = relative.clone(); with_md.set_file_name(format!( @@ -319,10 +341,10 @@ pub fn resolve_link_candidates( } // 3. 
Filename with .md extension in root dirs - let target_with_md = if target.ends_with(".md") { + let target_with_md = if target.to_ascii_lowercase().ends_with(".md") { target.to_string() } else { - format!("{}.md", target) + format!("{target}.md") }; for root in root_dirs { @@ -340,6 +362,7 @@ pub fn resolve_link_candidates( /// /// Obsidian uses the `aliases` field in frontmatter to define alternative names /// for a note that can be used in wikilinks. +#[must_use] pub fn extract_aliases(content: &str) -> Vec { let Ok(parsed) = gray_matter::Matter::::new().parse(content) @@ -441,7 +464,7 @@ mod tests { #[test] fn test_multiple_links() { - let content = r#" + let content = r" # My Note This links to [[Note A]] and also [[Note B|Note B Title]]. @@ -449,7 +472,7 @@ This links to [[Note A]] and also [[Note B|Note B Title]]. We also have a markdown link to [config](./config.md). And an embedded image: ![[diagram.png]] -"#; +"; let links = extract_links(test_media_id(), content); assert_eq!(links.len(), 4); @@ -488,7 +511,7 @@ And an embedded image: ![[diagram.png]] #[test] fn test_extract_aliases() { - let content = r#"--- + let content = r"--- title: My Note aliases: - Alternative Name @@ -496,20 +519,20 @@ aliases: --- # Content here -"#; +"; let aliases = extract_aliases(content); assert_eq!(aliases, vec!["Alternative Name", "Another Alias"]); } #[test] fn test_extract_single_alias() { - let content = r#"--- + let content = r"--- title: My Note aliases: Single Alias --- # Content -"#; +"; let aliases = extract_aliases(content); assert_eq!(aliases, vec!["Single Alias"]); } @@ -538,7 +561,7 @@ aliases: Single Alias #[test] fn test_exclude_markdown_images() { // Test that markdown images ![alt](image.png) are NOT extracted as links - let content = r#" + let content = r" # My Note Here's a regular link: [documentation](docs/guide.md) @@ -551,15 +574,14 @@ Multiple images: ![Logo](logo.png) and ![Banner](banner.jpg) Mixed: [link](file.md) then ![image](pic.png) then 
[another](other.md) -"#; +"; let links = extract_links(test_media_id(), content); // Should only extract the 4 markdown links, not the 4 images assert_eq!( links.len(), 4, - "Should extract 4 links, not images. Got: {:#?}", - links + "Should extract 4 links, not images. Got: {links:#?}" ); // Verify all extracted items are MarkdownLink type (not images) diff --git a/crates/pinakes-core/src/managed_storage.rs b/crates/pinakes-core/src/managed_storage.rs index 631732e..be2a83f 100644 --- a/crates/pinakes-core/src/managed_storage.rs +++ b/crates/pinakes-core/src/managed_storage.rs @@ -28,7 +28,8 @@ pub struct ManagedStorageService { impl ManagedStorageService { /// Create a new managed storage service. - pub fn new( + #[must_use] + pub const fn new( root_dir: PathBuf, max_upload_size: u64, verify_on_read: bool, @@ -41,6 +42,10 @@ impl ManagedStorageService { } /// Initialize the storage directory structure. + /// + /// # Errors + /// + /// Returns [`PinakesError`] if the directory cannot be created. pub async fn init(&self) -> Result<()> { fs::create_dir_all(&self.root_dir).await?; info!(path = %self.root_dir.display(), "initialized managed storage"); @@ -50,6 +55,7 @@ impl ManagedStorageService { /// Get the storage path for a content hash. /// /// Layout: `///` + #[must_use] pub fn path(&self, hash: &ContentHash) -> PathBuf { let h = &hash.0; if h.len() >= 4 { @@ -61,7 +67,8 @@ impl ManagedStorageService { } /// Check if a blob exists in storage. - pub async fn exists(&self, hash: &ContentHash) -> bool { + #[must_use] + pub fn exists(&self, hash: &ContentHash) -> bool { self.path(hash).exists() } @@ -70,6 +77,11 @@ impl ManagedStorageService { /// Returns the content hash and file size. /// If the file already exists with the same hash, returns early /// (deduplication). + /// + /// # Errors + /// + /// Returns [`PinakesError`] if the file cannot be stored or exceeds the size + /// limit. 
pub async fn store_stream( &self, mut reader: R, @@ -119,14 +131,13 @@ impl ManagedStorageService { debug!(hash = %hash, "blob already exists, deduplicating"); let _ = fs::remove_file(&temp_path).await; return Ok((hash, total_size)); - } else { - warn!( - hash = %hash, - expected = total_size, - actual = existing_meta.len(), - "size mismatch for existing blob, replacing" - ); } + warn!( + hash = %hash, + expected = total_size, + actual = existing_meta.len(), + "size mismatch for existing blob, replacing" + ); } // Move temp file to final location @@ -140,6 +151,10 @@ impl ManagedStorageService { } /// Store a file from a path. + /// + /// # Errors + /// + /// Returns [`PinakesError`] if the file cannot be opened or stored. pub async fn store_file(&self, path: &Path) -> Result<(ContentHash, u64)> { let file = fs::File::open(path).await?; let reader = BufReader::new(file); @@ -147,6 +162,11 @@ impl ManagedStorageService { } /// Store bytes directly. + /// + /// # Errors + /// + /// Returns [`PinakesError`] if the data cannot be stored or exceeds the size + /// limit. pub async fn store_bytes(&self, data: &[u8]) -> Result<(ContentHash, u64)> { use std::io::Cursor; let cursor = Cursor::new(data); @@ -154,6 +174,10 @@ impl ManagedStorageService { } /// Open a blob for reading. + /// + /// # Errors + /// + /// Returns [`PinakesError`] if the blob does not exist or cannot be opened. pub async fn open(&self, hash: &ContentHash) -> Result { let path = self.path(hash); if !path.exists() { @@ -168,6 +192,11 @@ impl ManagedStorageService { } /// Read a blob entirely into memory. + /// + /// # Errors + /// + /// Returns [`PinakesError`] if the blob does not exist, cannot be read, or + /// fails integrity check. 
pub async fn read(&self, hash: &ContentHash) -> Result> { let path = self.path(hash); if !path.exists() { @@ -180,8 +209,7 @@ impl ManagedStorageService { let computed = blake3::hash(&data); if computed.to_hex().to_string() != hash.0 { return Err(PinakesError::StorageIntegrity(format!( - "hash mismatch for blob {}", - hash + "hash mismatch for blob {hash}" ))); } } @@ -190,6 +218,11 @@ impl ManagedStorageService { } /// Verify the integrity of a stored blob. + /// + /// # Errors + /// + /// Returns [`PinakesError`] if the blob cannot be read or has a hash + /// mismatch. pub async fn verify(&self, hash: &ContentHash) -> Result { let path = self.path(hash); if !path.exists() { @@ -217,8 +250,7 @@ impl ManagedStorageService { "blob integrity check failed" ); return Err(PinakesError::StorageIntegrity(format!( - "hash mismatch: expected {}, computed {}", - hash, computed + "hash mismatch: expected {hash}, computed {computed}" ))); } @@ -227,6 +259,10 @@ impl ManagedStorageService { } /// Delete a blob from storage. + /// + /// # Errors + /// + /// Returns [`PinakesError`] if the blob cannot be removed. pub async fn delete(&self, hash: &ContentHash) -> Result<()> { let path = self.path(hash); if path.exists() { @@ -245,6 +281,11 @@ impl ManagedStorageService { } /// Get the size of a stored blob. + /// + /// # Errors + /// + /// Returns [`PinakesError`] if the blob does not exist or metadata cannot be + /// read. pub async fn size(&self, hash: &ContentHash) -> Result { let path = self.path(hash); if !path.exists() { @@ -255,18 +296,23 @@ impl ManagedStorageService { } /// List all blob hashes in storage. + /// + /// # Errors + /// + /// Returns [`PinakesError`] if the storage directory cannot be read. pub async fn list_all(&self) -> Result> { let mut hashes = Vec::new(); let mut entries = fs::read_dir(&self.root_dir).await?; while let Some(entry) = entries.next_entry().await? 
{ let path = entry.path(); - if path.is_dir() && path.file_name().map(|n| n.len()) == Some(2) { + if path.is_dir() && path.file_name().map(std::ffi::OsStr::len) == Some(2) + { let mut sub_entries = fs::read_dir(&path).await?; while let Some(sub_entry) = sub_entries.next_entry().await? { let sub_path = sub_entry.path(); if sub_path.is_dir() - && sub_path.file_name().map(|n| n.len()) == Some(2) + && sub_path.file_name().map(std::ffi::OsStr::len) == Some(2) { let mut file_entries = fs::read_dir(&sub_path).await?; while let Some(file_entry) = file_entries.next_entry().await? { @@ -287,6 +333,10 @@ impl ManagedStorageService { } /// Calculate total storage used by all blobs. + /// + /// # Errors + /// + /// Returns [`PinakesError`] if listing blobs or querying sizes fails. pub async fn total_size(&self) -> Result { let hashes = self.list_all().await?; let mut total = 0u64; @@ -299,6 +349,10 @@ impl ManagedStorageService { } /// Clean up any orphaned temp files. + /// + /// # Errors + /// + /// Returns [`PinakesError`] if the temp directory cannot be read. 
pub async fn cleanup_temp(&self) -> Result { let temp_dir = self.root_dir.join("temp"); if !temp_dir.exists() { @@ -349,7 +403,7 @@ mod tests { let (hash, size) = service.store_bytes(data).await.unwrap(); assert_eq!(size, data.len() as u64); - assert!(service.exists(&hash).await); + assert!(service.exists(&hash)); let retrieved = service.read(&hash).await.unwrap(); assert_eq!(retrieved, data); @@ -405,9 +459,9 @@ mod tests { let data = b"delete me"; let (hash, _) = service.store_bytes(data).await.unwrap(); - assert!(service.exists(&hash).await); + assert!(service.exists(&hash)); service.delete(&hash).await.unwrap(); - assert!(!service.exists(&hash).await); + assert!(!service.exists(&hash)); } } diff --git a/crates/pinakes-core/src/media_type/builtin.rs b/crates/pinakes-core/src/media_type/builtin.rs index bd84aca..93701b7 100644 --- a/crates/pinakes-core/src/media_type/builtin.rs +++ b/crates/pinakes-core/src/media_type/builtin.rs @@ -62,7 +62,8 @@ pub enum MediaCategory { impl BuiltinMediaType { /// Get the unique, stable ID for this media type. 
- pub fn id(&self) -> &'static str { + #[must_use] + pub const fn id(&self) -> &'static str { match self { Self::Mp3 => "mp3", Self::Flac => "flac", @@ -98,6 +99,7 @@ impl BuiltinMediaType { } /// Get the display name for this media type + #[must_use] pub fn name(&self) -> String { match self { Self::Mp3 => "MP3 Audio".to_string(), @@ -133,6 +135,7 @@ impl BuiltinMediaType { } } + #[must_use] pub fn from_extension(ext: &str) -> Option { match ext.to_ascii_lowercase().as_str() { "mp3" => Some(Self::Mp3), @@ -176,7 +179,8 @@ impl BuiltinMediaType { .and_then(Self::from_extension) } - pub fn mime_type(&self) -> &'static str { + #[must_use] + pub const fn mime_type(&self) -> &'static str { match self { Self::Mp3 => "audio/mpeg", Self::Flac => "audio/flac", @@ -211,7 +215,8 @@ impl BuiltinMediaType { } } - pub fn category(&self) -> MediaCategory { + #[must_use] + pub const fn category(&self) -> MediaCategory { match self { Self::Mp3 | Self::Flac @@ -240,7 +245,8 @@ impl BuiltinMediaType { } } - pub fn extensions(&self) -> &'static [&'static str] { + #[must_use] + pub const fn extensions(&self) -> &'static [&'static str] { match self { Self::Mp3 => &["mp3"], Self::Flac => &["flac"], @@ -276,7 +282,8 @@ impl BuiltinMediaType { } /// Returns true if this is a RAW image format. 
- pub fn is_raw(&self) -> bool { + #[must_use] + pub const fn is_raw(&self) -> bool { matches!( self, Self::Cr2 | Self::Nef | Self::Arw | Self::Dng | Self::Orf | Self::Rw2 diff --git a/crates/pinakes-core/src/media_type/mod.rs b/crates/pinakes-core/src/media_type/mod.rs index 9d221c5..2c73ef0 100644 --- a/crates/pinakes-core/src/media_type/mod.rs +++ b/crates/pinakes-core/src/media_type/mod.rs @@ -31,6 +31,7 @@ impl MediaType { } /// Get the type ID as a string + #[must_use] pub fn id(&self) -> String { match self { Self::Builtin(b) => b.id().to_string(), @@ -40,6 +41,7 @@ impl MediaType { /// Get the display name for this media type /// For custom types without a registry, returns the ID as the name + #[must_use] pub fn name(&self) -> String { match self { Self::Builtin(b) => b.name(), @@ -48,14 +50,14 @@ impl MediaType { } /// Get the display name for this media type with registry support + #[must_use] pub fn name_with_registry(&self, registry: &MediaTypeRegistry) -> String { match self { Self::Builtin(b) => b.name(), Self::Custom(id) => { registry .get(id) - .map(|d| d.name.clone()) - .unwrap_or_else(|| id.clone()) + .map_or_else(|| id.clone(), |d| d.name.clone()) }, } } @@ -63,7 +65,8 @@ impl MediaType { /// Get the category for this media type /// For custom types without a registry, returns [`MediaCategory::Document`] /// as default - pub fn category(&self) -> MediaCategory { + #[must_use] + pub const fn category(&self) -> MediaCategory { match self { Self::Builtin(b) => b.category(), Self::Custom(_) => MediaCategory::Document, @@ -71,6 +74,7 @@ impl MediaType { } /// Get the category for this media type with registry support + #[must_use] pub fn category_with_registry( &self, registry: &MediaTypeRegistry, @@ -88,6 +92,7 @@ impl MediaType { /// Get the MIME type /// For custom types without a registry, returns "application/octet-stream" + #[must_use] pub fn mime_type(&self) -> String { match self { Self::Builtin(b) => b.mime_type().to_string(), @@ -96,6 
+101,7 @@ impl MediaType { } /// Get the MIME type with registry support + #[must_use] pub fn mime_type_with_registry( &self, registry: &MediaTypeRegistry, @@ -113,23 +119,31 @@ impl MediaType { /// Get file extensions /// For custom types without a registry, returns an empty vec + #[must_use] pub fn extensions(&self) -> Vec { match self { Self::Builtin(b) => { - b.extensions().iter().map(|s| s.to_string()).collect() + b.extensions() + .iter() + .map(std::string::ToString::to_string) + .collect() }, Self::Custom(_) => vec![], } } /// Get file extensions with registry support + #[must_use] pub fn extensions_with_registry( &self, registry: &MediaTypeRegistry, ) -> Vec { match self { Self::Builtin(b) => { - b.extensions().iter().map(|s| s.to_string()).collect() + b.extensions() + .iter() + .map(std::string::ToString::to_string) + .collect() }, Self::Custom(id) => { registry @@ -141,7 +155,8 @@ impl MediaType { } /// Check if this is a RAW image format - pub fn is_raw(&self) -> bool { + #[must_use] + pub const fn is_raw(&self) -> bool { match self { Self::Builtin(b) => b.is_raw(), Self::Custom(_) => false, @@ -149,13 +164,14 @@ impl MediaType { } /// Resolve a media type from file extension (built-in types only) - /// Use from_extension_with_registry for custom types + /// Use `from_extension_with_registry` for custom types pub fn from_extension(ext: &str) -> Option { BuiltinMediaType::from_extension(ext).map(Self::Builtin) } /// Resolve a media type from file extension with registry (includes custom /// types) + #[must_use] pub fn from_extension_with_registry( ext: &str, registry: &MediaTypeRegistry, @@ -172,7 +188,7 @@ impl MediaType { } /// Resolve a media type from file path (built-in types only) - /// Use from_path_with_registry for custom types + /// Use `from_path_with_registry` for custom types pub fn from_path(path: &Path) -> Option { path .extension() @@ -181,6 +197,7 @@ impl MediaType { } /// Resolve a media type from file path with registry (includes custom 
types) + #[must_use] pub fn from_path_with_registry( path: &Path, registry: &MediaTypeRegistry, diff --git a/crates/pinakes-core/src/media_type/registry.rs b/crates/pinakes-core/src/media_type/registry.rs index a759f7d..569a3ab 100644 --- a/crates/pinakes-core/src/media_type/registry.rs +++ b/crates/pinakes-core/src/media_type/registry.rs @@ -41,6 +41,7 @@ pub struct MediaTypeRegistry { impl MediaTypeRegistry { /// Create a new empty registry + #[must_use] pub fn new() -> Self { Self { types: HashMap::new(), @@ -78,7 +79,7 @@ impl MediaTypeRegistry { let descriptor = self .types .remove(id) - .ok_or_else(|| anyhow!("Media type not found: {}", id))?; + .ok_or_else(|| anyhow!("Media type not found: {id}"))?; // Remove extensions for ext in &descriptor.extensions { @@ -92,11 +93,13 @@ impl MediaTypeRegistry { } /// Get a media type descriptor by ID + #[must_use] pub fn get(&self, id: &str) -> Option<&MediaTypeDescriptor> { self.types.get(id) } /// Get a media type by file extension + #[must_use] pub fn get_by_extension(&self, ext: &str) -> Option<&MediaTypeDescriptor> { let ext_lower = ext.to_lowercase(); self @@ -106,11 +109,13 @@ impl MediaTypeRegistry { } /// List all registered media types + #[must_use] pub fn list_all(&self) -> Vec<&MediaTypeDescriptor> { self.types.values().collect() } /// List media types from a specific plugin + #[must_use] pub fn list_by_plugin(&self, plugin_id: &str) -> Vec<&MediaTypeDescriptor> { self .types @@ -119,7 +124,8 @@ impl MediaTypeRegistry { .collect() } - /// List built-in media types (plugin_id is None) + /// List built-in media types (`plugin_id` is None) + #[must_use] pub fn list_builtin(&self) -> Vec<&MediaTypeDescriptor> { self .types @@ -129,11 +135,13 @@ impl MediaTypeRegistry { } /// Get count of registered types + #[must_use] pub fn count(&self) -> usize { self.types.len() } /// Check if a media type is registered + #[must_use] pub fn contains(&self, id: &str) -> bool { self.types.contains_key(id) } @@ -170,7 +178,7 @@ 
mod tests { fn create_test_descriptor(id: &str, ext: &str) -> MediaTypeDescriptor { MediaTypeDescriptor { id: id.to_string(), - name: format!("{} Type", id), + name: format!("{id} Type"), category: Some(MediaCategory::Document), extensions: vec![ext.to_string()], mime_types: vec![format!("application/{}", id)], @@ -183,7 +191,7 @@ mod tests { let mut registry = MediaTypeRegistry::new(); let descriptor = create_test_descriptor("test", "tst"); - registry.register(descriptor.clone()).unwrap(); + registry.register(descriptor).unwrap(); let retrieved = registry.get("test").unwrap(); assert_eq!(retrieved.id, "test"); @@ -271,8 +279,8 @@ mod tests { for i in 1..=3 { let desc = MediaTypeDescriptor { - id: format!("type{}", i), - name: format!("Type {}", i), + id: format!("type{i}"), + name: format!("Type {i}"), category: Some(MediaCategory::Document), extensions: vec![format!("t{}", i)], mime_types: vec![format!("application/type{}", i)], diff --git a/crates/pinakes-core/src/metadata/audio.rs b/crates/pinakes-core/src/metadata/audio.rs index 960544d..576f511 100644 --- a/crates/pinakes-core/src/metadata/audio.rs +++ b/crates/pinakes-core/src/metadata/audio.rs @@ -29,7 +29,7 @@ impl MetadataExtractor for AudioExtractor { meta.artist = tag.artist().map(|s| s.to_string()); meta.album = tag.album().map(|s| s.to_string()); meta.genre = tag.genre().map(|s| s.to_string()); - meta.year = tag.date().map(|ts| ts.year as i32); + meta.year = tag.date().map(|ts| i32::from(ts.year)); } if let Some(tag) = tagged_file diff --git a/crates/pinakes-core/src/metadata/image.rs b/crates/pinakes-core/src/metadata/image.rs index a39f3bd..6652a82 100644 --- a/crates/pinakes-core/src/metadata/image.rs +++ b/crates/pinakes-core/src/metadata/image.rs @@ -15,11 +15,11 @@ impl MetadataExtractor for ImageExtractor { let file = std::fs::File::open(path)?; let mut buf_reader = std::io::BufReader::new(&file); - let exif_data = - match exif::Reader::new().read_from_container(&mut buf_reader) { - Ok(exif) => 
exif, - Err(_) => return Ok(meta), - }; + let Ok(exif_data) = + exif::Reader::new().read_from_container(&mut buf_reader) + else { + return Ok(meta); + }; // Image dimensions if let Some(width) = exif_data @@ -226,7 +226,7 @@ impl MetadataExtractor for ImageExtractor { fn field_to_u32(field: &exif::Field) -> Option { match &field.value { exif::Value::Long(v) => v.first().copied(), - exif::Value::Short(v) => v.first().map(|&x| x as u32), + exif::Value::Short(v) => v.first().map(|&x| u32::from(x)), _ => None, } } @@ -274,9 +274,11 @@ fn parse_exif_datetime(s: &str) -> Option> { } /// Generate a perceptual hash for an image file. +/// /// Uses DCT (Discrete Cosine Transform) hash algorithm for robust similarity /// detection. Returns a hex-encoded hash string, or None if the image cannot be /// processed. +#[must_use] pub fn generate_perceptual_hash(path: &Path) -> Option { use image_hasher::{HashAlg, HasherConfig}; diff --git a/crates/pinakes-core/src/metadata/mod.rs b/crates/pinakes-core/src/metadata/mod.rs index b622dba..ddb601e 100644 --- a/crates/pinakes-core/src/metadata/mod.rs +++ b/crates/pinakes-core/src/metadata/mod.rs @@ -34,13 +34,25 @@ pub struct ExtractedMetadata { } pub trait MetadataExtractor: Send + Sync { + /// Extract metadata from a file at the given path. + /// + /// # Errors + /// + /// Returns an error if the file cannot be read or parsed. fn extract(&self, path: &Path) -> Result; fn supported_types(&self) -> Vec; } +/// Extract metadata from a file using the appropriate extractor for the given +/// media type. +/// +/// # Errors +/// +/// Returns an error if no extractor supports the media type, or if extraction +/// fails. 
pub fn extract_metadata( path: &Path, - media_type: MediaType, + media_type: &MediaType, ) -> Result { let extractors: Vec> = vec![ Box::new(audio::AudioExtractor), @@ -51,7 +63,7 @@ pub fn extract_metadata( ]; for extractor in &extractors { - if extractor.supported_types().contains(&media_type) { + if extractor.supported_types().contains(media_type) { return extractor.extract(path); } } diff --git a/crates/pinakes-core/src/metadata/video.rs b/crates/pinakes-core/src/metadata/video.rs index 0e6b827..a0c26f5 100644 --- a/crates/pinakes-core/src/metadata/video.rs +++ b/crates/pinakes-core/src/metadata/video.rs @@ -53,7 +53,7 @@ fn extract_mkv(path: &Path) -> Result { matroska::Settings::Audio(a) => { meta.extra.insert( "sample_rate".to_string(), - format!("{} Hz", a.sample_rate as u32), + format!("{:.0} Hz", a.sample_rate), ); meta .extra @@ -64,7 +64,7 @@ fn extract_mkv(path: &Path) -> Result { .insert("audio_codec".to_string(), track.codec_id.clone()); } }, - _ => {}, + matroska::Settings::None => {}, } } @@ -99,7 +99,7 @@ fn extract_mp4(path: &Path) -> Result { meta.genre = tag .genre() .map(|s: std::borrow::Cow<'_, str>| s.to_string()); - meta.year = tag.date().map(|ts| ts.year as i32); + meta.year = tag.date().map(|ts| i32::from(ts.year)); } let properties = tagged_file.properties(); diff --git a/crates/pinakes-core/src/opener.rs b/crates/pinakes-core/src/opener.rs index 7681a87..5326154 100644 --- a/crates/pinakes-core/src/opener.rs +++ b/crates/pinakes-core/src/opener.rs @@ -67,6 +67,7 @@ impl Opener for WindowsOpener { } /// Returns the platform-appropriate opener. 
+#[must_use] pub fn default_opener() -> Box { if cfg!(target_os = "macos") { Box::new(MacOpener) diff --git a/crates/pinakes-core/src/path_validation.rs b/crates/pinakes-core/src/path_validation.rs index 3a89b3b..784eb14 100644 --- a/crates/pinakes-core/src/path_validation.rs +++ b/crates/pinakes-core/src/path_validation.rs @@ -33,6 +33,14 @@ use crate::error::{PinakesError, Result}; /// The canonicalized path if valid, or a `PathNotAllowed` error if the path /// is outside all allowed roots. /// +/// # Errors +/// +/// Returns a `PathNotAllowed` error if: +/// - No allowed roots are configured +/// - The path does not exist +/// - The path cannot be canonicalized +/// - The path is outside all allowed roots +/// /// # Example /// /// ```no_run @@ -106,6 +114,11 @@ pub fn validate_path( /// /// This is a convenience wrapper for `validate_path` when you only have one /// root. +/// +/// # Errors +/// +/// Returns a `PathNotAllowed` error if the path is outside the root directory +/// or cannot be canonicalized. pub fn validate_path_single_root(path: &Path, root: &Path) -> Result { validate_path(path, &[root.to_path_buf()]) } @@ -125,6 +138,7 @@ pub fn validate_path_single_root(path: &Path, root: &Path) -> Result { /// /// `true` if the path appears safe (no obvious traversal sequences), /// `false` if it contains suspicious patterns. +#[must_use] pub fn path_looks_safe(path: &str) -> bool { // Reject paths with obvious traversal patterns !path.contains("..") @@ -148,6 +162,7 @@ pub fn path_looks_safe(path: &str) -> bool { /// # Returns /// /// A sanitized filename safe for use on most filesystems. +#[must_use] pub fn sanitize_filename(filename: &str) -> String { let sanitized: String = filename .chars() @@ -186,6 +201,14 @@ pub fn sanitize_filename(filename: &str) -> String { /// /// The joined path if safe, or an error if the relative path would escape the /// base. 
+/// +/// # Errors +/// +/// Returns a `PathNotAllowed` error if: +/// - The relative path is absolute +/// - The relative path contains `..` +/// - The base path cannot be canonicalized +/// - A path traversal is detected pub fn safe_join(base: &Path, relative: &str) -> Result { // Reject absolute paths in the relative component if relative.starts_with('/') || relative.starts_with('\\') { @@ -215,7 +238,7 @@ pub fn safe_join(base: &Path, relative: &str) -> Result { // The joined path might not exist yet, so we can't canonicalize it directly. // Instead, we check each component - let mut current = canonical_base.clone(); + let mut current = canonical_base; for component in Path::new(relative).components() { use std::path::Component; match component { @@ -227,7 +250,7 @@ pub fn safe_join(base: &Path, relative: &str) -> Result { "path traversal detected".to_string(), )); }, - Component::CurDir => continue, + Component::CurDir => {}, _ => { return Err(PinakesError::PathNotAllowed( "invalid path component".to_string(), diff --git a/crates/pinakes-core/src/plugin/loader.rs b/crates/pinakes-core/src/plugin/loader.rs index 3aa509e..f8242e8 100644 --- a/crates/pinakes-core/src/plugin/loader.rs +++ b/crates/pinakes-core/src/plugin/loader.rs @@ -15,12 +15,17 @@ pub struct PluginLoader { impl PluginLoader { /// Create a new plugin loader - pub fn new(plugin_dirs: Vec) -> Self { + #[must_use] + pub const fn new(plugin_dirs: Vec) -> Self { Self { plugin_dirs } } /// Discover all plugins in configured directories - pub async fn discover_plugins(&self) -> Result> { + /// + /// # Errors + /// + /// Returns an error if plugin discovery fails.
+ pub fn discover_plugins(&self) -> Result> { let mut manifests = Vec::new(); for dir in &self.plugin_dirs { @@ -31,25 +36,16 @@ impl PluginLoader { info!("Discovering plugins in: {:?}", dir); - match self.discover_in_directory(dir).await { - Ok(found) => { - info!("Found {} plugins in {:?}", found.len(), dir); - manifests.extend(found); - }, - Err(e) => { - warn!("Error discovering plugins in {:?}: {}", dir, e); - }, - } + let found = Self::discover_in_directory(dir); + info!("Found {} plugins in {:?}", found.len(), dir); + manifests.extend(found); } Ok(manifests) } /// Discover plugins in a specific directory - async fn discover_in_directory( - &self, - dir: &Path, - ) -> Result> { + fn discover_in_directory(dir: &Path) -> Vec { let mut manifests = Vec::new(); // Walk the directory looking for plugin.toml files @@ -83,10 +79,15 @@ impl PluginLoader { } } - Ok(manifests) + manifests } /// Resolve the WASM binary path from a manifest + /// + /// # Errors + /// + /// Returns an error if the WASM binary is not found or its path escapes the + /// plugin directory. 
pub fn resolve_wasm_path( &self, manifest: &PluginManifest, @@ -114,14 +115,14 @@ impl PluginLoader { // traversal) let canonical_wasm = wasm_path .canonicalize() - .map_err(|e| anyhow!("Failed to canonicalize WASM path: {}", e))?; + .map_err(|e| anyhow!("Failed to canonicalize WASM path: {e}"))?; let canonical_plugin_dir = plugin_dir .canonicalize() - .map_err(|e| anyhow!("Failed to canonicalize plugin dir: {}", e))?; + .map_err(|e| anyhow!("Failed to canonicalize plugin dir: {e}"))?; if !canonical_wasm.starts_with(&canonical_plugin_dir) { return Err(anyhow!( - "WASM binary path escapes plugin directory: {:?}", - wasm_path + "WASM binary path escapes plugin directory: {}", + wasm_path.display() )); } return Ok(canonical_wasm); @@ -135,12 +136,19 @@ impl PluginLoader { } /// Download a plugin from a URL + /// + /// # Errors + /// + /// Returns an error if the URL is not HTTPS, no plugin directories are + /// configured, the download fails, the archive is too large, or extraction + /// fails. 
pub async fn download_plugin(&self, url: &str) -> Result { + const MAX_PLUGIN_SIZE: u64 = 100 * 1024 * 1024; // 100 MB + // Only allow HTTPS downloads if !url.starts_with("https://") { return Err(anyhow!( - "Only HTTPS URLs are allowed for plugin downloads: {}", - url + "Only HTTPS URLs are allowed for plugin downloads: {url}" )); } @@ -153,15 +161,15 @@ impl PluginLoader { // Download the archive with timeout and size limits let client = reqwest::Client::builder() - .timeout(std::time::Duration::from_secs(300)) + .timeout(std::time::Duration::from_mins(5)) .build() - .map_err(|e| anyhow!("Failed to build HTTP client: {}", e))?; + .map_err(|e| anyhow!("Failed to build HTTP client: {e}"))?; let response = client .get(url) .send() .await - .map_err(|e| anyhow!("Failed to download plugin: {}", e))?; + .map_err(|e| anyhow!("Failed to download plugin: {e}"))?; if !response.status().is_success() { return Err(anyhow!( @@ -171,21 +179,19 @@ impl PluginLoader { } // Check content-length header before downloading - const MAX_PLUGIN_SIZE: u64 = 100 * 1024 * 1024; // 100 MB if let Some(content_length) = response.content_length() && content_length > MAX_PLUGIN_SIZE { return Err(anyhow!( - "Plugin archive too large: {} bytes (max {} bytes)", - content_length, - MAX_PLUGIN_SIZE + "Plugin archive too large: {content_length} bytes (max \ + {MAX_PLUGIN_SIZE} bytes)" )); } let bytes = response .bytes() .await - .map_err(|e| anyhow!("Failed to read plugin response: {}", e))?; + .map_err(|e| anyhow!("Failed to read plugin response: {e}"))?; // Check actual size after download if bytes.len() as u64 > MAX_PLUGIN_SIZE { @@ -204,7 +210,7 @@ impl PluginLoader { // Extract using tar with -C to target directory let canonical_dest = dest_dir .canonicalize() - .map_err(|e| anyhow!("Failed to canonicalize dest dir: {}", e))?; + .map_err(|e| anyhow!("Failed to canonicalize dest dir: {e}"))?; let output = std::process::Command::new("tar") .args([ "xzf", @@ -213,7 +219,7 @@ impl PluginLoader { 
&canonical_dest.to_string_lossy(), ]) .output() - .map_err(|e| anyhow!("Failed to extract plugin archive: {}", e))?; + .map_err(|e| anyhow!("Failed to extract plugin archive: {e}"))?; // Clean up the archive let _ = std::fs::remove_file(&temp_archive); @@ -231,8 +237,8 @@ impl PluginLoader { let entry_canonical = entry.path().canonicalize()?; if !entry_canonical.starts_with(&canonical_dest) { return Err(anyhow!( - "Extracted file escapes destination directory: {:?}", - entry.path() + "Extracted file escapes destination directory: {}", + entry.path().display() )); } } @@ -255,22 +261,26 @@ impl PluginLoader { } Err(anyhow!( - "No plugin.toml found after extracting archive from: {}", - url + "No plugin.toml found after extracting archive from: {url}" )) } /// Validate a plugin package + /// + /// # Errors + /// + /// Returns an error if the path does not exist, is missing `plugin.toml`, + /// the WASM binary is not found, or the WASM file is invalid. pub fn validate_plugin_package(&self, path: &Path) -> Result<()> { // Check that the path exists if !path.exists() { - return Err(anyhow!("Plugin path does not exist: {:?}", path)); + return Err(anyhow!("Plugin path does not exist: {}", path.display())); } // Check for plugin.toml let manifest_path = path.join("plugin.toml"); if !manifest_path.exists() { - return Err(anyhow!("Missing plugin.toml in {:?}", path)); + return Err(anyhow!("Missing plugin.toml in {}", path.display())); } // Parse and validate manifest @@ -291,21 +301,22 @@ impl PluginLoader { let canonical_path = path.canonicalize()?; if !canonical_wasm.starts_with(&canonical_path) { return Err(anyhow!( - "WASM binary path escapes plugin directory: {:?}", - wasm_path + "WASM binary path escapes plugin directory: {}", + wasm_path.display() )); } // Validate WASM file let wasm_bytes = std::fs::read(&wasm_path)?; if wasm_bytes.len() < 4 || &wasm_bytes[0..4] != b"\0asm" { - return Err(anyhow!("Invalid WASM file: {:?}", wasm_path)); + return Err(anyhow!("Invalid 
WASM file: {}", wasm_path.display())); } Ok(()) } /// Get plugin directory path for a given plugin name + #[must_use] pub fn get_plugin_dir(&self, plugin_name: &str) -> Option { for dir in &self.plugin_dirs { let plugin_dir = dir.join(plugin_name); @@ -323,17 +334,17 @@ mod tests { use super::*; - #[tokio::test] - async fn test_discover_plugins_empty() { + #[test] + fn test_discover_plugins_empty() { let temp_dir = TempDir::new().unwrap(); let loader = PluginLoader::new(vec![temp_dir.path().to_path_buf()]); - let manifests = loader.discover_plugins().await.unwrap(); + let manifests = loader.discover_plugins().unwrap(); assert_eq!(manifests.len(), 0); } - #[tokio::test] - async fn test_discover_plugins_with_manifest() { + #[test] + fn test_discover_plugins_with_manifest() { let temp_dir = TempDir::new().unwrap(); let plugin_dir = temp_dir.path().join("test-plugin"); std::fs::create_dir(&plugin_dir).unwrap(); @@ -356,7 +367,7 @@ wasm = "plugin.wasm" .unwrap(); let loader = PluginLoader::new(vec![temp_dir.path().to_path_buf()]); - let manifests = loader.discover_plugins().await.unwrap(); + let manifests = loader.discover_plugins().unwrap(); assert_eq!(manifests.len(), 1); assert_eq!(manifests[0].plugin.name, "test-plugin"); diff --git a/crates/pinakes-core/src/plugin/mod.rs b/crates/pinakes-core/src/plugin/mod.rs index 0afd7b7..74a7ed2 100644 --- a/crates/pinakes-core/src/plugin/mod.rs +++ b/crates/pinakes-core/src/plugin/mod.rs @@ -97,6 +97,11 @@ impl From for PluginManagerConfig { impl PluginManager { /// Create a new plugin manager + /// + /// # Errors + /// + /// Returns an error if the data or cache directories cannot be created, or + /// if the WASM runtime cannot be initialized. pub fn new( data_dir: PathBuf, cache_dir: PathBuf, @@ -123,10 +128,14 @@ impl PluginManager { } /// Discover and load all plugins from configured directories + /// + /// # Errors + /// + /// Returns an error if plugin discovery fails. 
pub async fn discover_and_load_all(&self) -> Result> { info!("Discovering plugins from {:?}", self.config.plugin_dirs); - let manifests = self.loader.discover_plugins().await?; + let manifests = self.loader.discover_plugins()?; let mut loaded_plugins = Vec::new(); for manifest in manifests { @@ -145,6 +154,12 @@ impl PluginManager { } /// Load a plugin from a manifest file + /// + /// # Errors + /// + /// Returns an error if the plugin ID is invalid, capability validation + /// fails, the WASM binary cannot be loaded, or the plugin cannot be + /// registered. async fn load_plugin_from_manifest( &self, manifest: &pinakes_plugin_api::PluginManifest, @@ -156,7 +171,7 @@ impl PluginManager { || plugin_id.contains('\\') || plugin_id.contains("..") { - return Err(anyhow::anyhow!("Invalid plugin ID: {}", plugin_id)); + return Err(anyhow::anyhow!("Invalid plugin ID: {plugin_id}")); } // Check if already loaded @@ -202,7 +217,7 @@ impl PluginManager { // Load WASM binary let wasm_path = self.loader.resolve_wasm_path(manifest)?; - let wasm_plugin = self.runtime.load_plugin(&wasm_path, context).await?; + let wasm_plugin = self.runtime.load_plugin(&wasm_path, context)?; // Initialize plugin let init_succeeded = match wasm_plugin @@ -246,13 +261,20 @@ impl PluginManager { enabled: init_succeeded, }; - let mut registry = self.registry.write().await; - registry.register(registered)?; + { + let mut registry = self.registry.write().await; + registry.register(registered)?; + } Ok(plugin_id) } /// Install a plugin from a file or URL + /// + /// # Errors + /// + /// Returns an error if the plugin cannot be downloaded, the manifest cannot + /// be read, or the plugin cannot be loaded. 
pub async fn install_plugin(&self, source: &str) -> Result { info!("Installing plugin from: {}", source); @@ -276,13 +298,18 @@ impl PluginManager { } /// Uninstall a plugin + /// + /// # Errors + /// + /// Returns an error if the plugin ID is invalid, the plugin cannot be shut + /// down, cannot be unregistered, or its data directories cannot be removed. pub async fn uninstall_plugin(&self, plugin_id: &str) -> Result<()> { // Validate plugin_id to prevent path traversal if plugin_id.contains('/') || plugin_id.contains('\\') || plugin_id.contains("..") { - return Err(anyhow::anyhow!("Invalid plugin ID: {}", plugin_id)); + return Err(anyhow::anyhow!("Invalid plugin ID: {plugin_id}")); } info!("Uninstalling plugin: {}", plugin_id); @@ -291,8 +318,10 @@ impl PluginManager { self.shutdown_plugin(plugin_id).await?; // Remove from registry - let mut registry = self.registry.write().await; - registry.unregister(plugin_id)?; + { + let mut registry = self.registry.write().await; + registry.unregister(plugin_id)?; + } // Remove plugin data and cache let plugin_data_dir = self.data_dir.join(plugin_id); @@ -309,37 +338,55 @@ impl PluginManager { } /// Enable a plugin + /// + /// # Errors + /// + /// Returns an error if the plugin ID is not found in the registry. pub async fn enable_plugin(&self, plugin_id: &str) -> Result<()> { let mut registry = self.registry.write().await; registry.enable(plugin_id) } /// Disable a plugin + /// + /// # Errors + /// + /// Returns an error if the plugin ID is not found in the registry. pub async fn disable_plugin(&self, plugin_id: &str) -> Result<()> { let mut registry = self.registry.write().await; registry.disable(plugin_id) } /// Shutdown a specific plugin + /// + /// # Errors + /// + /// Returns an error if the plugin ID is not found in the registry. 
pub async fn shutdown_plugin(&self, plugin_id: &str) -> Result<()> { debug!("Shutting down plugin: {}", plugin_id); let registry = self.registry.read().await; if let Some(plugin) = registry.get(plugin_id) { - plugin.wasm_plugin.call_function("shutdown", &[]).await.ok(); + let _ = plugin.wasm_plugin.call_function("shutdown", &[]).await; Ok(()) } else { - Err(anyhow::anyhow!("Plugin not found: {}", plugin_id)) + Err(anyhow::anyhow!("Plugin not found: {plugin_id}")) } } /// Shutdown all plugins + /// + /// # Errors + /// + /// This function always returns `Ok(())`. Individual plugin shutdown errors + /// are logged but do not cause the overall operation to fail. pub async fn shutdown_all(&self) -> Result<()> { info!("Shutting down all plugins"); - let registry = self.registry.read().await; - let plugin_ids: Vec = - registry.list_all().iter().map(|p| p.id.clone()).collect(); + let plugin_ids: Vec = { + let registry = self.registry.read().await; + registry.list_all().iter().map(|p| p.id.clone()).collect() + }; for plugin_id in plugin_ids { if let Err(e) = self.shutdown_plugin(&plugin_id).await { @@ -373,6 +420,11 @@ impl PluginManager { } /// Reload a plugin (for hot-reload during development) + /// + /// # Errors + /// + /// Returns an error if hot-reload is disabled, the plugin is not found, it + /// cannot be shut down, or the reloaded plugin cannot be registered. 
pub async fn reload_plugin(&self, plugin_id: &str) -> Result<()> { if !self.config.enable_hot_reload { return Err(anyhow::anyhow!("Hot-reload is disabled")); @@ -387,15 +439,21 @@ impl PluginManager { let plugin = registry .get(plugin_id) .ok_or_else(|| anyhow::anyhow!("Plugin not found"))?; - if let Some(ref manifest_path) = plugin.manifest_path { - pinakes_plugin_api::PluginManifest::from_file(manifest_path) - .unwrap_or_else(|e| { - warn!("Failed to re-read manifest from disk, using cached: {}", e); - plugin.manifest.clone() - }) - } else { - plugin.manifest.clone() - } + let manifest = plugin.manifest_path.as_ref().map_or_else( + || plugin.manifest.clone(), + |manifest_path| { + pinakes_plugin_api::PluginManifest::from_file(manifest_path) + .unwrap_or_else(|e| { + warn!( + "Failed to re-read manifest from disk, using cached: {}", + e + ); + plugin.manifest.clone() + }) + }, + ); + drop(registry); + manifest }; // Shutdown and unload current version diff --git a/crates/pinakes-core/src/plugin/registry.rs b/crates/pinakes-core/src/plugin/registry.rs index dd920d7..1439280 100644 --- a/crates/pinakes-core/src/plugin/registry.rs +++ b/crates/pinakes-core/src/plugin/registry.rs @@ -26,6 +26,7 @@ pub struct PluginRegistry { impl PluginRegistry { /// Create a new empty registry + #[must_use] pub fn new() -> Self { Self { plugins: HashMap::new(), @@ -33,6 +34,10 @@ impl PluginRegistry { } /// Register a new plugin + /// + /// # Errors + /// + /// Returns an error if a plugin with the same ID is already registered. pub fn register(&mut self, plugin: RegisteredPlugin) -> Result<()> { if self.plugins.contains_key(&plugin.id) { return Err(anyhow!("Plugin already registered: {}", plugin.id)); @@ -43,15 +48,20 @@ impl PluginRegistry { } /// Unregister a plugin by ID + /// + /// # Errors + /// + /// Returns an error if the plugin ID is not found. 
pub fn unregister(&mut self, plugin_id: &str) -> Result<()> { self .plugins .remove(plugin_id) - .ok_or_else(|| anyhow!("Plugin not found: {}", plugin_id))?; + .ok_or_else(|| anyhow!("Plugin not found: {plugin_id}"))?; Ok(()) } /// Get a plugin by ID + #[must_use] pub fn get(&self, plugin_id: &str) -> Option<&RegisteredPlugin> { self.plugins.get(plugin_id) } @@ -62,48 +72,61 @@ impl PluginRegistry { } /// Check if a plugin is loaded + #[must_use] pub fn is_loaded(&self, plugin_id: &str) -> bool { self.plugins.contains_key(plugin_id) } /// Check if a plugin is enabled. Returns `None` if the plugin is not found. + #[must_use] pub fn is_enabled(&self, plugin_id: &str) -> Option { self.plugins.get(plugin_id).map(|p| p.enabled) } /// Enable a plugin + /// + /// # Errors + /// + /// Returns an error if the plugin ID is not found. pub fn enable(&mut self, plugin_id: &str) -> Result<()> { let plugin = self .plugins .get_mut(plugin_id) - .ok_or_else(|| anyhow!("Plugin not found: {}", plugin_id))?; + .ok_or_else(|| anyhow!("Plugin not found: {plugin_id}"))?; plugin.enabled = true; Ok(()) } /// Disable a plugin + /// + /// # Errors + /// + /// Returns an error if the plugin ID is not found. 
pub fn disable(&mut self, plugin_id: &str) -> Result<()> { let plugin = self .plugins .get_mut(plugin_id) - .ok_or_else(|| anyhow!("Plugin not found: {}", plugin_id))?; + .ok_or_else(|| anyhow!("Plugin not found: {plugin_id}"))?; plugin.enabled = false; Ok(()) } /// List all registered plugins + #[must_use] pub fn list_all(&self) -> Vec<&RegisteredPlugin> { self.plugins.values().collect() } /// List all enabled plugins + #[must_use] pub fn list_enabled(&self) -> Vec<&RegisteredPlugin> { self.plugins.values().filter(|p| p.enabled).collect() } - /// Get plugins by kind (e.g., "media_type", "metadata_extractor") + /// Get plugins by kind (e.g., "`media_type`", "`metadata_extractor`") + #[must_use] pub fn get_by_kind(&self, kind: &str) -> Vec<&RegisteredPlugin> { self .plugins @@ -113,11 +136,13 @@ } /// Get count of registered plugins + #[must_use] pub fn count(&self) -> usize { self.plugins.len() } /// Get count of enabled plugins + #[must_use] pub fn count_enabled(&self) -> usize { self.plugins.values().filter(|p| p.enabled).count() } @@ -182,7 +207,7 @@ mod tests { let plugin = create_test_plugin("test-plugin", vec!["media_type".to_string()]); - registry.register(plugin.clone()).unwrap(); + registry.register(plugin).unwrap(); assert!(registry.is_loaded("test-plugin")); assert!(registry.get("test-plugin").is_some()); diff --git a/crates/pinakes-core/src/plugin/runtime.rs b/crates/pinakes-core/src/plugin/runtime.rs index 2564cf7..c47f12e 100644 --- a/crates/pinakes-core/src/plugin/runtime.rs +++ b/crates/pinakes-core/src/plugin/runtime.rs @@ -4,7 +4,7 @@ use std::{path::Path, sync::Arc}; use anyhow::{Result, anyhow}; use pinakes_plugin_api::PluginContext; -use wasmtime::*; +use wasmtime::{Caller, Config, Engine, Linker, Module, Store, Val}; /// WASM runtime wrapper for executing plugins pub struct WasmRuntime { @@ -13,6 +13,11 @@ impl WasmRuntime { /// Create a new WASM runtime + /// + /// # Errors + /// + ///
Returns an error if the WASM engine cannot be created with the given + /// configuration. pub fn new() -> Result { let mut config = Config::new(); config.wasm_component_model(true); @@ -25,13 +30,18 @@ impl WasmRuntime { } /// Load a plugin from a WASM file - pub async fn load_plugin( + /// + /// # Errors + /// + /// Returns an error if the WASM file does not exist, cannot be read, or + /// cannot be compiled. + pub fn load_plugin( &self, wasm_path: &Path, context: PluginContext, ) -> Result { if !wasm_path.exists() { - return Err(anyhow!("WASM file not found: {:?}", wasm_path)); + return Err(anyhow!("WASM file not found: {}", wasm_path.display())); } let wasm_bytes = std::fs::read(wasm_path)?; @@ -59,7 +69,8 @@ pub struct WasmPlugin { impl WasmPlugin { /// Get the plugin context - pub fn context(&self) -> &PluginContext { + #[must_use] + pub const fn context(&self) -> &PluginContext { &self.context } @@ -67,6 +78,11 @@ impl WasmPlugin { /// /// Creates a fresh store and instance per invocation with host functions /// linked, calls the requested exported function, and returns the result. + /// + /// # Errors + /// + /// Returns an error if the function cannot be found, instantiation fails, + /// or the function call returns an error. 
pub async fn call_function( &self, function_name: &str, @@ -105,19 +121,23 @@ impl WasmPlugin { let offset = if let Ok(alloc) = instance.get_typed_func::(&mut store, "alloc") { - let result = alloc.call_async(&mut store, params.len() as i32).await?; + let result = alloc + .call_async( + &mut store, + i32::try_from(params.len()).unwrap_or(i32::MAX), + ) + .await?; if result < 0 { return Err(anyhow!( - "plugin alloc returned negative offset: {}", - result + "plugin alloc returned negative offset: {result}" )); } - result as usize + u32::try_from(result).unwrap_or(0) as usize } else { 0 }; - alloc_offset = offset as i32; + alloc_offset = i32::try_from(offset).unwrap_or(i32::MAX); let mem_data = mem.data_mut(&mut store); if offset + params.len() <= mem_data.len() { mem_data[offset..offset + params.len()].copy_from_slice(params); @@ -128,7 +148,7 @@ impl WasmPlugin { instance .get_func(&mut store, function_name) .ok_or_else(|| { - anyhow!("exported function '{}' not found", function_name) + anyhow!("exported function '{function_name}' not found") })?; let func_ty = func.ty(&store); @@ -143,7 +163,10 @@ impl WasmPlugin { func .call_async( &mut store, - &[Val::I32(alloc_offset), Val::I32(params.len() as i32)], + &[ + Val::I32(alloc_offset), + Val::I32(i32::try_from(params.len()).unwrap_or(i32::MAX)), + ], &mut results, ) .await?; @@ -152,7 +175,7 @@ impl WasmPlugin { } else { // Generic: fill with zeroes let params_vals: Vec = - (0..param_count).map(|_| Val::I32(0)).collect(); + std::iter::repeat_n(Val::I32(0), param_count).collect(); func .call_async(&mut store, ¶ms_vals, &mut results) .await?; @@ -177,7 +200,7 @@ impl WasmPlugin { impl Default for WasmPlugin { fn default() -> Self { let engine = Engine::default(); - let module = Module::new(&engine, br#"(module)"#).unwrap(); + let module = Module::new(&engine, br"(module)").unwrap(); Self { module: Arc::new(module), @@ -198,6 +221,10 @@ impl HostFunctions { /// Registers all host ABI functions (`host_log`, 
`host_read_file`, /// `host_write_file`, `host_http_request`, `host_get_config`, /// `host_get_buffer`) into the given linker. + /// + /// # Errors + /// + /// Returns an error if any host function cannot be registered in the linker. pub fn setup_linker(linker: &mut Linker) -> Result<()> { linker.func_wrap( "env", @@ -209,11 +236,13 @@ impl HostFunctions { if ptr < 0 || len < 0 { return; } - let memory = caller.get_export("memory").and_then(|e| e.into_memory()); + let memory = caller + .get_export("memory") + .and_then(wasmtime::Extern::into_memory); if let Some(mem) = memory { let data = mem.data(&caller); - let start = ptr as usize; - let end = start + len as usize; + let start = u32::try_from(ptr).unwrap_or(0) as usize; + let end = start + u32::try_from(len).unwrap_or(0) as usize; if end <= data.len() && let Ok(msg) = std::str::from_utf8(&data[start..end]) { @@ -238,12 +267,14 @@ impl HostFunctions { if path_ptr < 0 || path_len < 0 { return -1; } - let memory = caller.get_export("memory").and_then(|e| e.into_memory()); + let memory = caller + .get_export("memory") + .and_then(wasmtime::Extern::into_memory); let Some(mem) = memory else { return -1 }; let data = mem.data(&caller); - let start = path_ptr as usize; - let end = start + path_len as usize; + let start = u32::try_from(path_ptr).unwrap_or(0) as usize; + let end = start + u32::try_from(path_len).unwrap_or(0) as usize; if end > data.len() { return -1; } @@ -254,9 +285,8 @@ impl HostFunctions { }; // Canonicalize path before checking permissions to prevent traversal - let path = match std::path::Path::new(&path_str).canonicalize() { - Ok(p) => p, - Err(_) => return -1, + let Ok(path) = std::path::Path::new(&path_str).canonicalize() else { + return -1; }; // Check read permission against canonicalized path @@ -276,14 +306,11 @@ impl HostFunctions { return -2; } - match std::fs::read(&path) { - Ok(contents) => { - let len = contents.len() as i32; - caller.data_mut().exchange_buffer = contents; - len - }, - 
Err(_) => -1, - } + std::fs::read(&path).map_or(-1, |contents| { + let len = i32::try_from(contents.len()).unwrap_or(i32::MAX); + caller.data_mut().exchange_buffer = contents; + len + }) }, )?; @@ -299,14 +326,18 @@ impl HostFunctions { if path_ptr < 0 || path_len < 0 || data_ptr < 0 || data_len < 0 { return -1; } - let memory = caller.get_export("memory").and_then(|e| e.into_memory()); + let memory = caller + .get_export("memory") + .and_then(wasmtime::Extern::into_memory); let Some(mem) = memory else { return -1 }; let mem_data = mem.data(&caller); - let path_start = path_ptr as usize; - let path_end = path_start + path_len as usize; - let data_start = data_ptr as usize; - let data_end = data_start + data_len as usize; + let path_start = u32::try_from(path_ptr).unwrap_or(0) as usize; + let path_end = + path_start + u32::try_from(path_len).unwrap_or(0) as usize; + let data_start = u32::try_from(data_ptr).unwrap_or(0) as usize; + let data_end = + data_start + u32::try_from(data_len).unwrap_or(0) as usize; if path_end > mem_data.len() || data_end > mem_data.len() { return -1; @@ -369,12 +400,14 @@ impl HostFunctions { if url_ptr < 0 || url_len < 0 { return -1; } - let memory = caller.get_export("memory").and_then(|e| e.into_memory()); + let memory = caller + .get_export("memory") + .and_then(wasmtime::Extern::into_memory); let Some(mem) = memory else { return -1 }; let data = mem.data(&caller); - let start = url_ptr as usize; - let end = start + url_len as usize; + let start = u32::try_from(url_ptr).unwrap_or(0) as usize; + let end = start + u32::try_from(url_len).unwrap_or(0) as usize; if end > data.len() { return -1; } @@ -413,7 +446,7 @@ impl HostFunctions { match result { Ok(Ok(bytes)) => { - let len = bytes.len() as i32; + let len = i32::try_from(bytes.len()).unwrap_or(i32::MAX); caller.data_mut().exchange_buffer = bytes.to_vec(); len }, @@ -421,26 +454,19 @@ impl HostFunctions { Err(_) => { // block_in_place panicked (e.g. 
current-thread runtime); // fall back to blocking client with timeout - let client = match reqwest::blocking::Client::builder() + let Ok(client) = reqwest::blocking::Client::builder() .timeout(std::time::Duration::from_secs(30)) .build() - { - Ok(c) => c, - Err(_) => return -1, + else { + return -1; }; - match client.get(&url_str).send() { - Ok(resp) => { - match resp.bytes() { - Ok(bytes) => { - let len = bytes.len() as i32; - caller.data_mut().exchange_buffer = bytes.to_vec(); - len - }, - Err(_) => -1, - } - }, - Err(_) => -1, - } + client.get(&url_str).send().map_or(-1, |resp| { + resp.bytes().map_or(-1, |bytes| { + let len = i32::try_from(bytes.len()).unwrap_or(i32::MAX); + caller.data_mut().exchange_buffer = bytes.to_vec(); + len + }) + }) }, } }, @@ -456,12 +482,14 @@ impl HostFunctions { if key_ptr < 0 || key_len < 0 { return -1; } - let memory = caller.get_export("memory").and_then(|e| e.into_memory()); + let memory = caller + .get_export("memory") + .and_then(wasmtime::Extern::into_memory); let Some(mem) = memory else { return -1 }; let data = mem.data(&caller); - let start = key_ptr as usize; - let end = start + key_len as usize; + let start = u32::try_from(key_ptr).unwrap_or(0) as usize; + let end = start + u32::try_from(key_len).unwrap_or(0) as usize; if end > data.len() { return -1; } @@ -471,16 +499,17 @@ impl HostFunctions { Err(_) => return -1, }; - match caller.data().context.config.get(&key_str) { - Some(value) => { - let json = value.to_string(); - let bytes = json.into_bytes(); - let len = bytes.len() as i32; - caller.data_mut().exchange_buffer = bytes; - len - }, - None => -1, - } + let bytes = caller + .data() + .context + .config + .get(&key_str) + .map(|value| value.to_string().into_bytes()); + bytes.map_or(-1, |b| { + let len = i32::try_from(b.len()).unwrap_or(i32::MAX); + caller.data_mut().exchange_buffer = b; + len + }) }, )?; @@ -495,19 +524,22 @@ impl HostFunctions { return -1; } let buf = caller.data().exchange_buffer.clone(); - let 
copy_len = buf.len().min(dest_len as usize); + let copy_len = + buf.len().min(u32::try_from(dest_len).unwrap_or(0) as usize); - let memory = caller.get_export("memory").and_then(|e| e.into_memory()); + let memory = caller + .get_export("memory") + .and_then(wasmtime::Extern::into_memory); let Some(mem) = memory else { return -1 }; let mem_data = mem.data_mut(&mut caller); - let start = dest_ptr as usize; + let start = u32::try_from(dest_ptr).unwrap_or(0) as usize; if start + copy_len > mem_data.len() { return -1; } mem_data[start..start + copy_len].copy_from_slice(&buf[..copy_len]); - copy_len as i32 + i32::try_from(copy_len).unwrap_or(i32::MAX) }, )?; diff --git a/crates/pinakes-core/src/plugin/security.rs b/crates/pinakes-core/src/plugin/security.rs index 9948695..5f887fb 100644 --- a/crates/pinakes-core/src/plugin/security.rs +++ b/crates/pinakes-core/src/plugin/security.rs @@ -25,7 +25,8 @@ pub struct CapabilityEnforcer { impl CapabilityEnforcer { /// Create a new capability enforcer with default limits - pub fn new() -> Self { + #[must_use] + pub const fn new() -> Self { Self { max_memory_limit: 512 * 1024 * 1024, // 512 MB max_cpu_time_limit: 60 * 1000, // 60 seconds @@ -36,36 +37,47 @@ impl CapabilityEnforcer { } /// Set maximum memory limit - pub fn with_max_memory(mut self, bytes: usize) -> Self { + #[must_use] + pub const fn with_max_memory(mut self, bytes: usize) -> Self { self.max_memory_limit = bytes; self } /// Set maximum CPU time limit - pub fn with_max_cpu_time(mut self, milliseconds: u64) -> Self { + #[must_use] + pub const fn with_max_cpu_time(mut self, milliseconds: u64) -> Self { self.max_cpu_time_limit = milliseconds; self } /// Add allowed read path + #[must_use] pub fn allow_read_path(mut self, path: PathBuf) -> Self { self.allowed_read_paths.push(path); self } /// Add allowed write path + #[must_use] pub fn allow_write_path(mut self, path: PathBuf) -> Self { self.allowed_write_paths.push(path); self } /// Set default network access policy - 
pub fn with_network_default(mut self, allow: bool) -> Self { + #[must_use] + pub const fn with_network_default(mut self, allow: bool) -> Self { self.allow_network_default = allow; self } /// Validate capabilities requested by a plugin + /// + /// # Errors + /// + /// Returns an error if the plugin requests capabilities that exceed the + /// configured system limits, such as memory, CPU time, filesystem paths, or + /// network access. pub fn validate_capabilities( &self, capabilities: &Capabilities, @@ -115,8 +127,8 @@ impl CapabilityEnforcer { for path in &capabilities.filesystem.read { if !self.is_read_allowed(path) { return Err(anyhow!( - "Plugin requests read access to {:?} which is not in allowed paths", - path + "Plugin requests read access to {} which is not in allowed paths", + path.display() )); } } @@ -125,8 +137,8 @@ impl CapabilityEnforcer { for path in &capabilities.filesystem.write { if !self.is_write_allowed(path) { return Err(anyhow!( - "Plugin requests write access to {:?} which is not in allowed paths", - path + "Plugin requests write access to {} which is not in allowed paths", + path.display() )); } } @@ -135,6 +147,7 @@ impl CapabilityEnforcer { } /// Check if a path is allowed for reading + #[must_use] pub fn is_read_allowed(&self, path: &Path) -> bool { if self.allowed_read_paths.is_empty() { return false; // deny-all when unconfigured @@ -150,6 +163,7 @@ impl CapabilityEnforcer { } /// Check if a path is allowed for writing + #[must_use] pub fn is_write_allowed(&self, path: &Path) -> bool { if self.allowed_write_paths.is_empty() { return false; // deny-all when unconfigured @@ -173,11 +187,13 @@ impl CapabilityEnforcer { } /// Check if network access is allowed for a plugin - pub fn is_network_allowed(&self, capabilities: &Capabilities) -> bool { + #[must_use] + pub const fn is_network_allowed(&self, capabilities: &Capabilities) -> bool { capabilities.network.enabled && self.allow_network_default } /// Check if a specific domain is allowed + 
#[must_use] pub fn is_domain_allowed( &self, capabilities: &Capabilities, @@ -197,11 +213,13 @@ impl CapabilityEnforcer { .network .allowed_domains .as_ref() - .map(|domains| domains.iter().any(|d| d.eq_ignore_ascii_case(domain))) - .unwrap_or(false) + .is_some_and(|domains| { + domains.iter().any(|d| d.eq_ignore_ascii_case(domain)) + }) } /// Get effective memory limit for a plugin + #[must_use] pub fn get_memory_limit(&self, capabilities: &Capabilities) -> usize { capabilities .max_memory_bytes @@ -210,6 +228,7 @@ impl CapabilityEnforcer { } /// Get effective CPU time limit for a plugin + #[must_use] pub fn get_cpu_time_limit(&self, capabilities: &Capabilities) -> u64 { capabilities .max_cpu_time_ms @@ -264,8 +283,7 @@ mod tests { let test_file = allowed_dir.join("test.txt"); std::fs::write(&test_file, "test").unwrap(); - let enforcer = - CapabilityEnforcer::new().allow_read_path(allowed_dir.clone()); + let enforcer = CapabilityEnforcer::new().allow_read_path(allowed_dir); assert!(enforcer.is_read_allowed(&test_file)); assert!(!enforcer.is_read_allowed(Path::new("/etc/passwd"))); diff --git a/crates/pinakes-core/src/scan.rs b/crates/pinakes-core/src/scan.rs index 5d2dc00..8fe459f 100644 --- a/crates/pinakes-core/src/scan.rs +++ b/crates/pinakes-core/src/scan.rs @@ -46,6 +46,7 @@ pub struct ScanProgress { const MAX_STORED_ERRORS: usize = 100; impl ScanProgress { + #[must_use] pub fn new() -> Self { Self { is_scanning: Arc::new(AtomicBool::new(false)), @@ -56,6 +57,7 @@ impl ScanProgress { } } + #[must_use] pub fn snapshot(&self) -> ScanStatus { let errors = self .error_messages @@ -112,6 +114,10 @@ impl Default for ScanProgress { /// # Returns /// /// Scan status with counts and any errors +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the scan fails. 
pub async fn scan_directory( storage: &DynStorageBackend, dir: &Path, @@ -140,6 +146,10 @@ pub async fn scan_directory( /// # Returns /// /// Scan status with counts and any errors +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the scan fails. pub async fn scan_directory_incremental( storage: &DynStorageBackend, dir: &Path, @@ -165,6 +175,10 @@ pub async fn scan_directory_incremental( /// # Returns /// /// Scan status with counts and any errors +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the scan fails. pub async fn scan_directory_with_progress( storage: &DynStorageBackend, dir: &Path, @@ -182,7 +196,11 @@ pub async fn scan_directory_with_progress( } /// Scan a directory with full options including progress tracking and -/// incremental mode +/// incremental mode. +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the scan fails. pub async fn scan_directory_with_options( storage: &DynStorageBackend, dir: &Path, @@ -276,6 +294,10 @@ pub async fn scan_directory_with_options( /// # Returns /// /// Status for each root directory +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if listing roots or scanning fails. pub async fn scan_all_roots( storage: &DynStorageBackend, ignore_patterns: &[String], @@ -299,6 +321,10 @@ pub async fn scan_all_roots( /// # Returns /// /// Status for each root directory +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if listing roots or scanning fails. pub async fn scan_all_roots_incremental( storage: &DynStorageBackend, ignore_patterns: &[String], @@ -321,6 +347,10 @@ pub async fn scan_all_roots_incremental( /// # Returns /// /// Status for each root directory +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if listing roots or scanning fails. 
pub async fn scan_all_roots_with_progress( storage: &DynStorageBackend, ignore_patterns: &[String], @@ -347,6 +377,10 @@ pub async fn scan_all_roots_with_progress( /// # Returns /// /// Status for each root directory +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if listing roots or scanning fails. pub async fn scan_all_roots_with_options( storage: &DynStorageBackend, ignore_patterns: &[String], @@ -391,6 +425,11 @@ pub struct FileWatcher { impl FileWatcher { /// Creates a new file watcher for the given directories. + /// + /// # Errors + /// + /// Returns [`crate::error::PinakesError`] if no filesystem watcher could be + /// created. pub fn new(dirs: &[PathBuf]) -> Result { let (tx, rx) = mpsc::channel(1024); @@ -419,7 +458,7 @@ impl FileWatcher { dirs: &[PathBuf], tx: mpsc::Sender, ) -> std::result::Result, notify::Error> { - let tx_clone = tx.clone(); + let tx_clone = tx; let mut watcher = notify::recommended_watcher( move |res: notify::Result| { if let Ok(event) = res { @@ -444,7 +483,7 @@ impl FileWatcher { dirs: &[PathBuf], tx: mpsc::Sender, ) -> Result> { - let tx_clone = tx.clone(); + let tx_clone = tx; let poll_interval = std::time::Duration::from_secs(5); let config = notify::Config::default().with_poll_interval(poll_interval); @@ -479,6 +518,10 @@ impl FileWatcher { } /// Watches directories and imports files on change. +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the watcher cannot be started. 
pub async fn watch_and_import( storage: DynStorageBackend, dirs: Vec, diff --git a/crates/pinakes-core/src/scheduler.rs b/crates/pinakes-core/src/scheduler.rs index afbc4ee..fa66f37 100644 --- a/crates/pinakes-core/src/scheduler.rs +++ b/crates/pinakes-core/src/scheduler.rs @@ -29,12 +29,14 @@ pub enum Schedule { } impl Schedule { + #[must_use] pub fn next_run(&self, from: DateTime) -> DateTime { match self { - Schedule::Interval { secs } => { - from + chrono::Duration::seconds(*secs as i64) + Self::Interval { secs } => { + from + + chrono::Duration::seconds(i64::try_from(*secs).unwrap_or(i64::MAX)) }, - Schedule::Daily { hour, minute } => { + Self::Daily { hour, minute } => { let today = from .date_naive() .and_hms_opt(*hour, *minute, 0) @@ -46,26 +48,26 @@ impl Schedule { today_utc + chrono::Duration::days(1) } }, - Schedule::Weekly { day, hour, minute } => { + Self::Weekly { day, hour, minute } => { let current_day = from.weekday().num_days_from_monday(); let target_day = *day; - let days_ahead = if target_day > current_day { - target_day - current_day - } else if target_day < current_day { - 7 - (current_day - target_day) - } else { - let today = from - .date_naive() - .and_hms_opt(*hour, *minute, 0) - .unwrap_or_default() - .and_utc(); - if today > from { - return today; - } - 7 + let days_ahead = match target_day.cmp(¤t_day) { + std::cmp::Ordering::Greater => target_day - current_day, + std::cmp::Ordering::Less => 7 - (current_day - target_day), + std::cmp::Ordering::Equal => { + let today = from + .date_naive() + .and_hms_opt(*hour, *minute, 0) + .unwrap_or_default() + .and_utc(); + if today > from { + return today; + } + 7 + }, }; let target_date = - from.date_naive() + chrono::Duration::days(days_ahead as i64); + from.date_naive() + chrono::Duration::days(i64::from(days_ahead)); target_date .and_hms_opt(*hour, *minute, 0) .unwrap_or_default() @@ -74,21 +76,22 @@ impl Schedule { } } + #[must_use] pub fn display_string(&self) -> String { match self { - 
Schedule::Interval { secs } => { + Self::Interval { secs } => { if *secs >= 3600 { format!("Every {}h", secs / 3600) } else if *secs >= 60 { format!("Every {}m", secs / 60) } else { - format!("Every {}s", secs) + format!("Every {secs}s") } }, - Schedule::Daily { hour, minute } => { + Self::Daily { hour, minute } => { format!("Daily {hour:02}:{minute:02}") }, - Schedule::Weekly { day, hour, minute } => { + Self::Weekly { day, hour, minute } => { let day_name = match day { 0 => "Mon", 1 => "Tue", @@ -312,13 +315,17 @@ impl TaskScheduler { /// Run a task immediately. Uses a single write lock to avoid TOCTOU races. pub async fn run_now(&self, id: &str) -> Option { - let result = { + let kind = { + let tasks = self.tasks.read().await; + tasks.iter().find(|t| t.id == id)?.kind.clone() + }; + + // Submit the job (cheap: sends to mpsc channel) + let job_id = self.job_queue.submit(kind).await; + + { let mut tasks = self.tasks.write().await; let task = tasks.iter_mut().find(|t| t.id == id)?; - - // Submit the job (cheap: sends to mpsc channel) - let job_id = self.job_queue.submit(task.kind.clone()).await; - task.last_run = Some(Utc::now()); task.last_status = Some("running".to_string()); task.running = true; @@ -326,13 +333,11 @@ impl TaskScheduler { if task.enabled { task.next_run = Some(task.schedule.next_run(Utc::now())); } - - Some(job_id.to_string()) - }; - if result.is_some() { - self.persist_task_state().await; + drop(tasks); } - result + + self.persist_task_state().await; + Some(job_id.to_string()) } /// Main scheduler loop. Uses a two-phase approach per tick to avoid @@ -344,7 +349,7 @@ impl TaskScheduler { loop { tokio::select! 
{ _ = interval.tick() => {} - _ = self.cancel.cancelled() => { + () = self.cancel.cancelled() => { tracing::info!("scheduler shutting down"); return; } @@ -514,7 +519,7 @@ mod tests { let deserialized: ScheduledTask = serde_json::from_str(&json).unwrap(); assert_eq!(deserialized.id, "test"); - assert_eq!(deserialized.enabled, true); + assert!(deserialized.enabled); // running defaults to false on deserialization (skip_serializing) assert!(!deserialized.running); // last_job_id is skipped entirely @@ -603,7 +608,8 @@ mod tests { async fn test_default_tasks_contain_trash_purge() { let cancel = CancellationToken::new(); let config = Arc::new(RwLock::new(Config::default())); - let job_queue = JobQueue::new(1, |_, _, _, _| tokio::spawn(async move {})); + let job_queue = + JobQueue::new(1, 0, |_, _, _, _| tokio::spawn(async move {})); let scheduler = TaskScheduler::new(job_queue, cancel, config, None); let tasks = scheduler.list_tasks().await; @@ -644,7 +650,7 @@ mod tests { let deserialized: ScheduledTask = serde_json::from_str(&json).unwrap(); assert_eq!(deserialized.id, "trash_purge"); - assert_eq!(deserialized.enabled, true); + assert!(deserialized.enabled); assert!(!deserialized.running); assert!(deserialized.last_job_id.is_none()); } @@ -699,7 +705,7 @@ mod tests { let json = serde_json::to_string(&task).unwrap(); let deserialized: ScheduledTask = serde_json::from_str(&json).unwrap(); - assert_eq!(deserialized.running, false); + assert!(!deserialized.running); assert!(deserialized.last_job_id.is_none()); } diff --git a/crates/pinakes-core/src/search.rs b/crates/pinakes-core/src/search.rs index b146d97..1dd10c9 100644 --- a/crates/pinakes-core/src/search.rs +++ b/crates/pinakes-core/src/search.rs @@ -14,9 +14,9 @@ pub enum SearchQuery { field: String, value: String, }, - And(Vec), - Or(Vec), - Not(Box), + And(Vec), + Or(Vec), + Not(Box), Prefix(String), Fuzzy(String), TypeFilter(String), @@ -149,18 +149,13 @@ fn parse_date_value(s: &str) -> Option { /// Returns 
`None` if the input is invalid or if the value would overflow. fn parse_size_value(s: &str) -> Option { let s = s.to_uppercase(); - let (num_str, multiplier): (&str, i64) = if let Some(n) = s.strip_suffix("GB") - { - (n, 1024 * 1024 * 1024) - } else if let Some(n) = s.strip_suffix("MB") { - (n, 1024 * 1024) - } else if let Some(n) = s.strip_suffix("KB") { - (n, 1024) - } else if let Some(n) = s.strip_suffix('B') { - (n, 1) - } else { - (s.as_str(), 1) - }; + let (num_str, multiplier): (&str, i64) = s + .strip_suffix("GB") + .map(|n| (n, 1024 * 1024 * 1024_i64)) + .or_else(|| s.strip_suffix("MB").map(|n| (n, 1024 * 1024))) + .or_else(|| s.strip_suffix("KB").map(|n| (n, 1024))) + .or_else(|| s.strip_suffix('B').map(|n| (n, 1))) + .unwrap_or((s.as_str(), 1)); let num: i64 = num_str.parse().ok()?; num.checked_mul(multiplier) diff --git a/crates/pinakes-core/src/sync/chunked.rs b/crates/pinakes-core/src/sync/chunked.rs index 0e92730..e3e29a2 100644 --- a/crates/pinakes-core/src/sync/chunked.rs +++ b/crates/pinakes-core/src/sync/chunked.rs @@ -21,22 +21,32 @@ pub struct ChunkedUploadManager { impl ChunkedUploadManager { /// Create a new chunked upload manager. - pub fn new(temp_dir: PathBuf) -> Self { + #[must_use] + pub const fn new(temp_dir: PathBuf) -> Self { Self { temp_dir } } /// Initialize the temp directory. + /// + /// # Errors + /// + /// Returns an error if the directory cannot be created. pub async fn init(&self) -> Result<()> { fs::create_dir_all(&self.temp_dir).await?; Ok(()) } /// Get the temp file path for an upload session. + #[must_use] pub fn temp_path(&self, session_id: Uuid) -> PathBuf { - self.temp_dir.join(format!("{}.upload", session_id)) + self.temp_dir.join(format!("{session_id}.upload")) } /// Create the temp file for a new upload session. + /// + /// # Errors + /// + /// Returns an error if the file cannot be created or sized. 
pub async fn create_temp_file(&self, session: &UploadSession) -> Result<()> { let path = self.temp_path(session.id); @@ -54,6 +64,11 @@ impl ChunkedUploadManager { } /// Write a chunk to the temp file. + /// + /// # Errors + /// + /// Returns an error if the session file is not found, the chunk index is out + /// of range, the chunk size is wrong, or the write fails. pub async fn write_chunk( &self, session: &UploadSession, @@ -128,6 +143,11 @@ impl ChunkedUploadManager { /// 1. All chunks are received /// 2. File size matches expected /// 3. Content hash matches expected + /// + /// # Errors + /// + /// Returns an error if chunks are missing, the file size does not match, the + /// hash does not match, or the file metadata cannot be read. pub async fn finalize( &self, session: &UploadSession, @@ -147,12 +167,11 @@ impl ChunkedUploadManager { // Verify chunk indices let mut indices: Vec = received_chunks.iter().map(|c| c.chunk_index).collect(); - indices.sort(); + indices.sort_unstable(); for (i, idx) in indices.iter().enumerate() { if *idx != i as u64 { return Err(PinakesError::InvalidData(format!( - "chunk {} missing or out of order", - i + "chunk {i} missing or out of order" ))); } } @@ -187,6 +206,10 @@ impl ChunkedUploadManager { } /// Cancel an upload and clean up temp file. + /// + /// # Errors + /// + /// Returns an error if the temp file cannot be removed. pub async fn cancel(&self, session_id: Uuid) -> Result<()> { let path = self.temp_path(session_id); if path.exists() { @@ -197,6 +220,10 @@ impl ChunkedUploadManager { } /// Clean up expired temp files. + /// + /// # Errors + /// + /// Returns an error if the temp directory cannot be read. pub async fn cleanup_expired(&self, max_age_hours: u64) -> Result { let mut count = 0u64; let max_age = std::time::Duration::from_secs(max_age_hours * 3600); @@ -204,7 +231,7 @@ impl ChunkedUploadManager { let mut entries = fs::read_dir(&self.temp_dir).await?; while let Some(entry) = entries.next_entry().await? 
{ let path = entry.path(); - if path.extension().map(|e| e == "upload").unwrap_or(false) + if path.extension().is_some_and(|e| e == "upload") && let Ok(metadata) = fs::metadata(&path).await && let Ok(modified) = metadata.modified() { @@ -267,7 +294,7 @@ mod tests { expected_hash: ContentHash::new(hash.clone()), expected_size: data.len() as u64, chunk_size, - chunk_count: (data.len() as u64 + chunk_size - 1) / chunk_size, + chunk_count: (data.len() as u64).div_ceil(chunk_size), status: UploadStatus::InProgress, created_at: Utc::now(), expires_at: Utc::now() + chrono::Duration::hours(24), diff --git a/crates/pinakes-core/src/sync/conflict.rs b/crates/pinakes-core/src/sync/conflict.rs index 7f25570..eab7787 100644 --- a/crates/pinakes-core/src/sync/conflict.rs +++ b/crates/pinakes-core/src/sync/conflict.rs @@ -4,6 +4,7 @@ use super::DeviceSyncState; use crate::config::ConflictResolution; /// Detect if there's a conflict between local and server state. +#[must_use] pub fn detect_conflict(state: &DeviceSyncState) -> Option { // If either side has no hash, no conflict possible let local_hash = state.local_hash.as_ref()?; @@ -48,6 +49,7 @@ pub enum ConflictOutcome { } /// Resolve a conflict based on the configured strategy. +#[must_use] pub fn resolve_conflict( conflict: &ConflictInfo, resolution: ConflictResolution, @@ -67,20 +69,21 @@ pub fn resolve_conflict( } /// Generate a new path for the conflicting local file. -/// Format: filename.conflict-<short_hash>.ext +/// Format: `filename.conflict-<short_hash>.ext` fn generate_conflict_path(original_path: &str, local_hash: &str) -> String { let short_hash = &local_hash[..8.min(local_hash.len())]; if let Some((base, ext)) = original_path.rsplit_once('.') { - format!("{}.conflict-{}.{}", base, short_hash, ext) + format!("{base}.conflict-{short_hash}.{ext}") } else { - format!("{}.conflict-{}", original_path, short_hash) + format!("{original_path}.conflict-{short_hash}") } } /// Automatic conflict resolution based on modification times. 
-/// Useful when ConflictResolution is set to a time-based strategy. -pub fn resolve_by_mtime(conflict: &ConflictInfo) -> ConflictOutcome { +/// Useful when `ConflictResolution` is set to a time-based strategy. +#[must_use] +pub const fn resolve_by_mtime(conflict: &ConflictInfo) -> ConflictOutcome { match (conflict.local_mtime, conflict.server_mtime) { (Some(local), Some(server)) => { if local > server { diff --git a/crates/pinakes-core/src/sync/models.rs b/crates/pinakes-core/src/sync/models.rs index 5b8b917..5814c20 100644 --- a/crates/pinakes-core/src/sync/models.rs +++ b/crates/pinakes-core/src/sync/models.rs @@ -17,6 +17,7 @@ use crate::{ pub struct DeviceId(pub Uuid); impl DeviceId { + #[must_use] pub fn new() -> Self { Self(Uuid::now_v7()) } @@ -70,7 +71,7 @@ impl std::str::FromStr for DeviceType { "tablet" => Ok(Self::Tablet), "server" => Ok(Self::Server), "other" => Ok(Self::Other), - _ => Err(format!("unknown device type: {}", s)), + _ => Err(format!("unknown device type: {s}")), } } } @@ -93,6 +94,7 @@ pub struct SyncDevice { } impl SyncDevice { + #[must_use] pub fn new( user_id: UserId, name: String, @@ -150,7 +152,7 @@ impl std::str::FromStr for SyncChangeType { "deleted" => Ok(Self::Deleted), "moved" => Ok(Self::Moved), "metadata_updated" => Ok(Self::MetadataUpdated), - _ => Err(format!("unknown sync change type: {}", s)), + _ => Err(format!("unknown sync change type: {s}")), } } } @@ -171,6 +173,7 @@ pub struct SyncLogEntry { } impl SyncLogEntry { + #[must_use] pub fn new( change_type: SyncChangeType, path: String, @@ -225,7 +228,7 @@ impl std::str::FromStr for FileSyncStatus { "pending_download" => Ok(Self::PendingDownload), "conflict" => Ok(Self::Conflict), "deleted" => Ok(Self::Deleted), - _ => Err(format!("unknown file sync status: {}", s)), + _ => Err(format!("unknown file sync status: {s}")), } } } @@ -260,6 +263,7 @@ pub struct SyncConflict { } impl SyncConflict { + #[must_use] pub fn new( device_id: DeviceId, path: String, @@ -319,7 +323,7 @@ 
impl std::str::FromStr for UploadStatus { "failed" => Ok(Self::Failed), "expired" => Ok(Self::Expired), "cancelled" => Ok(Self::Cancelled), - _ => Err(format!("unknown upload status: {}", s)), + _ => Err(format!("unknown upload status: {s}")), } } } @@ -341,6 +345,7 @@ pub struct UploadSession { } impl UploadSession { + #[must_use] pub fn new( device_id: DeviceId, target_path: String, diff --git a/crates/pinakes-core/src/sync/protocol.rs b/crates/pinakes-core/src/sync/protocol.rs index be8f095..5c0c486 100644 --- a/crates/pinakes-core/src/sync/protocol.rs +++ b/crates/pinakes-core/src/sync/protocol.rs @@ -90,6 +90,10 @@ pub struct AckRequest { } /// Get changes since a cursor position. +/// +/// # Errors +/// +/// Returns an error if the storage query fails. pub async fn get_changes( storage: &DynStorageBackend, cursor: i64, @@ -101,7 +105,7 @@ pub async fn get_changes( let has_more = changes.len() > limit as usize; let changes: Vec<_> = changes.into_iter().take(limit as usize).collect(); - let new_cursor = changes.last().map(|c| c.sequence).unwrap_or(cursor); + let new_cursor = changes.last().map_or(cursor, |c| c.sequence); Ok(ChangesResponse { changes, @@ -111,6 +115,10 @@ pub async fn get_changes( } /// Record a change in the sync log. +/// +/// # Errors +/// +/// Returns an error if the storage record operation fails. pub async fn record_change( storage: &DynStorageBackend, change_type: SyncChangeType, @@ -138,6 +146,10 @@ pub async fn record_change( } /// Update device cursor after processing changes. +/// +/// # Errors +/// +/// Returns an error if the device lookup or update fails. pub async fn update_device_cursor( storage: &DynStorageBackend, device_id: DeviceId, @@ -152,6 +164,10 @@ pub async fn update_device_cursor( } /// Mark a file as synced for a device. +/// +/// # Errors +/// +/// Returns an error if the storage upsert operation fails. 
pub async fn mark_synced( storage: &DynStorageBackend, device_id: DeviceId, @@ -176,6 +192,10 @@ pub async fn mark_synced( } /// Mark a file as pending download for a device. +/// +/// # Errors +/// +/// Returns an error if the storage lookup or upsert operation fails. pub async fn mark_pending_download( storage: &DynStorageBackend, device_id: DeviceId, @@ -211,6 +231,7 @@ pub async fn mark_pending_download( } /// Generate a device token using UUIDs for randomness. +#[must_use] pub fn generate_device_token() -> String { // Concatenate two UUIDs for 256 bits of randomness let uuid1 = uuid::Uuid::new_v4(); @@ -219,6 +240,7 @@ pub fn generate_device_token() -> String { } /// Hash a device token for storage. +#[must_use] pub fn hash_device_token(token: &str) -> String { blake3::hash(token.as_bytes()).to_hex().to_string() } diff --git a/crates/pinakes-core/src/tags.rs b/crates/pinakes-core/src/tags.rs index cab6a0d..6e50794 100644 --- a/crates/pinakes-core/src/tags.rs +++ b/crates/pinakes-core/src/tags.rs @@ -17,6 +17,10 @@ use crate::{ /// # Returns /// /// The created tag +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the storage operation fails. pub async fn create_tag( storage: &DynStorageBackend, name: &str, @@ -36,6 +40,10 @@ pub async fn create_tag( /// # Returns /// /// `Ok(())` on success +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the storage operation fails. pub async fn tag_media( storage: &DynStorageBackend, media_id: MediaId, @@ -62,6 +70,10 @@ pub async fn tag_media( /// # Returns /// /// `Ok(())` on success +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the storage operation fails. pub async fn untag_media( storage: &DynStorageBackend, media_id: MediaId, @@ -87,6 +99,10 @@ pub async fn untag_media( /// # Returns /// /// List of child tags +/// +/// # Errors +/// +/// Returns [`crate::error::PinakesError`] if the storage operation fails. 
pub async fn get_tag_tree( storage: &DynStorageBackend, tag_id: Uuid, diff --git a/crates/pinakes-core/src/thumbnail.rs b/crates/pinakes-core/src/thumbnail.rs index 7dd7852..1656e2f 100644 --- a/crates/pinakes-core/src/thumbnail.rs +++ b/crates/pinakes-core/src/thumbnail.rs @@ -16,7 +16,7 @@ use crate::{ struct TempFileGuard(PathBuf); impl TempFileGuard { - fn new(path: PathBuf) -> Self { + const fn new(path: PathBuf) -> Self { Self(path) } @@ -35,10 +35,14 @@ impl Drop for TempFileGuard { /// /// Supports images (via `image` crate), videos (via ffmpeg), PDFs (via /// pdftoppm), and EPUBs (via cover image extraction). +/// +/// # Errors +/// +/// Returns [`PinakesError`] if thumbnail generation fails. pub fn generate_thumbnail( media_id: MediaId, source_path: &Path, - media_type: MediaType, + media_type: &MediaType, thumbnail_dir: &Path, ) -> Result> { generate_thumbnail_with_config( @@ -50,21 +54,26 @@ pub fn generate_thumbnail( ) } +/// Generate a thumbnail with custom configuration. +/// +/// # Errors +/// +/// Returns [`PinakesError`] if thumbnail generation fails. 
pub fn generate_thumbnail_with_config( media_id: MediaId, source_path: &Path, - media_type: MediaType, + media_type: &MediaType, thumbnail_dir: &Path, config: &ThumbnailConfig, ) -> Result> { std::fs::create_dir_all(thumbnail_dir)?; - let thumb_path = thumbnail_dir.join(format!("{}.jpg", media_id)); + let thumb_path = thumbnail_dir.join(format!("{media_id}.jpg")); let result = match media_type.category() { MediaCategory::Image => { if media_type.is_raw() { generate_raw_thumbnail(source_path, &thumb_path, config) - } else if media_type == MediaType::Builtin(BuiltinMediaType::Heic) { + } else if *media_type == MediaType::Builtin(BuiltinMediaType::Heic) { generate_heic_thumbnail(source_path, &thumb_path, config) } else { generate_image_thumbnail(source_path, &thumb_path, config) @@ -151,8 +160,7 @@ fn generate_video_thumbnail( if !status.success() { return Err(PinakesError::MetadataExtraction(format!( - "ffmpeg exited with status {}", - status + "ffmpeg exited with status {status}" ))); } @@ -180,8 +188,7 @@ fn generate_pdf_thumbnail( if !status.success() { return Err(PinakesError::MetadataExtraction(format!( - "pdftoppm exited with status {}", - status + "pdftoppm exited with status {status}" ))); } @@ -272,8 +279,7 @@ fn generate_raw_thumbnail( if !status.success() { // Guard drops and cleans up temp_ppm return Err(PinakesError::MetadataExtraction(format!( - "dcraw exited with status {}", - status + "dcraw exited with status {status}" ))); } @@ -320,8 +326,7 @@ fn generate_heic_thumbnail( if !status.success() { // Guard drops and cleans up temp_jpg return Err(PinakesError::MetadataExtraction(format!( - "heif-convert exited with status {}", - status + "heif-convert exited with status {status}" ))); } @@ -357,26 +362,32 @@ pub enum CoverSize { } impl CoverSize { - pub fn dimensions(&self) -> Option<(u32, u32)> { + #[must_use] + pub const fn dimensions(&self) -> Option<(u32, u32)> { match self { - CoverSize::Tiny => Some((64, 64)), - CoverSize::Grid => Some((320, 
320)), - CoverSize::Preview => Some((1024, 1024)), - CoverSize::Original => None, // No resizing + Self::Tiny => Some((64, 64)), + Self::Grid => Some((320, 320)), + Self::Preview => Some((1024, 1024)), + Self::Original => None, // No resizing } } - pub fn filename(&self) -> &'static str { + #[must_use] + pub const fn filename(&self) -> &'static str { match self { - CoverSize::Tiny => "tiny.jpg", - CoverSize::Grid => "grid.jpg", - CoverSize::Preview => "preview.jpg", - CoverSize::Original => "original.jpg", + Self::Tiny => "tiny.jpg", + Self::Grid => "grid.jpg", + Self::Preview => "preview.jpg", + Self::Original => "original.jpg", } } } -/// Generate multi-resolution covers for a book +/// Generate multi-resolution covers for a book. +/// +/// # Errors +/// +/// Returns [`PinakesError`] if the cover image cannot be decoded or encoded. pub fn generate_book_covers( media_id: MediaId, source_image: &[u8], @@ -401,26 +412,23 @@ pub fn generate_book_covers( ] { let cover_path = media_cover_dir.join(size.filename()); - match size.dimensions() { - Some((width, height)) => { - // Generate thumbnail - let thumb = img.thumbnail(width, height); - let mut output = std::fs::File::create(&cover_path)?; - let encoder = - image::codecs::jpeg::JpegEncoder::new_with_quality(&mut output, 90); - thumb.write_with_encoder(encoder).map_err(|e| { - PinakesError::MetadataExtraction(format!("cover encode: {e}")) - })?; - }, - None => { - // Save original - let mut output = std::fs::File::create(&cover_path)?; - let encoder = - image::codecs::jpeg::JpegEncoder::new_with_quality(&mut output, 95); - img.write_with_encoder(encoder).map_err(|e| { - PinakesError::MetadataExtraction(format!("cover encode: {e}")) - })?; - }, + if let Some((width, height)) = size.dimensions() { + // Generate thumbnail + let thumb = img.thumbnail(width, height); + let mut output = std::fs::File::create(&cover_path)?; + let encoder = + image::codecs::jpeg::JpegEncoder::new_with_quality(&mut output, 90); + 
thumb.write_with_encoder(encoder).map_err(|e| { + PinakesError::MetadataExtraction(format!("cover encode: {e}")) + })?; + } else { + // Save original + let mut output = std::fs::File::create(&cover_path)?; + let encoder = + image::codecs::jpeg::JpegEncoder::new_with_quality(&mut output, 95); + img.write_with_encoder(encoder).map_err(|e| { + PinakesError::MetadataExtraction(format!("cover encode: {e}")) + })?; } results.push((size, cover_path)); @@ -429,7 +437,11 @@ pub fn generate_book_covers( Ok(results) } -/// Extract full-size cover from an EPUB file +/// Extract full-size cover from an EPUB file. +/// +/// # Errors +/// +/// Returns [`PinakesError`] if the EPUB cannot be opened or read. pub fn extract_epub_cover(epub_path: &Path) -> Result>> { let mut doc = epub::doc::EpubDoc::new(epub_path) .map_err(|e| PinakesError::MetadataExtraction(format!("EPUB open: {e}")))?; @@ -459,7 +471,11 @@ pub fn extract_epub_cover(epub_path: &Path) -> Result>> { Ok(None) } -/// Extract full-size cover from a PDF file (first page) +/// Extract full-size cover from a PDF file (first page). +/// +/// # Errors +/// +/// Returns [`PinakesError`] if pdftoppm cannot be executed or fails. pub fn extract_pdf_cover(pdf_path: &Path) -> Result>> { let temp_dir = std::env::temp_dir(); let temp_prefix = @@ -476,8 +492,7 @@ pub fn extract_pdf_cover(pdf_path: &Path) -> Result>> { if !status.success() { return Err(PinakesError::MetadataExtraction(format!( - "pdftoppm exited with status {}", - status + "pdftoppm exited with status {status}" ))); } @@ -497,11 +512,13 @@ pub fn extract_pdf_cover(pdf_path: &Path) -> Result>> { } /// Returns the default covers directory under the data dir +#[must_use] pub fn default_covers_dir() -> PathBuf { crate::config::Config::default_data_dir().join("covers") } /// Returns the default thumbnail directory under the data dir. 
+#[must_use] pub fn default_thumbnail_dir() -> PathBuf { crate::config::Config::default_data_dir().join("thumbnails") } @@ -519,30 +536,37 @@ pub enum ThumbnailSize { impl ThumbnailSize { /// Get the pixel size for this thumbnail variant - pub fn pixels(&self) -> u32 { + #[must_use] + pub const fn pixels(&self) -> u32 { match self { - ThumbnailSize::Tiny => 64, - ThumbnailSize::Grid => 320, - ThumbnailSize::Preview => 1024, + Self::Tiny => 64, + Self::Grid => 320, + Self::Preview => 1024, } } /// Get the subdirectory name for this size - pub fn subdir_name(&self) -> &'static str { + #[must_use] + pub const fn subdir_name(&self) -> &'static str { match self { - ThumbnailSize::Tiny => "tiny", - ThumbnailSize::Grid => "grid", - ThumbnailSize::Preview => "preview", + Self::Tiny => "tiny", + Self::Grid => "grid", + Self::Preview => "preview", } } } -/// Generate all thumbnail sizes for a media file -/// Returns paths to the generated thumbnails (tiny, grid, preview) +/// Generate all thumbnail sizes for a media file. +/// +/// Returns paths to the generated thumbnails (tiny, grid, preview). +/// +/// # Errors +/// +/// Returns [`PinakesError`] if thumbnail generation fails. pub fn generate_all_thumbnail_sizes( media_id: MediaId, source_path: &Path, - media_type: MediaType, + media_type: &MediaType, thumbnail_base_dir: &Path, ) -> Result<(Option, Option, Option)> { let sizes = [ @@ -564,7 +588,7 @@ pub fn generate_all_thumbnail_sizes( let result = generate_thumbnail_with_config( media_id, source_path, - media_type.clone(), + media_type, &size_dir, &config, )?; diff --git a/crates/pinakes-core/src/transcode.rs b/crates/pinakes-core/src/transcode.rs index fe00c7b..12b2b62 100644 --- a/crates/pinakes-core/src/transcode.rs +++ b/crates/pinakes-core/src/transcode.rs @@ -1,4 +1,4 @@ -//! Transcoding service for media files using FFmpeg. +//! Transcoding service for media files using `FFmpeg`. 
use std::{ collections::HashMap, @@ -33,7 +33,7 @@ pub struct TranscodeSession { /// Duration of the source media in seconds, used for progress calculation. #[serde(default)] pub duration_secs: Option, - /// Handle to cancel the child FFmpeg process. + /// Handle to cancel the child `FFmpeg` process. #[serde(skip)] pub child_cancel: Option>, } @@ -50,7 +50,8 @@ pub enum TranscodeStatus { } impl TranscodeStatus { - pub fn as_str(&self) -> &str { + #[must_use] + pub const fn as_str(&self) -> &str { match self { Self::Pending => "pending", Self::Transcoding => "transcoding", @@ -81,6 +82,7 @@ impl TranscodeStatus { } } + #[must_use] pub fn error_message(&self) -> Option<&str> { match self { Self::Failed { error } => Some(error), @@ -89,7 +91,7 @@ impl TranscodeStatus { } } -/// Service managing transcoding sessions and FFmpeg invocations. +/// Service managing transcoding sessions and `FFmpeg` invocations. pub struct TranscodeService { pub config: TranscodingConfig, pub sessions: Arc>>, @@ -97,6 +99,7 @@ pub struct TranscodeService { } impl TranscodeService { + #[must_use] pub fn new(config: TranscodingConfig) -> Self { let max_concurrent = config.max_concurrent.max(1); Self { @@ -106,10 +109,12 @@ impl TranscodeService { } } - pub fn is_enabled(&self) -> bool { + #[must_use] + pub const fn is_enabled(&self) -> bool { self.config.enabled } + #[must_use] pub fn cache_dir(&self) -> PathBuf { self .config @@ -119,6 +124,11 @@ impl TranscodeService { } /// Start a transcode job for a media item. + /// + /// # Errors + /// + /// Returns an error if the profile is not found, the session directory cannot + /// be created, or the session cannot be stored in the database. 
pub async fn start_transcode( &self, media_id: MediaId, @@ -135,8 +145,7 @@ impl TranscodeService { .cloned() .ok_or_else(|| { crate::error::PinakesError::InvalidOperation(format!( - "unknown transcode profile: {}", - profile_name + "unknown transcode profile: {profile_name}" )) })?; @@ -144,13 +153,15 @@ impl TranscodeService { let session_dir = self.cache_dir().join(session_id.to_string()); tokio::fs::create_dir_all(&session_dir).await.map_err(|e| { crate::error::PinakesError::InvalidOperation(format!( - "failed to create session directory: {}", - e + "failed to create session directory: {e}" )) })?; let expires_at = Some( - Utc::now() + chrono::Duration::hours(self.config.cache_ttl_hours as i64), + Utc::now() + + chrono::Duration::hours( + i64::try_from(self.config.cache_ttl_hours).unwrap_or(i64::MAX), + ), ); let cancel_notify = Arc::new(tokio::sync::Notify::new()); @@ -166,7 +177,7 @@ impl TranscodeService { created_at: Utc::now(), expires_at, duration_secs, - child_cancel: Some(cancel_notify.clone()), + child_cancel: Some(Arc::clone(&cancel_notify)), }; // Store session in DB @@ -179,12 +190,12 @@ impl TranscodeService { } // Spawn the FFmpeg task - let sessions = self.sessions.clone(); - let semaphore = self.semaphore.clone(); + let sessions = Arc::clone(&self.sessions); + let semaphore = Arc::clone(&self.semaphore); let source = source_path.to_path_buf(); let hw_accel = self.config.hardware_acceleration.clone(); - let storage = storage.clone(); - let cancel = cancel_notify.clone(); + let storage = Arc::clone(storage); + let cancel = Arc::clone(&cancel_notify); tokio::spawn(async move { // Acquire semaphore permit to limit concurrency @@ -192,12 +203,14 @@ impl TranscodeService { Ok(permit) => permit, Err(e) => { tracing::error!("failed to acquire transcode semaphore: {}", e); - let error_msg = format!("semaphore closed: {}", e); - let mut s = sessions.write().await; - if let Some(sess) = s.get_mut(&session_id) { - sess.status = TranscodeStatus::Failed { - 
error: error_msg.clone(), - }; + let error_msg = format!("semaphore closed: {e}"); + { + let mut s = sessions.write().await; + if let Some(sess) = s.get_mut(&session_id) { + sess.status = TranscodeStatus::Failed { + error: error_msg.clone(), + }; + } } if let Err(e) = storage .update_transcode_status( @@ -234,10 +247,12 @@ impl TranscodeService { .await { Ok(()) => { - let mut s = sessions.write().await; - if let Some(sess) = s.get_mut(&session_id) { - sess.status = TranscodeStatus::Complete; - sess.progress = 1.0; + { + let mut s = sessions.write().await; + if let Some(sess) = s.get_mut(&session_id) { + sess.status = TranscodeStatus::Complete; + sess.progress = 1.0; + } } if let Err(e) = storage .update_transcode_status(session_id, TranscodeStatus::Complete, 1.0) @@ -277,6 +292,10 @@ impl TranscodeService { } /// Cancel a transcode session and clean up cache files. + /// + /// # Errors + /// + /// Returns an error if the database status update fails. pub async fn cancel_transcode( &self, session_id: Uuid, @@ -359,6 +378,7 @@ impl TranscodeService { } /// Resolve the path to a specific segment file on disk. + #[must_use] pub fn segment_path(&self, session_id: Uuid, segment_name: &str) -> PathBuf { // Sanitize segment_name to prevent path traversal let safe_name = std::path::Path::new(segment_name) @@ -381,7 +401,7 @@ impl TranscodeService { .join(safe_name) } - /// Find a session for a given media_id and profile. + /// Find a session for a given `media_id` and profile. pub async fn find_session( &self, media_id: MediaId, @@ -396,24 +416,25 @@ impl TranscodeService { } /// Parse a resolution string like "360p", "720p", "1080p" into (width, height). 
+#[must_use] pub fn parse_resolution(res: &str) -> (u32, u32) { match res.trim_end_matches('p') { "360" => (640, 360), "480" => (854, 480), - "720" => (1280, 720), "1080" => (1920, 1080), "1440" => (2560, 1440), "2160" | "4k" => (3840, 2160), - _ => (1280, 720), // default to 720p + _ => (1280, 720), // default to 720p (includes "720") } } -/// Estimate bandwidth (bits/sec) from a profile's max_bitrate_kbps. -pub fn estimate_bandwidth(profile: &TranscodeProfile) -> u32 { +/// Estimate bandwidth (bits/sec) from a profile's `max_bitrate_kbps`. +#[must_use] +pub const fn estimate_bandwidth(profile: &TranscodeProfile) -> u32 { profile.max_bitrate_kbps * 1000 } -/// Build FFmpeg CLI arguments for transcoding. +/// Build `FFmpeg` CLI arguments for transcoding. fn get_ffmpeg_args( source: &Path, output_dir: &Path, @@ -441,7 +462,7 @@ fn get_ffmpeg_args( "-b:v".to_string(), format!("{}k", profile.max_bitrate_kbps), "-vf".to_string(), - format!("scale={}:{}", w, h), + format!("scale={w}:{h}"), "-f".to_string(), "hls".to_string(), "-hls_time".to_string(), @@ -457,7 +478,7 @@ fn get_ffmpeg_args( args } -/// Run FFmpeg as a child process, parsing progress from stdout. +/// Run `FFmpeg` as a child process, parsing progress from stdout. 
async fn run_ffmpeg( args: &[String], sessions: &Arc>>, @@ -477,33 +498,30 @@ async fn run_ffmpeg( .spawn() .map_err(|e| { crate::error::PinakesError::InvalidOperation(format!( - "failed to spawn ffmpeg: {}", - e + "failed to spawn ffmpeg: {e}" )) })?; // Capture stderr in a spawned task for error reporting - let stderr_handle = if let Some(stderr) = child.stderr.take() { + let stderr_handle = child.stderr.take().map(|stderr| { let reader = BufReader::new(stderr); - Some(tokio::spawn(async move { + tokio::spawn(async move { let mut lines = reader.lines(); let mut collected = Vec::new(); while let Ok(Some(line)) = lines.next_line().await { collected.push(line); } collected - })) - } else { - None - }; + }) + }); // Parse progress from stdout - let stdout_handle = if let Some(stdout) = child.stdout.take() { + let stdout_handle = child.stdout.take().map(|stdout| { let reader = BufReader::new(stdout); let mut lines = reader.lines(); - let sessions = sessions.clone(); + let sessions = Arc::clone(sessions); - Some(tokio::spawn(async move { + tokio::spawn(async move { while let Ok(Some(line)) = lines.next_line().await { // FFmpeg progress output: "out_time_us=12345678" if let Some(time_str) = line.strip_prefix("out_time_us=") @@ -512,7 +530,11 @@ async fn run_ffmpeg( let secs = us / 1_000_000.0; // Calculate progress based on known duration let progress = match duration_secs { - Some(dur) if dur > 0.0 => (secs / dur).min(0.99) as f32, + Some(dur) if dur > 0.0 => { + #[allow(clippy::cast_possible_truncation)] + let p = (secs / dur).min(0.99) as f32; + p + }, _ => { // Duration unknown; don't update progress continue; @@ -524,19 +546,17 @@ async fn run_ffmpeg( } } } - })) - } else { - None - }; + }) + }); // Wait for child, but also listen for cancellation let status = tokio::select! 
{ result = child.wait() => { result.map_err(|e| { - crate::error::PinakesError::InvalidOperation(format!("ffmpeg process error: {}", e)) + crate::error::PinakesError::InvalidOperation(format!("ffmpeg process error: {e}")) })? } - _ = cancel.notified() => { + () = cancel.notified() => { // Kill the child process on cancel if let Err(e) = child.kill().await { tracing::error!("failed to kill ffmpeg process: {}", e); @@ -569,8 +589,7 @@ async fn run_ffmpeg( .collect::>() .join("\n"); return Err(crate::error::PinakesError::InvalidOperation(format!( - "ffmpeg exited with status: {}\nstderr:\n{}", - status, last_stderr + "ffmpeg exited with status: {status}\nstderr:\n{last_stderr}" ))); } diff --git a/crates/pinakes-core/src/upload.rs b/crates/pinakes-core/src/upload.rs index 7f88e5d..806c9bd 100644 --- a/crates/pinakes-core/src/upload.rs +++ b/crates/pinakes-core/src/upload.rs @@ -1,6 +1,6 @@ //! Upload processing for managed storage. //! -//! Handles file uploads, metadata extraction, and MediaItem creation +//! Handles file uploads, metadata extraction, and `MediaItem` creation //! for files stored in managed content-addressable storage. use std::{collections::HashMap, path::Path}; @@ -24,7 +24,11 @@ use crate::{ /// 1. Stores the file in managed storage /// 2. Checks for duplicates by content hash /// 3. Extracts metadata from the file -/// 4. Creates or updates the MediaItem +/// 4. Creates or updates the `MediaItem` +/// +/// # Errors +/// +/// Returns [`PinakesError`] if storage, hashing, or metadata extraction fails. 
pub async fn process_upload( storage: &DynStorageBackend, managed: &ManagedStorageService, @@ -54,13 +58,10 @@ pub async fn process_upload( let blob_path = managed.path(&content_hash); // Extract metadata - let extracted = - metadata::extract_metadata(&blob_path, media_type.clone()).ok(); + let extracted = metadata::extract_metadata(&blob_path, &media_type).ok(); // Create or get blob record - let mime = mime_type - .map(String::from) - .unwrap_or_else(|| media_type.mime_type().to_string()); + let mime = mime_type.map_or_else(|| media_type.mime_type(), String::from); let _blob = storage .get_or_create_blob(&content_hash, file_size, &mime) .await?; @@ -123,6 +124,10 @@ pub async fn process_upload( } /// Process an upload from bytes. +/// +/// # Errors +/// +/// Returns [`PinakesError`] if storage or processing fails. pub async fn process_upload_bytes( storage: &DynStorageBackend, managed: &ManagedStorageService, @@ -138,6 +143,10 @@ pub async fn process_upload_bytes( /// Process an upload from a local file path. /// /// This is useful for migrating existing external files to managed storage. +/// +/// # Errors +/// +/// Returns [`PinakesError`] if the file cannot be opened or processing fails. pub async fn process_upload_file( storage: &DynStorageBackend, managed: &ManagedStorageService, @@ -160,6 +169,11 @@ pub async fn process_upload_file( } /// Migrate an existing external media item to managed storage. +/// +/// # Errors +/// +/// Returns [`PinakesError`] if the media item cannot be found, the file is +/// missing, or the storage migration fails. 
pub async fn migrate_to_managed( storage: &DynStorageBackend, managed: &ManagedStorageService, @@ -190,7 +204,7 @@ pub async fn migrate_to_managed( } // Get or create blob record - let mime = item.media_type.mime_type().to_string(); + let mime = item.media_type.mime_type().clone(); let _blob = storage .get_or_create_blob(&new_hash, new_size, &mime) .await?; @@ -227,6 +241,11 @@ fn sanitize_filename(name: &str) -> String { } /// Delete a managed media item and clean up the blob if orphaned. +/// +/// # Errors +/// +/// Returns [`PinakesError`] if the media item cannot be found or deletion +/// fails. pub async fn delete_managed_media( storage: &DynStorageBackend, managed: &ManagedStorageService, diff --git a/crates/pinakes-core/src/users.rs b/crates/pinakes-core/src/users.rs index 208a897..27773eb 100644 --- a/crates/pinakes-core/src/users.rs +++ b/crates/pinakes-core/src/users.rs @@ -17,6 +17,7 @@ pub struct UserId(pub Uuid); impl UserId { /// Creates a new user ID. + #[must_use] pub fn new() -> Self { Self(Uuid::now_v7()) } @@ -96,21 +97,25 @@ pub enum LibraryPermission { impl LibraryPermission { /// Checks if read permission is granted. - pub fn can_read(&self) -> bool { + #[must_use] + pub const fn can_read(&self) -> bool { true } /// Checks if write permission is granted. - pub fn can_write(&self) -> bool { + #[must_use] + pub const fn can_write(&self) -> bool { matches!(self, Self::Write | Self::Admin) } /// Checks if admin permission is granted. 
- pub fn can_admin(&self) -> bool { + #[must_use] + pub const fn can_admin(&self) -> bool { matches!(self, Self::Admin) } - pub fn as_str(&self) -> &'static str { + #[must_use] + pub const fn as_str(&self) -> &'static str { match self { Self::Read => "read", Self::Write => "write", @@ -155,7 +160,7 @@ pub struct UpdateUserRequest { /// User authentication pub mod auth { - use super::*; + use super::{PinakesError, Result}; /// Hash a password using Argon2 pub fn hash_password(password: &str) -> Result { diff --git a/crates/pinakes-core/tests/book_metadata.rs b/crates/pinakes-core/tests/book_metadata.rs index 763b750..2223441 100644 --- a/crates/pinakes-core/tests/book_metadata.rs +++ b/crates/pinakes-core/tests/book_metadata.rs @@ -161,7 +161,7 @@ fn test_book_cover_generation() { // Verify all cover files exist for (size, path) in &covers { - assert!(path.exists(), "Cover {:?} should exist at {:?}", size, path); + assert!(path.exists(), "Cover {size:?} should exist at {path:?}"); } } @@ -176,13 +176,10 @@ async fn test_openlibrary_isbn_fetch() { // Should either succeed or fail gracefully // We don't assert success because network might not be available - match result { - Ok(book) => { - assert!(book.title.is_some()); - }, - Err(_) => { - // Network error or book not found - acceptable in tests - }, + if let Ok(book) = result { + assert!(book.title.is_some()); + } else { + // Network error or book not found - acceptable in tests } } @@ -195,14 +192,11 @@ async fn test_googlebooks_isbn_fetch() { // Use a known ISBN let result = client.fetch_by_isbn("9780547928227").await; - match result { - Ok(books) => { - if !books.is_empty() { - assert!(books[0].volume_info.title.is_some()); - } - }, - Err(_) => { - // Network error - acceptable in tests - }, + if let Ok(books) = result { + if !books.is_empty() { + assert!(books[0].volume_info.title.is_some()); + } + } else { + // Network error - acceptable in tests } } diff --git a/crates/pinakes-core/tests/integrity.rs 
b/crates/pinakes-core/tests/integrity.rs index 64ad01e..0c94a0b 100644 --- a/crates/pinakes-core/tests/integrity.rs +++ b/crates/pinakes-core/tests/integrity.rs @@ -158,13 +158,13 @@ async fn test_large_directory_performance() { storage.add_root_dir(root_dir.clone()).await.unwrap(); for i in 0..1000 { - let file_path = root_dir.join(format!("file_{}.mp3", i)); - fs::write(&file_path, format!("content {}", i)).unwrap(); + let file_path = root_dir.join(format!("file_{i}.mp3")); + fs::write(&file_path, format!("content {i}")).unwrap(); } for i in 0..500 { - let file_path = root_dir.join(format!("file_{}.mp3", i)); - let item = create_test_media_item(file_path, &format!("hash_{}", i)); + let file_path = root_dir.join(format!("file_{i}.mp3")); + let item = create_test_media_item(file_path, &format!("hash_{i}")); storage.insert_media(&item).await.unwrap(); } @@ -174,8 +174,7 @@ async fn test_large_directory_performance() { assert!( elapsed.as_secs() < 5, - "Detection took too long: {:?}", - elapsed + "Detection took too long: {elapsed:?}" ); assert_eq!(report.untracked_paths.len(), 500); diff --git a/crates/pinakes-core/tests/markdown_links_atomicity.rs b/crates/pinakes-core/tests/markdown_links_atomicity.rs index 12aa81f..0624d6d 100644 --- a/crates/pinakes-core/tests/markdown_links_atomicity.rs +++ b/crates/pinakes-core/tests/markdown_links_atomicity.rs @@ -6,7 +6,7 @@ mod common; fn create_test_note_content(num_links: usize) -> String { let mut content = String::from("# Test Note\n\n"); for i in 0..num_links { - content.push_str(&format!("Link {}: [[note_{}]]\n", i, i)); + content.push_str(&format!("Link {i}: [[note_{i}]]\n")); } content } diff --git a/crates/pinakes-core/tests/session_persistence.rs b/crates/pinakes-core/tests/session_persistence.rs index 9ef4fba..713ab92 100644 --- a/crates/pinakes-core/tests/session_persistence.rs +++ b/crates/pinakes-core/tests/session_persistence.rs @@ -121,7 +121,7 @@ async fn test_delete_user_sessions() { // Create multiple 
sessions for the same user for i in 0..3 { let session = SessionData { - session_token: format!("token_{}", i), + session_token: format!("token_{i}"), user_id: None, username: "testuser".to_string(), role: "viewer".to_string(), @@ -152,7 +152,7 @@ async fn test_delete_user_sessions() { for i in 0..3 { assert!( storage - .get_session(&format!("token_{}", i)) + .get_session(&format!("token_{i}")) .await .unwrap() .is_none() @@ -217,7 +217,7 @@ async fn test_list_active_sessions() { // Create active sessions for different users for i in 0..3 { let session = SessionData { - session_token: format!("user1_token_{}", i), + session_token: format!("user1_token_{i}"), user_id: None, username: "user1".to_string(), role: "viewer".to_string(), @@ -230,7 +230,7 @@ async fn test_list_active_sessions() { for i in 0..2 { let session = SessionData { - session_token: format!("user2_token_{}", i), + session_token: format!("user2_token_{i}"), user_id: None, username: "user2".to_string(), role: "admin".to_string(), @@ -279,9 +279,9 @@ async fn test_concurrent_session_operations() { let storage = storage.clone(); let handle = tokio::spawn(async move { let session = SessionData { - session_token: format!("concurrent_{}", i), + session_token: format!("concurrent_{i}"), user_id: None, - username: format!("user{}", i), + username: format!("user{i}"), role: "viewer".to_string(), created_at: now, expires_at: now + chrono::Duration::hours(24), @@ -301,7 +301,7 @@ async fn test_concurrent_session_operations() { for i in 0..10 { assert!( storage - .get_session(&format!("concurrent_{}", i)) + .get_session(&format!("concurrent_{i}")) .await .unwrap() .is_some()