chore: bump deps; fix clippy lints & cleanup

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I4c4815ad145650a07f108614034d2e996a6a6964
This commit is contained in:
raf 2026-03-02 17:05:28 +03:00
commit cd1161ee5d
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
41 changed files with 1528 additions and 953 deletions

468
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -24,20 +24,20 @@ tokio-util = { version = "0.7.18", features = ["rt"] }
# Serialization
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.149"
toml = "0.9.11"
toml = "1.0.3"
# CLI argument parsing
clap = { version = "4.5.57", features = ["derive", "env"] }
clap = { version = "4.5.60", features = ["derive", "env"] }
# Date/time
chrono = { version = "0.4.43", features = ["serde"] }
chrono = { version = "0.4.44", features = ["serde"] }
# IDs
uuid = { version = "1.20.0", features = ["v7", "serde"] }
uuid = { version = "1.21.0", features = ["v7", "serde"] }
# Error handling
thiserror = "2.0.18"
anyhow = "1.0.100"
anyhow = "1.0.102"
# Logging
tracing = "0.1.44"
@ -47,7 +47,7 @@ tracing-subscriber = { version = "0.3.22", features = ["env-filter", "json"] }
blake3 = "1.8.3"
# Metadata extraction
lofty = "0.23.1"
lofty = "0.23.2"
lopdf = "0.39.0"
epub = "2.1.5"
matroska = "0.30.0"
@ -55,7 +55,7 @@ gray_matter = "0.3.2"
kamadak-exif = "0.6.1"
# Database - SQLite
rusqlite = { version = "0.37", features = ["bundled", "column_decltype"] }
rusqlite = { version = "=0.37.0", features = ["bundled", "column_decltype"] }
# Database - PostgreSQL
tokio-postgres = { version = "0.7.16", features = [
@ -66,7 +66,7 @@ tokio-postgres = { version = "0.7.16", features = [
deadpool-postgres = "0.14.1"
postgres-types = { version = "0.2.12", features = ["derive"] }
postgres-native-tls = "0.5.2"
native-tls = "0.2.14"
native-tls = "0.2.18"
# Migrations
refinery = { version = "0.9.0", features = ["rusqlite", "tokio-postgres"] }
@ -87,7 +87,7 @@ governor = "0.10.4"
tower_governor = "0.8.0"
# HTTP client
reqwest = { version = "0.13.1", features = ["json", "query", "blocking"] }
reqwest = { version = "0.13.2", features = ["json", "query", "blocking"] }
# TUI
ratatui = "0.30.0"
@ -100,7 +100,7 @@ dioxus = { version = "0.7.3", features = ["desktop", "router"] }
async-trait = "0.1.89"
# Async utilities
futures = "0.3.31"
futures = "0.3.32"
# Image processing (thumbnails)
image = { version = "0.25.9", default-features = false, features = [
@ -113,7 +113,7 @@ image = { version = "0.25.9", default-features = false, features = [
] }
# Markdown rendering
pulldown-cmark = "0.13.0"
pulldown-cmark = "0.13.1"
ammonia = "4.1.2"
# Password hashing
@ -126,15 +126,77 @@ dioxus-free-icons = { version = "0.10.0", features = ["font-awesome-solid"] }
rfd = "0.17.2"
gloo-timers = { version = "0.3.0", features = ["futures"] }
rand = "0.10.0"
moka = { version = "0.12.13", features = ["future"] }
moka = { version = "0.12.14", features = ["future"] }
urlencoding = "2.1.3"
image_hasher = "3.1.0"
image_hasher = "3.1.1"
percent-encoding = "2.3.2"
http = "1.4.0"
# WASM runtime for plugins
wasmtime = { version = "41.0.3", features = ["component-model"] }
wit-bindgen = "0.52.0"
wasmtime = { version = "42.0.1", features = ["component-model"] }
wit-bindgen = "0.53.1"
# See:
# <https://doc.rust-lang.org/rustc/lints/listing/allowed-by-default.html>
[workspace.lints.clippy]
cargo = { level = "warn", priority = -1 }
complexity = { level = "warn", priority = -1 }
nursery = { level = "warn", priority = -1 }
pedantic = { level = "warn", priority = -1 }
perf = { level = "warn", priority = -1 }
style = { level = "warn", priority = -1 }
# The lint groups above enable some less-than-desirable rules; we override
# those individually below to keep our sanity.
absolute_paths = "allow"
arbitrary_source_item_ordering = "allow"
clone_on_ref_ptr = "warn"
dbg_macro = "warn"
empty_drop = "warn"
empty_structs_with_brackets = "warn"
exit = "warn"
filetype_is_file = "warn"
get_unwrap = "warn"
implicit_return = "allow"
infinite_loop = "warn"
map_with_unused_argument_over_ranges = "warn"
missing_docs_in_private_items = "allow"
multiple_crate_versions = "allow" # :(
non_ascii_literal = "allow"
non_std_lazy_statics = "warn"
pathbuf_init_then_push = "warn"
pattern_type_mismatch = "allow"
question_mark_used = "allow"
rc_buffer = "warn"
rc_mutex = "warn"
rest_pat_in_fully_bound_structs = "warn"
similar_names = "allow"
single_call_fn = "allow"
std_instead_of_core = "allow"
too_long_first_doc_paragraph = "allow"
too_many_lines = "allow"
undocumented_unsafe_blocks = "warn"
unnecessary_safety_comment = "warn"
unused_result_ok = "warn"
unused_trait_names = "allow"
# False positive:
# clippy's build script check doesn't recognize workspace-inherited metadata
# which means in our current workspace layout, we get pranked by Clippy.
cargo_common_metadata = "allow"
# In honor of a recent Cloudflare regression
panic = "deny"
unwrap_used = "deny"
# Less dangerous, but we'd like to know
# Those must be opt-in, and are fine ONLY in tests and examples.
expect_used = "warn"
print_stderr = "warn"
print_stdout = "warn"
todo = "warn"
unimplemented = "warn"
unreachable = "warn"
[profile.dev.package]
blake3 = { opt-level = 3 }

View file

@ -6,6 +6,22 @@ use crate::{
storage::DynStorageBackend,
};
/// Records an audit action for a media item.
///
/// # Arguments
///
/// * `storage` - Storage backend for persistence
/// * `media_id` - Optional media item that was affected
/// * `action` - The action being performed
/// * `details` - Optional additional details
///
/// # Returns
///
/// `Ok(())` on success
///
/// # Errors
///
/// Returns errors from the storage backend
pub async fn record_action(
storage: &DynStorageBackend,
media_id: Option<MediaId>,

View file

@ -2,6 +2,19 @@ use uuid::Uuid;
use crate::{error::Result, model::*, storage::DynStorageBackend};
/// Creates a new collection.
///
/// # Arguments
///
/// * `storage` - Storage backend
/// * `name` - Collection name
/// * `kind` - Manual or virtual collection
/// * `description` - Optional description
/// * `filter_query` - For virtual collections, the search query
///
/// # Returns
///
/// The created collection
pub async fn create_collection(
storage: &DynStorageBackend,
name: &str,
@ -14,6 +27,18 @@ pub async fn create_collection(
.await
}
/// Adds a media item to a collection.
///
/// # Arguments
///
/// * `storage` - Storage backend
/// * `collection_id` - Target collection
/// * `media_id` - Media item to add
/// * `position` - Position in the collection order
///
/// # Returns
///
/// `Ok(())` on success
pub async fn add_member(
storage: &DynStorageBackend,
collection_id: Uuid,
@ -32,6 +57,17 @@ pub async fn add_member(
.await
}
/// Removes a media item from a collection.
///
/// # Arguments
///
/// * `storage` - Storage backend
/// * `collection_id` - Target collection
/// * `media_id` - Media item to remove
///
/// # Returns
///
/// `Ok(())` on success
pub async fn remove_member(
storage: &DynStorageBackend,
collection_id: Uuid,
@ -49,6 +85,19 @@ pub async fn remove_member(
.await
}
/// Returns all media items in a collection.
///
/// Virtual collections are evaluated dynamically using their filter query.
/// Manual collections return stored members.
///
/// # Arguments
///
/// * `storage` - Storage backend
/// * `collection_id` - Collection to query
///
/// # Returns
///
/// List of media items in the collection
pub async fn get_members(
storage: &DynStorageBackend,
collection_id: Uuid,

View file

@ -110,6 +110,8 @@ pub struct Config {
pub sync: SyncConfig,
#[serde(default)]
pub sharing: SharingConfig,
#[serde(default)]
pub trash: TrashConfig,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -284,8 +286,6 @@ impl std::fmt::Display for UserRole {
}
}
// ===== Plugin Configuration =====
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PluginsConfig {
#[serde(default)]
@ -337,8 +337,6 @@ impl Default for PluginsConfig {
}
}
// ===== Transcoding Configuration =====
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TranscodingConfig {
#[serde(default)]
@ -400,8 +398,6 @@ impl Default for TranscodingConfig {
}
}
// ===== Enrichment Configuration =====
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct EnrichmentConfig {
#[serde(default)]
@ -432,8 +428,6 @@ pub struct EnrichmentSource {
pub api_endpoint: Option<String>,
}
// ===== Cloud Configuration =====
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CloudConfig {
#[serde(default)]
@ -483,8 +477,6 @@ impl Default for CloudConfig {
}
}
// ===== Analytics Configuration =====
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnalyticsConfig {
#[serde(default)]
@ -509,8 +501,6 @@ impl Default for AnalyticsConfig {
}
}
// ===== Photo Management Configuration =====
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PhotoConfig {
/// Generate perceptual hashes for image duplicate detection (CPU-intensive)
@ -568,8 +558,6 @@ impl Default for PhotoConfig {
}
}
// ===== Managed Storage Configuration =====
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ManagedStorageConfig {
/// Enable managed storage for file uploads
@ -613,23 +601,18 @@ impl Default for ManagedStorageConfig {
}
}
// ===== Sync Configuration =====
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
/// Strategy for resolving conflicts during cross-device sync.
///
/// Serialized in `snake_case` (e.g. `server_wins`, `keep_both`) via serde.
#[derive(
Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default,
)]
#[serde(rename_all = "snake_case")]
pub enum ConflictResolution {
/// The server's copy of the item wins.
ServerWins,
/// The client's copy of the item wins.
ClientWins,
/// Keep both copies; this is the default strategy (`#[default]`).
#[default]
KeepBoth,
/// Defer the conflict for manual resolution.
Manual,
}
// NOTE(review): this manual impl is redundant with — and would conflict
// with — the `#[derive(Default)]` + `#[default]` marker on
// `ConflictResolution` above; it appears here as the pre-refactor version
// being removed by this change.
impl Default for ConflictResolution {
fn default() -> Self {
// Matches the `#[default]` variant chosen in the derive.
Self::KeepBoth
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SyncConfig {
/// Enable cross-device sync functionality
@ -697,8 +680,6 @@ impl Default for SyncConfig {
}
}
// ===== Sharing Configuration =====
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SharingConfig {
/// Enable sharing functionality
@ -750,7 +731,29 @@ impl Default for SharingConfig {
}
}
// ===== Storage Configuration =====
/// Configuration for the trash feature.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TrashConfig {
/// Whether the trash feature is enabled; defaults to `false`.
#[serde(default)]
pub enabled: bool,
/// How many days trashed items are retained; defaults to 30
/// (see `default_trash_retention_days`).
#[serde(default = "default_trash_retention_days")]
pub retention_days: u64,
/// Whether the trash is emptied automatically; defaults to `false`.
/// NOTE(review): presumably consumed by the `TrashPurge` job — confirm.
#[serde(default)]
pub auto_empty: bool,
}
/// Serde default for `TrashConfig::retention_days`.
///
/// Trashed items are retained for 30 days by default.
fn default_trash_retention_days() -> u64 {
    const DEFAULT_RETENTION_DAYS: u64 = 30;
    DEFAULT_RETENTION_DAYS
}
impl Default for TrashConfig {
    fn default() -> Self {
        // Mirror the serde field defaults: feature off, no automatic
        // emptying, and the standard 30-day retention window.
        Self {
            enabled: false,
            auto_empty: false,
            retention_days: default_trash_retention_days(),
        }
    }
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StorageConfig {
@ -982,19 +985,19 @@ impl Config {
/// Ensure all directories needed by this config exist and are writable.
pub fn ensure_dirs(&self) -> crate::error::Result<()> {
if let Some(ref sqlite) = self.storage.sqlite {
if let Some(parent) = sqlite.path.parent() {
// Skip if parent is empty string (happens with bare filenames like
// "pinakes.db")
if !parent.as_os_str().is_empty() {
std::fs::create_dir_all(parent)?;
let metadata = std::fs::metadata(parent)?;
if metadata.permissions().readonly() {
return Err(crate::error::PinakesError::Config(format!(
"directory is not writable: {}",
parent.display()
)));
}
if let Some(ref sqlite) = self.storage.sqlite
&& let Some(parent) = sqlite.path.parent()
{
// Skip if parent is empty string (happens with bare filenames like
// "pinakes.db")
if !parent.as_os_str().is_empty() {
std::fs::create_dir_all(parent)?;
let metadata = std::fs::metadata(parent)?;
if metadata.permissions().readonly() {
return Err(crate::error::PinakesError::Config(format!(
"directory is not writable: {}",
parent.display()
)));
}
}
}
@ -1139,6 +1142,7 @@ impl Default for Config {
managed_storage: ManagedStorageConfig::default(),
sync: SyncConfig::default(),
sharing: SharingConfig::default(),
trash: TrashConfig::default(),
}
}
}

View file

@ -78,7 +78,7 @@ pub fn detect_events(
}
// Sort by date_taken
items.sort_by(|a, b| a.date_taken.unwrap().cmp(&b.date_taken.unwrap()));
items.sort_by_key(|a| a.date_taken.unwrap());
let mut events: Vec<DetectedEvent> = Vec::new();
let mut current_event_items: Vec<MediaId> = vec![items[0].id];
@ -181,7 +181,7 @@ pub fn detect_bursts(
}
// Sort by date_taken
items.sort_by(|a, b| a.date_taken.unwrap().cmp(&b.date_taken.unwrap()));
items.sort_by_key(|a| a.date_taken.unwrap());
let mut bursts: Vec<Vec<MediaId>> = Vec::new();
let mut current_burst: Vec<MediaId> = vec![items[0].id];

View file

@ -4,6 +4,19 @@ use crate::{error::Result, model::ContentHash};
const BUFFER_SIZE: usize = 65536;
/// Computes the BLAKE3 hash of a file asynchronously.
///
/// # Arguments
///
/// * `path` - Path to the file to hash
///
/// # Returns
///
/// The content hash
///
/// # Errors
///
/// Returns I/O errors or task execution errors
pub async fn compute_file_hash(path: &Path) -> Result<ContentHash> {
let path = path.to_path_buf();
let hash = tokio::task::spawn_blocking(move || -> Result<ContentHash> {
@ -24,6 +37,7 @@ pub async fn compute_file_hash(path: &Path) -> Result<ContentHash> {
Ok(hash)
}
/// Computes the BLAKE3 hash of a byte slice synchronously.
pub fn compute_hash_sync(data: &[u8]) -> ContentHash {
let hash = blake3::hash(data);
ContentHash::new(hash.to_hex().to_string())

View file

@ -17,6 +17,7 @@ use crate::{
thumbnail,
};
/// Result of importing a single file.
pub struct ImportResult {
pub media_id: MediaId,
pub was_duplicate: bool,
@ -26,7 +27,7 @@ pub struct ImportResult {
}
/// Options for import operations
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Default)]
pub struct ImportOptions {
/// Skip files that haven't changed since last scan (based on mtime)
pub incremental: bool,
@ -36,16 +37,6 @@ pub struct ImportOptions {
pub photo_config: crate::config::PhotoConfig,
}
impl Default for ImportOptions {
fn default() -> Self {
Self {
incremental: false,
force: false,
photo_config: crate::config::PhotoConfig::default(),
}
}
}
/// Get the modification time of a file as a Unix timestamp
fn get_file_mtime(path: &Path) -> Option<i64> {
std::fs::metadata(path)
@ -55,9 +46,20 @@ fn get_file_mtime(path: &Path) -> Option<i64> {
.map(|d| d.as_secs() as i64)
}
/// Check that a canonicalized path falls under at least one configured root
/// directory. If no roots are configured, all paths are allowed (for ad-hoc
/// imports).
/// Validates that a path is within configured root directories.
///
/// # Arguments
///
/// * `storage` - Storage backend to query root directories
/// * `path` - Path to validate
///
/// # Returns
///
/// `Ok(())` if path is within roots or no roots configured
///
/// # Errors
///
/// Returns `InvalidOperation` if path is outside all root directories
pub async fn validate_path_in_roots(
storage: &DynStorageBackend,
path: &Path,
@ -79,6 +81,20 @@ pub async fn validate_path_in_roots(
)))
}
/// Imports a file using default options.
///
/// # Arguments
///
/// * `storage` - Storage backend
/// * `path` - Path to the file to import
///
/// # Returns
///
/// Import result with media ID and status
///
/// # Errors
///
/// Returns `FileNotFound` if path doesn't exist
pub async fn import_file(
storage: &DynStorageBackend,
path: &Path,
@ -236,15 +252,15 @@ pub async fn import_file_with_options(
storage.insert_media(&item).await?;
// Extract and store markdown links for markdown files
if is_markdown {
if let Err(e) = extract_and_store_links(storage, media_id, &path).await {
tracing::warn!(
media_id = %media_id,
path = %path.display(),
error = %e,
"failed to extract markdown links"
);
}
if is_markdown
&& let Err(e) = extract_and_store_links(storage, media_id, &path).await
{
tracing::warn!(
media_id = %media_id,
path = %path.display(),
error = %e,
"failed to extract markdown links"
);
}
// Store extracted extra metadata as custom fields
@ -419,12 +435,10 @@ async fn extract_and_store_links(
media_id: MediaId,
path: &Path,
) -> Result<()> {
// Read file content
let content = tokio::fs::read_to_string(path).await.map_err(|e| {
PinakesError::Io(std::io::Error::new(
std::io::ErrorKind::Other,
format!("failed to read markdown file for link extraction: {e}"),
))
PinakesError::Io(std::io::Error::other(format!(
"failed to read markdown file for link extraction: {e}"
)))
})?;
// Extract links

View file

@ -14,6 +14,7 @@ use crate::{
storage::DynStorageBackend,
};
/// Report of orphaned, untracked, and moved files.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OrphanReport {
/// Media items whose files no longer exist on disk.
@ -24,6 +25,7 @@ pub struct OrphanReport {
pub moved_files: Vec<(MediaId, PathBuf, PathBuf)>,
}
/// Action to take when resolving orphans.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum OrphanAction {
@ -31,6 +33,7 @@ pub enum OrphanAction {
Ignore,
}
/// Report of file integrity verification results.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VerificationReport {
pub verified: usize,
@ -39,6 +42,7 @@ pub struct VerificationReport {
pub errors: Vec<(MediaId, String)>,
}
/// Status of a media item's file integrity.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum IntegrityStatus {
@ -72,9 +76,15 @@ impl std::str::FromStr for IntegrityStatus {
}
}
/// Detect orphaned media items (files that no longer exist on disk),
/// untracked files (files on disk not in database), and moved files (same hash,
/// different path).
/// Detect orphaned, untracked, and moved files.
///
/// # Arguments
///
/// * `storage` - Storage backend to query
///
/// # Returns
///
/// Report containing orphaned items, untracked files, and moved files
pub async fn detect_orphans(
storage: &DynStorageBackend,
) -> Result<OrphanReport> {

View file

@ -35,6 +35,7 @@ pub enum JobKind {
media_ids: Vec<MediaId>,
},
CleanupAnalytics,
TrashPurge,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -167,7 +168,7 @@ impl JobQueue {
cancel,
};
// If the channel is full we still record the job — it'll stay Pending
// If the channel is full we still record the job; it will stay Pending
let _ = self.tx.send(item).await;
id
}

View file

@ -164,7 +164,7 @@ impl ManagedStorageService {
self.verify(hash).await?;
}
fs::File::open(&path).await.map_err(|e| PinakesError::Io(e))
fs::File::open(&path).await.map_err(PinakesError::Io)
}
/// Read a blob entirely into memory.
@ -271,11 +271,11 @@ impl ManagedStorageService {
let mut file_entries = fs::read_dir(&sub_path).await?;
while let Some(file_entry) = file_entries.next_entry().await? {
let file_path = file_entry.path();
if file_path.is_file() {
if let Some(name) = file_path.file_name() {
hashes
.push(ContentHash::new(name.to_string_lossy().to_string()));
}
if file_path.is_file()
&& let Some(name) = file_path.file_name()
{
hashes
.push(ContentHash::new(name.to_string_lossy().to_string()));
}
}
}
@ -311,15 +311,15 @@ impl ManagedStorageService {
let path = entry.path();
if path.is_file() {
// Check if temp file is old (> 1 hour)
if let Ok(meta) = fs::metadata(&path).await {
if let Ok(modified) = meta.modified() {
let age = std::time::SystemTime::now()
.duration_since(modified)
.unwrap_or_default();
if age.as_secs() > 3600 {
let _ = fs::remove_file(&path).await;
count += 1;
}
if let Ok(meta) = fs::metadata(&path).await
&& let Ok(modified) = meta.modified()
{
let age = std::time::SystemTime::now()
.duration_since(modified)
.unwrap_or_default();
if age.as_secs() > 3600 {
let _ = fs::remove_file(&path).await;
count += 1;
}
}
}

View file

@ -6,10 +6,12 @@ use uuid::Uuid;
use crate::media_type::MediaType;
/// Unique identifier for a media item.
///
/// Newtype over [`Uuid`]; `Copy`, `Eq`, and `Hash` make it cheap to pass
/// around and usable as a map key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct MediaId(pub Uuid);
impl MediaId {
/// Creates a new media ID using UUIDv7.
///
/// UUIDv7 embeds a timestamp, so IDs generated later sort after earlier
/// ones — useful for time-ordered listings and index locality.
pub fn new() -> Self {
Self(Uuid::now_v7())
}
@ -27,10 +29,12 @@ impl Default for MediaId {
}
}
/// BLAKE3 content hash for deduplication.
///
/// Wraps the hex-encoded digest string produced by the hashing helpers.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct ContentHash(pub String);
impl ContentHash {
/// Creates a new content hash from a hex string.
///
/// No validation is performed; the caller is trusted to supply a
/// well-formed hex digest.
pub fn new(hex: String) -> Self {
Self(hex)
}
@ -42,8 +46,6 @@ impl fmt::Display for ContentHash {
}
}
// ===== Managed Storage Types =====
/// Storage mode for media items
#[derive(
Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize,
@ -162,12 +164,14 @@ pub struct MediaItem {
pub links_extracted_at: Option<DateTime<Utc>>,
}
/// A custom field attached to a media item.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CustomField {
pub field_type: CustomFieldType,
pub value: String,
}
/// Type of custom field value.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum CustomFieldType {
@ -177,6 +181,7 @@ pub enum CustomFieldType {
Boolean,
}
/// A tag that can be applied to media items.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Tag {
pub id: Uuid,
@ -185,6 +190,7 @@ pub struct Tag {
pub created_at: DateTime<Utc>,
}
/// A collection of media items.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Collection {
pub id: Uuid,
@ -196,6 +202,7 @@ pub struct Collection {
pub updated_at: DateTime<Utc>,
}
/// Kind of collection.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum CollectionKind {
@ -203,6 +210,7 @@ pub enum CollectionKind {
Virtual,
}
/// A member of a collection with position tracking.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CollectionMember {
pub collection_id: Uuid,
@ -211,6 +219,7 @@ pub struct CollectionMember {
pub added_at: DateTime<Utc>,
}
/// An audit trail entry.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuditEntry {
pub id: Uuid,
@ -329,6 +338,7 @@ impl fmt::Display for AuditAction {
}
}
/// Pagination parameters for list queries.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Pagination {
pub offset: u64,
@ -337,6 +347,7 @@ pub struct Pagination {
}
impl Pagination {
/// Creates a new pagination instance.
pub fn new(offset: u64, limit: u64, sort: Option<String>) -> Self {
Self {
offset,
@ -356,6 +367,7 @@ impl Default for Pagination {
}
}
/// A saved search query.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SavedSearch {
pub id: Uuid,
@ -367,6 +379,7 @@ pub struct SavedSearch {
// Book Management Types
/// Metadata for book-type media.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BookMetadata {
pub media_id: MediaId,
@ -385,6 +398,7 @@ pub struct BookMetadata {
pub updated_at: DateTime<Utc>,
}
/// Information about a book author.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct AuthorInfo {
pub name: String,
@ -394,6 +408,7 @@ pub struct AuthorInfo {
}
impl AuthorInfo {
/// Creates a new author with the given name.
pub fn new(name: String) -> Self {
Self {
name,
@ -403,6 +418,7 @@ impl AuthorInfo {
}
}
/// Sets the author's role.
pub fn with_role(mut self, role: String) -> Self {
self.role = role;
self
@ -435,6 +451,7 @@ pub struct ExtractedBookMetadata {
pub identifiers: HashMap<String, Vec<String>>,
}
/// Reading progress for a book.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReadingProgress {
pub media_id: MediaId,
@ -446,6 +463,7 @@ pub struct ReadingProgress {
}
impl ReadingProgress {
/// Creates a new reading progress entry.
pub fn new(
media_id: MediaId,
user_id: Uuid,
@ -473,6 +491,7 @@ impl ReadingProgress {
}
}
/// Reading status for a book.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ReadingStatus {
@ -493,8 +512,6 @@ impl fmt::Display for ReadingStatus {
}
}
// ===== Markdown Links (Obsidian-style) =====
/// Type of markdown link
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
@ -530,7 +547,7 @@ impl std::str::FromStr for LinkType {
}
}
/// A markdown link extracted from a file
/// A markdown link extracted from a file.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MarkdownLink {
pub id: Uuid,
@ -549,7 +566,7 @@ pub struct MarkdownLink {
pub created_at: DateTime<Utc>,
}
/// Information about a backlink (incoming link)
/// Information about a backlink (incoming link).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BacklinkInfo {
pub link_id: Uuid,
@ -562,14 +579,14 @@ pub struct BacklinkInfo {
pub link_type: LinkType,
}
/// Graph data for visualization
/// Graph data for visualization.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct GraphData {
pub nodes: Vec<GraphNode>,
pub edges: Vec<GraphEdge>,
}
/// A node in the graph visualization
/// A node in the graph visualization.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GraphNode {
pub id: String,
@ -582,7 +599,7 @@ pub struct GraphNode {
pub backlink_count: u32,
}
/// An edge (link) in the graph visualization
/// An edge (link) in the graph visualization.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GraphEdge {
pub source: String,

View file

@ -15,14 +15,9 @@ impl WasmRuntime {
/// Create a new WASM runtime
pub fn new() -> Result<Self> {
let mut config = Config::new();
// Enable WASM features
config.wasm_component_model(true);
config.async_support(true);
// Set resource limits
config.max_wasm_stack(1024 * 1024); // 1MB stack
config.consume_fuel(true); // Enable fuel metering for CPU limits
config.consume_fuel(true); // enable fuel metering for CPU limits
let engine = Engine::new(&config)?;
@ -39,10 +34,7 @@ impl WasmRuntime {
return Err(anyhow!("WASM file not found: {:?}", wasm_path));
}
// Read WASM bytes
let wasm_bytes = std::fs::read(wasm_path)?;
// Compile module
let module = Module::new(&self.engine, &wasm_bytes)?;
Ok(WasmPlugin {
@ -82,7 +74,6 @@ impl WasmPlugin {
) -> Result<Vec<u8>> {
let engine = self.module.engine();
// Create store with per-invocation data
let store_data = PluginStoreData {
context: self.context.clone(),
exchange_buffer: Vec::new(),
@ -97,17 +88,14 @@ impl WasmPlugin {
store.set_fuel(1_000_000_000)?;
}
// Set up linker with host functions
let mut linker = Linker::new(engine);
HostFunctions::setup_linker(&mut linker)?;
// Instantiate the module
let instance = linker.instantiate_async(&mut store, &self.module).await?;
// Get the memory export (if available)
let memory = instance.get_memory(&mut store, "memory");
// If there are params and memory is available, write them
// If there are params and memory is available, write them to the module
let mut alloc_offset: i32 = 0;
if !params.is_empty()
&& let Some(mem) = &memory
@ -136,7 +124,6 @@ impl WasmPlugin {
}
}
// Look up the exported function and call it
let func =
instance
.get_func(&mut store, function_name)
@ -150,9 +137,9 @@ impl WasmPlugin {
let mut results = vec![Val::I32(0); result_count];
// Call with appropriate params based on function signature
// Call with appropriate params based on function signature; convention:
// (ptr, len)
if param_count == 2 && !params.is_empty() {
// Convention: (ptr, len)
func
.call_async(
&mut store,
@ -171,13 +158,13 @@ impl WasmPlugin {
.await?;
}
// Read result from exchange buffer (host functions may have written data)
// Prefer data written into the exchange buffer by host functions
let exchange = std::mem::take(&mut store.data_mut().exchange_buffer);
if !exchange.is_empty() {
return Ok(exchange);
}
// Otherwise serialize the return values
// Fall back to serialising the WASM return value
if let Some(Val::I32(ret)) = results.first() {
Ok(ret.to_le_bytes().to_vec())
} else {
@ -208,9 +195,10 @@ impl Default for WasmPlugin {
pub struct HostFunctions;
impl HostFunctions {
/// Set up host functions in a linker
/// Registers all host ABI functions (`host_log`, `host_read_file`,
/// `host_write_file`, `host_http_request`, `host_get_config`,
/// `host_get_buffer`) into the given linker.
pub fn setup_linker(linker: &mut Linker<PluginStoreData>) -> Result<()> {
// host_log: log a message from the plugin
linker.func_wrap(
"env",
"host_log",
@ -240,7 +228,6 @@ impl HostFunctions {
},
)?;
// host_read_file: read a file into the exchange buffer
linker.func_wrap(
"env",
"host_read_file",
@ -300,7 +287,6 @@ impl HostFunctions {
},
)?;
// host_write_file: write data to a file
linker.func_wrap(
"env",
"host_write_file",
@ -373,7 +359,6 @@ impl HostFunctions {
},
)?;
// host_http_request: make an HTTP request (blocking)
linker.func_wrap(
"env",
"host_http_request",
@ -461,7 +446,6 @@ impl HostFunctions {
},
)?;
// host_get_config: read a config key into the exchange buffer
linker.func_wrap(
"env",
"host_get_config",
@ -500,7 +484,6 @@ impl HostFunctions {
},
)?;
// host_get_buffer: copy the exchange buffer to WASM memory
linker.func_wrap(
"env",
"host_get_buffer",

View file

@ -13,6 +13,7 @@ use tracing::{info, warn};
use crate::{error::Result, import, storage::DynStorageBackend};
/// Status of a directory scan operation.
pub struct ScanStatus {
pub scanning: bool,
pub files_found: usize,
@ -100,6 +101,17 @@ impl Default for ScanProgress {
}
}
/// Scans a directory with default options.
///
/// # Arguments
///
/// * `storage` - Storage backend
/// * `dir` - Directory to scan
/// * `ignore_patterns` - Patterns to exclude
///
/// # Returns
///
/// Scan status with counts and any errors
pub async fn scan_directory(
storage: &DynStorageBackend,
dir: &Path,
@ -115,7 +127,19 @@ pub async fn scan_directory(
.await
}
/// Scan a directory with incremental scanning support
/// Scans a directory with incremental scanning support.
///
/// Skips files that haven't changed since last scan based on mtime.
///
/// # Arguments
///
/// * `storage` - Storage backend
/// * `dir` - Directory to scan
/// * `ignore_patterns` - Patterns to exclude
///
/// # Returns
///
/// Scan status with counts and any errors
pub async fn scan_directory_incremental(
storage: &DynStorageBackend,
dir: &Path,
@ -129,6 +153,18 @@ pub async fn scan_directory_incremental(
.await
}
/// Scans a directory with progress reporting.
///
/// # Arguments
///
/// * `storage` - Storage backend
/// * `dir` - Directory to scan
/// * `ignore_patterns` - Patterns to exclude
/// * `progress` - Optional progress tracker
///
/// # Returns
///
/// Scan status with counts and any errors
pub async fn scan_directory_with_progress(
storage: &DynStorageBackend,
dir: &Path,
@ -230,6 +266,16 @@ pub async fn scan_directory_with_options(
Ok(status)
}
/// Scans all configured root directories with default options.
///
/// # Arguments
///
/// * `storage` - Storage backend
/// * `ignore_patterns` - Patterns to exclude
///
/// # Returns
///
/// Status for each root directory
pub async fn scan_all_roots(
storage: &DynStorageBackend,
ignore_patterns: &[String],
@ -243,7 +289,16 @@ pub async fn scan_all_roots(
.await
}
/// Scan all roots incrementally (skip unchanged files)
/// Scans all roots incrementally, skipping unchanged files.
///
/// # Arguments
///
/// * `storage` - Storage backend
/// * `ignore_patterns` - Patterns to exclude
///
/// # Returns
///
/// Status for each root directory
pub async fn scan_all_roots_incremental(
storage: &DynStorageBackend,
ignore_patterns: &[String],
@ -255,6 +310,17 @@ pub async fn scan_all_roots_incremental(
scan_all_roots_with_options(storage, ignore_patterns, None, &options).await
}
/// Scans all root directories with progress reporting.
///
/// # Arguments
///
/// * `storage` - Storage backend
/// * `ignore_patterns` - Patterns to exclude
/// * `progress` - Optional progress tracker
///
/// # Returns
///
/// Status for each root directory
pub async fn scan_all_roots_with_progress(
storage: &DynStorageBackend,
ignore_patterns: &[String],
@ -269,7 +335,18 @@ pub async fn scan_all_roots_with_progress(
.await
}
/// Scan all roots with full options including progress and incremental mode
/// Scans all roots with full options including progress and incremental mode.
///
/// # Arguments
///
/// * `storage` - Storage backend
/// * `ignore_patterns` - Patterns to exclude
/// * `progress` - Optional progress tracker
/// * `scan_options` - Scan configuration
///
/// # Returns
///
/// Status for each root directory
pub async fn scan_all_roots_with_options(
storage: &DynStorageBackend,
ignore_patterns: &[String],
@ -306,12 +383,14 @@ pub async fn scan_all_roots_with_options(
Ok(statuses)
}
/// Watches directories for file changes and imports modified files.
pub struct FileWatcher {
_watcher: Box<dyn Watcher + Send>,
rx: mpsc::Receiver<PathBuf>,
}
impl FileWatcher {
/// Creates a new file watcher for the given directories.
pub fn new(dirs: &[PathBuf]) -> Result<Self> {
let (tx, rx) = mpsc::channel(1024);
@ -393,11 +472,13 @@ impl FileWatcher {
Ok(Box::new(watcher))
}
/// Receives the next changed file path.
pub async fn next_change(&mut self) -> Option<PathBuf> {
self.rx.recv().await
}
}
/// Watches directories and imports files on change.
pub async fn watch_and_import(
storage: DynStorageBackend,
dirs: Vec<PathBuf>,

View file

@ -200,6 +200,22 @@ impl TaskScheduler {
running: false,
last_job_id: None,
},
ScheduledTask {
id: "trash_purge".to_string(),
name: "Trash Purge".to_string(),
kind: JobKind::TrashPurge,
schedule: Schedule::Weekly {
day: 0,
hour: 3,
minute: 0,
},
enabled: false,
last_run: None,
next_run: None,
last_status: None,
running: false,
last_job_id: None,
},
];
Self {
@ -404,6 +420,7 @@ mod tests {
use chrono::TimeZone;
use super::*;
use crate::config::TrashConfig;
#[test]
fn test_interval_next_run() {
@ -453,7 +470,7 @@ mod tests {
#[test]
fn test_weekly_same_day_future() {
// 2025-06-15 is Sunday (day 6). Schedule is Sunday 14:00, current is 10:00
// => today.
let from = Utc.with_ymd_and_hms(2025, 6, 15, 10, 0, 0).unwrap();
let schedule = Schedule::Weekly {
day: 6,
@ -467,7 +484,7 @@ mod tests {
#[test]
fn test_weekly_same_day_past() {
// 2025-06-15 is Sunday (day 6). Schedule is Sunday 08:00, current is 10:00
// => next week.
let from = Utc.with_ymd_and_hms(2025, 6, 15, 10, 0, 0).unwrap();
let schedule = Schedule::Weekly {
day: 6,
@ -545,4 +562,152 @@ mod tests {
"Sun 14:30"
);
}
#[test]
fn test_trash_purge_job_kind_serde() {
    // TrashPurge must round-trip through its tagged JSON representation.
    let kind = JobKind::TrashPurge;
    let encoded = serde_json::to_string(&kind).unwrap();
    assert_eq!(encoded, r#"{"type":"trash_purge"}"#);

    let decoded: JobKind = serde_json::from_str(&encoded).unwrap();
    assert!(matches!(decoded, JobKind::TrashPurge));
}
#[test]
fn test_trash_purge_scheduled_task_defaults() {
    // Build the same task that the scheduler registers by default.
    let schedule = Schedule::Weekly {
        day: 0,
        hour: 3,
        minute: 0,
    };
    let task = ScheduledTask {
        id: "trash_purge".to_string(),
        name: "Trash Purge".to_string(),
        kind: JobKind::TrashPurge,
        schedule,
        enabled: false,
        last_run: None,
        next_run: None,
        last_status: None,
        running: false,
        last_job_id: None,
    };

    // Identity fields are correct and the task starts disabled and idle.
    assert_eq!(task.id, "trash_purge");
    assert_eq!(task.name, "Trash Purge");
    assert!(matches!(task.kind, JobKind::TrashPurge));
    assert!(!task.enabled);
    assert!(!task.running);
}
#[tokio::test]
async fn test_default_tasks_contain_trash_purge() {
    // A freshly constructed scheduler must include trash_purge among its
    // built-in tasks, disabled until the operator opts in.
    let cancel = CancellationToken::new();
    let config = Arc::new(RwLock::new(Config::default()));
    let job_queue = JobQueue::new(1, |_, _, _, _| tokio::spawn(async move {}));
    let scheduler = TaskScheduler::new(job_queue, cancel, config, None);

    let tasks = scheduler.list_tasks().await;
    let found = tasks.iter().find(|t| t.id == "trash_purge");
    assert!(
        found.is_some(),
        "trash_purge task should be in default tasks"
    );

    let task = found.unwrap();
    assert_eq!(task.id, "trash_purge");
    assert_eq!(task.name, "Trash Purge");
    assert!(matches!(task.kind, JobKind::TrashPurge));
    assert!(!task.enabled, "trash_purge should be disabled by default");
}
#[test]
fn test_trash_purge_serde_roundtrip() {
    // Persistent fields survive a JSON round trip; runtime-only fields
    // (`running`, `last_job_id`) are skipped by serde and come back as
    // their defaults.
    let task = ScheduledTask {
        id: "trash_purge".to_string(),
        name: "Trash Purge".to_string(),
        kind: JobKind::TrashPurge,
        schedule: Schedule::Weekly {
            day: 0,
            hour: 3,
            minute: 0,
        },
        enabled: true,
        last_run: Some(Utc.with_ymd_and_hms(2025, 1, 15, 10, 0, 0).unwrap()),
        next_run: Some(Utc.with_ymd_and_hms(2025, 1, 19, 3, 0, 0).unwrap()),
        last_status: Some("completed".to_string()),
        running: false,
        last_job_id: Some(Uuid::now_v7()),
    };
    let json = serde_json::to_string(&task).unwrap();
    let deserialized: ScheduledTask = serde_json::from_str(&json).unwrap();
    assert_eq!(deserialized.id, "trash_purge");
    // Plain assert! instead of assert_eq!(_, true/false): fixes clippy's
    // bool_assert_comparison lint.
    assert!(deserialized.enabled);
    assert!(!deserialized.running);
    assert!(deserialized.last_job_id.is_none());
}
#[test]
fn test_all_job_kinds_serde() {
    // Every JobKind variant must survive a JSON round trip unchanged.
    let kinds: Vec<JobKind> = vec![
        JobKind::Scan { path: None },
        JobKind::Scan {
            path: Some(PathBuf::from("/test")),
        },
        JobKind::GenerateThumbnails { media_ids: vec![] },
        JobKind::VerifyIntegrity { media_ids: vec![] },
        JobKind::OrphanDetection,
        JobKind::CleanupThumbnails,
        JobKind::TrashPurge,
    ];
    for kind in kinds {
        let json = serde_json::to_string(&kind).unwrap();
        let deserialized: JobKind = serde_json::from_str(&json).unwrap();
        // The previous assertion only checked that the result matched *some*
        // variant, which any valid JobKind trivially does. Comparing the
        // re-serialized JSON instead verifies the full payload round-trips
        // exactly, without requiring JobKind to implement PartialEq.
        let reserialized = serde_json::to_string(&deserialized).unwrap();
        assert_eq!(json, reserialized);
    }
}
#[test]
fn test_task_serde_skips_runtime_fields() {
    // `running` and `last_job_id` are runtime state: they must NOT be
    // serialized, so after a round trip they revert to their defaults.
    let task = ScheduledTask {
        id: "test".to_string(),
        name: "Test".to_string(),
        kind: JobKind::TrashPurge,
        schedule: Schedule::Daily {
            hour: 0,
            minute: 0,
        },
        enabled: true,
        last_run: Some(Utc::now()),
        next_run: Some(Utc::now()),
        last_status: Some("running".to_string()),
        running: true,
        last_job_id: Some(Uuid::now_v7()),
    };
    let json = serde_json::to_string(&task).unwrap();
    let deserialized: ScheduledTask = serde_json::from_str(&json).unwrap();
    // Plain assert! instead of assert_eq!(_, false): fixes clippy's
    // bool_assert_comparison lint.
    assert!(!deserialized.running);
    assert!(deserialized.last_job_id.is_none());
}
#[test]
fn test_trash_config_defaults() {
    // Trash is opt-in by default: disabled, no auto-empty, 30-day retention.
    let cfg = TrashConfig::default();
    assert!(!cfg.enabled);
    assert!(!cfg.auto_empty);
    assert_eq!(cfg.retention_days, 30);
}
}

View file

@ -6,6 +6,7 @@ use winnow::{
token::{take_till, take_while},
};
/// Represents a parsed search query.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum SearchQuery {
FullText(String),
@ -39,6 +40,7 @@ pub enum SearchQuery {
},
}
/// Comparison operators for range queries.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum CompareOp {
GreaterThan,
@ -47,6 +49,7 @@ pub enum CompareOp {
LessOrEqual,
}
/// Date values for date-based queries.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum DateValue {
Today,
@ -61,6 +64,7 @@ pub enum DateValue {
DaysAgo(u32),
}
/// Request for executing a search.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SearchRequest {
pub query: SearchQuery,
@ -68,12 +72,14 @@ pub struct SearchRequest {
pub pagination: crate::model::Pagination,
}
/// Results of a search operation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SearchResults {
pub items: Vec<crate::model::MediaItem>,
pub total_count: u64,
}
/// Sorting options for search results.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
#[derive(Default)]
@ -139,19 +145,25 @@ fn parse_date_value(s: &str) -> Option<DateValue> {
}
/// Parse size strings like "10MB", "1GB", "500KB" to bytes.
///
/// Matching is case-insensitive and a bare number (no suffix) is taken
/// as bytes. Returns `None` if the number is invalid or if the
/// multiplication would overflow `i64`.
fn parse_size_value(s: &str) -> Option<i64> {
    let s = s.to_uppercase();
    // Check the multi-character suffixes before the bare 'B' so that
    // "KB"/"MB"/"GB" are not mis-split as "<prefix>" + "B".
    let (num_str, multiplier): (&str, i64) = if let Some(n) = s.strip_suffix("GB")
    {
        (n, 1024 * 1024 * 1024)
    } else if let Some(n) = s.strip_suffix("MB") {
        (n, 1024 * 1024)
    } else if let Some(n) = s.strip_suffix("KB") {
        (n, 1024)
    } else if let Some(n) = s.strip_suffix('B') {
        (n, 1)
    } else {
        (s.as_str(), 1)
    };
    let num: i64 = num_str.parse().ok()?;
    // checked_mul guards against overflow for absurdly large inputs.
    num.checked_mul(multiplier)
}
fn field_match(input: &mut &str) -> ModalResult<SearchQuery> {
@ -332,6 +344,22 @@ fn or_expr(input: &mut &str) -> ModalResult<SearchQuery> {
}
}
/// Parses a search query string into a structured query.
///
/// Supports full-text search, field matches, operators (AND/OR/NOT),
/// prefixes, fuzzy matching, and type/tag filters.
///
/// # Arguments
///
/// * `input` - Raw query string
///
/// # Returns
///
/// Parsed query tree
///
/// # Errors
///
/// Returns `SearchParse` error for invalid syntax
pub fn parse_search_query(input: &str) -> crate::error::Result<SearchQuery> {
let trimmed = input.trim();
if trimmed.is_empty() {

View file

@ -12,13 +12,14 @@ use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::{model::MediaId, users::UserId};
use crate::{error::PinakesError, model::MediaId, users::UserId};
/// Unique identifier for a share.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct ShareId(pub Uuid);
impl ShareId {
/// Creates a new share ID.
pub fn new() -> Self {
Self(Uuid::now_v7())
}
@ -47,6 +48,7 @@ pub enum ShareTarget {
}
impl ShareTarget {
/// Returns the type of target being shared.
pub fn target_type(&self) -> &'static str {
match self {
Self::Media { .. } => "media",
@ -56,6 +58,7 @@ impl ShareTarget {
}
}
/// Returns the ID of the target being shared.
pub fn target_id(&self) -> Uuid {
match self {
Self::Media { media_id } => media_id.0,
@ -87,6 +90,7 @@ pub enum ShareRecipient {
}
impl ShareRecipient {
/// Returns the type of recipient.
pub fn recipient_type(&self) -> &'static str {
match self {
Self::PublicLink { .. } => "public_link",
@ -117,7 +121,7 @@ pub struct SharePermissions {
}
impl SharePermissions {
/// Creates a new share with view-only permissions.
pub fn view_only() -> Self {
Self {
can_view: true,
@ -125,7 +129,7 @@ impl SharePermissions {
}
}
/// Creates a new share with download permissions (includes view).
pub fn download() -> Self {
Self {
can_view: true,
@ -134,7 +138,7 @@ impl SharePermissions {
}
}
/// Creates a new share with edit permissions (includes view and download).
pub fn edit() -> Self {
Self {
can_view: true,
@ -145,7 +149,7 @@ impl SharePermissions {
}
}
/// Creates a new share with full permissions.
pub fn full() -> Self {
Self {
can_view: true,
@ -157,7 +161,7 @@ impl SharePermissions {
}
}
/// Merges two permission sets, taking the most permissive value of each.
pub fn merge(&self, other: &Self) -> Self {
Self {
can_view: self.can_view || other.can_view,
@ -246,17 +250,17 @@ impl Share {
}
}
/// Checks if the share has expired.
pub fn is_expired(&self) -> bool {
self.expires_at.map(|exp| exp < Utc::now()).unwrap_or(false)
}
/// Checks if this is a public link share.
pub fn is_public(&self) -> bool {
matches!(self.recipient, ShareRecipient::PublicLink { .. })
}
/// Returns the public token if this is a public link share.
pub fn public_token(&self) -> Option<&str> {
match &self.recipient {
ShareRecipient::PublicLink { token, .. } => Some(token),
@ -322,6 +326,7 @@ pub struct ShareActivity {
}
impl ShareActivity {
/// Creates a new share activity entry.
pub fn new(share_id: ShareId, action: ShareActivityAction) -> Self {
Self {
id: Uuid::now_v7(),
@ -334,16 +339,19 @@ impl ShareActivity {
}
}
/// Sets the actor who performed the activity.
pub fn with_actor(mut self, actor_id: UserId) -> Self {
self.actor_id = Some(actor_id);
self
}
/// Sets the IP address of the actor.
pub fn with_ip(mut self, ip: &str) -> Self {
self.actor_ip = Some(ip.to_string());
self
}
/// Sets additional details about the activity.
pub fn with_details(mut self, details: &str) -> Self {
self.details = Some(details.to_string());
self
@ -400,6 +408,7 @@ pub struct ShareNotification {
}
impl ShareNotification {
/// Creates a new share notification.
pub fn new(
user_id: UserId,
share_id: ShareId,
@ -416,20 +425,18 @@ impl ShareNotification {
}
}
/// Generates a random share token.
///
/// Uses a UUIDv4 rendered in simple (hyphen-free) form: v4 is random,
/// unlike the time-ordered v7 used for entity IDs, so tokens are not
/// guessable from creation time.
pub fn generate_share_token() -> String {
    Uuid::new_v4().simple().to_string()
}
/// Hash a share password.
pub fn hash_share_password(password: &str) -> String {
// Use BLAKE3 for password hashing (in production, use Argon2)
blake3::hash(password.as_bytes()).to_hex().to_string()
/// Hashes a share password using Argon2id.
pub fn hash_share_password(password: &str) -> Result<String, PinakesError> {
crate::users::auth::hash_password(password)
}
/// Verify a share password.
/// Verifies a share password against an Argon2id hash.
pub fn verify_share_password(password: &str, hash: &str) -> bool {
let computed = hash_share_password(password);
computed == hash
crate::users::auth::verify_password(password, hash).unwrap_or(false)
}

View file

@ -142,29 +142,13 @@ pub trait StorageBackend: Send + Sync + 'static {
) -> Result<()>;
// Batch operations (transactional where supported)
async fn batch_delete_media(&self, ids: &[MediaId]) -> Result<u64> {
let mut count = 0u64;
for id in ids {
self.delete_media(*id).await?;
count += 1;
}
Ok(count)
}
async fn batch_delete_media(&self, ids: &[MediaId]) -> Result<u64>;
async fn batch_tag_media(
&self,
media_ids: &[MediaId],
tag_ids: &[Uuid],
) -> Result<u64> {
let mut count = 0u64;
for media_id in media_ids {
for tag_id in tag_ids {
self.tag_media(*media_id, *tag_id).await?;
count += 1;
}
}
Ok(count)
}
) -> Result<u64>;
// Integrity
async fn list_media_paths(
@ -342,7 +326,6 @@ pub trait StorageBackend: Send + Sync + 'static {
}
}
// ===== Ratings =====
async fn rate_media(
&self,
user_id: UserId,
@ -358,7 +341,6 @@ pub trait StorageBackend: Send + Sync + 'static {
) -> Result<Option<Rating>>;
async fn delete_rating(&self, id: Uuid) -> Result<()>;
// ===== Comments =====
async fn add_comment(
&self,
user_id: UserId,
@ -370,7 +352,6 @@ pub trait StorageBackend: Send + Sync + 'static {
-> Result<Vec<Comment>>;
async fn delete_comment(&self, id: Uuid) -> Result<()>;
// ===== Favorites =====
async fn add_favorite(
&self,
user_id: UserId,
@ -392,7 +373,6 @@ pub trait StorageBackend: Send + Sync + 'static {
media_id: MediaId,
) -> Result<bool>;
// ===== Share Links =====
async fn create_share_link(
&self,
media_id: MediaId,
@ -405,7 +385,6 @@ pub trait StorageBackend: Send + Sync + 'static {
async fn increment_share_views(&self, token: &str) -> Result<()>;
async fn delete_share_link(&self, id: Uuid) -> Result<()>;
// ===== Playlists =====
async fn create_playlist(
&self,
owner_id: UserId,
@ -450,7 +429,6 @@ pub trait StorageBackend: Send + Sync + 'static {
new_position: i32,
) -> Result<()>;
// ===== Analytics =====
async fn record_usage_event(&self, event: &UsageEvent) -> Result<()>;
async fn get_usage_events(
&self,
@ -477,7 +455,6 @@ pub trait StorageBackend: Send + Sync + 'static {
) -> Result<Option<f64>>;
async fn cleanup_old_events(&self, before: DateTime<Utc>) -> Result<u64>;
// ===== Subtitles =====
async fn add_subtitle(&self, subtitle: &Subtitle) -> Result<()>;
async fn get_media_subtitles(
&self,
@ -490,7 +467,6 @@ pub trait StorageBackend: Send + Sync + 'static {
offset_ms: i64,
) -> Result<()>;
// ===== External Metadata (Enrichment) =====
async fn store_external_metadata(
&self,
meta: &ExternalMetadata,
@ -501,7 +477,6 @@ pub trait StorageBackend: Send + Sync + 'static {
) -> Result<Vec<ExternalMetadata>>;
async fn delete_external_metadata(&self, id: Uuid) -> Result<()>;
// ===== Transcode Sessions =====
async fn create_transcode_session(
&self,
session: &TranscodeSession,
@ -522,7 +497,6 @@ pub trait StorageBackend: Send + Sync + 'static {
before: DateTime<Utc>,
) -> Result<u64>;
// ===== Session Management =====
/// Create a new session in the database
async fn create_session(&self, session: &SessionData) -> Result<()>;
@ -623,8 +597,6 @@ pub trait StorageBackend: Send + Sync + 'static {
pagination: &Pagination,
) -> Result<Vec<MediaItem>>;
// ===== Managed Storage =====
/// Insert a media item that uses managed storage
async fn insert_managed_media(&self, item: &MediaItem) -> Result<()>;
@ -658,8 +630,6 @@ pub trait StorageBackend: Send + Sync + 'static {
/// Get managed storage statistics
async fn managed_storage_stats(&self) -> Result<ManagedStorageStats>;
// ===== Sync Devices =====
/// Register a new sync device
async fn register_device(
&self,
@ -695,8 +665,6 @@ pub trait StorageBackend: Send + Sync + 'static {
/// Update the last_seen_at timestamp for a device
async fn touch_device(&self, id: crate::sync::DeviceId) -> Result<()>;
// ===== Sync Log =====
/// Record a change in the sync log
async fn record_sync_change(
&self,
@ -716,8 +684,6 @@ pub trait StorageBackend: Send + Sync + 'static {
/// Clean up old sync log entries
async fn cleanup_old_sync_log(&self, before: DateTime<Utc>) -> Result<u64>;
// ===== Device Sync State =====
/// Get sync state for a device and path
async fn get_device_sync_state(
&self,
@ -737,8 +703,6 @@ pub trait StorageBackend: Send + Sync + 'static {
device_id: crate::sync::DeviceId,
) -> Result<Vec<crate::sync::DeviceSyncState>>;
// ===== Upload Sessions (Chunked Uploads) =====
/// Create a new upload session
async fn create_upload_session(
&self,
@ -773,8 +737,6 @@ pub trait StorageBackend: Send + Sync + 'static {
/// Clean up expired upload sessions
async fn cleanup_expired_uploads(&self) -> Result<u64>;
// ===== Sync Conflicts =====
/// Record a sync conflict
async fn record_conflict(
&self,
@ -794,8 +756,6 @@ pub trait StorageBackend: Send + Sync + 'static {
resolution: crate::config::ConflictResolution,
) -> Result<()>;
// ===== Enhanced Sharing =====
/// Create a new share
async fn create_share(
&self,
@ -872,8 +832,6 @@ pub trait StorageBackend: Send + Sync + 'static {
/// Clean up expired shares
async fn cleanup_expired_shares(&self) -> Result<u64>;
// ===== Share Activity =====
/// Record share activity
async fn record_share_activity(
&self,
@ -887,8 +845,6 @@ pub trait StorageBackend: Send + Sync + 'static {
pagination: &Pagination,
) -> Result<Vec<crate::sharing::ShareActivity>>;
// ===== Share Notifications =====
/// Create a share notification
async fn create_share_notification(
&self,
@ -907,8 +863,6 @@ pub trait StorageBackend: Send + Sync + 'static {
/// Mark all notifications as read for a user
async fn mark_all_notifications_read(&self, user_id: UserId) -> Result<()>;
// ===== File Management =====
/// Rename a media item (changes file_name and updates path accordingly).
/// For external storage, this actually renames the file on disk.
/// For managed storage, this only updates the metadata.
@ -939,8 +893,6 @@ pub trait StorageBackend: Send + Sync + 'static {
Ok(results)
}
// ===== Trash / Soft Delete =====
/// Soft delete a media item (set deleted_at timestamp).
async fn soft_delete_media(&self, id: MediaId) -> Result<()>;
@ -960,8 +912,6 @@ pub trait StorageBackend: Send + Sync + 'static {
/// Count items in trash.
async fn count_trash(&self) -> Result<u64>;
// ===== Markdown Links (Obsidian-style) =====
/// Save extracted markdown links for a media item.
/// This replaces any existing links for the source media.
async fn save_markdown_links(

View file

@ -583,8 +583,7 @@ impl StorageBackend for PostgresBackend {
crate::storage::migrations::run_postgres_migrations(client).await
}
// ---- Root directories ----
// Root directories
async fn add_root_dir(&self, path: PathBuf) -> Result<()> {
let client = self
.pool
@ -638,8 +637,7 @@ impl StorageBackend for PostgresBackend {
Ok(())
}
// ---- Media CRUD ----
// Media CRUD
async fn insert_media(&self, item: &MediaItem) -> Result<()> {
let client = self
.pool
@ -1032,8 +1030,7 @@ impl StorageBackend for PostgresBackend {
Ok(count as u64)
}
// ---- Batch Operations ----
// Batch Operations
async fn batch_delete_media(&self, ids: &[MediaId]) -> Result<u64> {
if ids.is_empty() {
return Ok(0);
@ -1089,8 +1086,7 @@ impl StorageBackend for PostgresBackend {
Ok(rows)
}
// ---- Tags ----
// Tags
async fn create_tag(
&self,
name: &str,
@ -1257,8 +1253,7 @@ impl StorageBackend for PostgresBackend {
rows.iter().map(row_to_tag).collect()
}
// ---- Collections ----
// Collections
async fn create_collection(
&self,
name: &str,
@ -1499,8 +1494,7 @@ impl StorageBackend for PostgresBackend {
Ok(items)
}
// ---- Search ----
// Search
async fn search(&self, request: &SearchRequest) -> Result<SearchResults> {
let client = self
.pool
@ -1666,8 +1660,7 @@ impl StorageBackend for PostgresBackend {
})
}
// ---- Audit ----
// Audit
async fn record_audit(&self, entry: &AuditEntry) -> Result<()> {
let client = self
.pool
@ -1739,8 +1732,7 @@ impl StorageBackend for PostgresBackend {
rows.iter().map(row_to_audit_entry).collect()
}
// ---- Custom fields ----
// Custom fields
async fn set_custom_field(
&self,
media_id: MediaId,
@ -1821,8 +1813,7 @@ impl StorageBackend for PostgresBackend {
Ok(())
}
// ---- Duplicates ----
// Duplicates
async fn find_duplicates(&self) -> Result<Vec<Vec<MediaItem>>> {
let client = self
.pool
@ -2007,8 +1998,7 @@ impl StorageBackend for PostgresBackend {
Ok(groups)
}
// ---- Database management ----
// Database management
async fn database_stats(&self) -> Result<crate::storage::DatabaseStats> {
let client = self
.pool
@ -2524,7 +2514,6 @@ impl StorageBackend for PostgresBackend {
Ok(())
}
// ===== Ratings =====
async fn rate_media(
&self,
user_id: crate::users::UserId,
@ -2635,7 +2624,6 @@ impl StorageBackend for PostgresBackend {
Ok(())
}
// ===== Comments =====
async fn add_comment(
&self,
user_id: crate::users::UserId,
@ -2712,7 +2700,6 @@ impl StorageBackend for PostgresBackend {
Ok(())
}
// ===== Favorites =====
async fn add_favorite(
&self,
user_id: crate::users::UserId,
@ -2838,7 +2825,6 @@ impl StorageBackend for PostgresBackend {
Ok(count > 0)
}
// ===== Share Links =====
async fn create_share_link(
&self,
media_id: MediaId,
@ -2942,7 +2928,6 @@ impl StorageBackend for PostgresBackend {
Ok(())
}
// ===== Playlists =====
async fn create_playlist(
&self,
owner_id: crate::users::UserId,
@ -3250,7 +3235,6 @@ impl StorageBackend for PostgresBackend {
Ok(())
}
// ===== Analytics =====
async fn record_usage_event(
&self,
event: &crate::analytics::UsageEvent,
@ -3540,7 +3524,6 @@ impl StorageBackend for PostgresBackend {
Ok(affected)
}
// ===== Subtitles =====
async fn add_subtitle(
&self,
subtitle: &crate::subtitles::Subtitle,
@ -3652,7 +3635,6 @@ impl StorageBackend for PostgresBackend {
Ok(())
}
// ===== External Metadata (Enrichment) =====
async fn store_external_metadata(
&self,
meta: &crate::enrichment::ExternalMetadata,
@ -3742,7 +3724,6 @@ impl StorageBackend for PostgresBackend {
Ok(())
}
// ===== Transcode Sessions =====
async fn create_transcode_session(
&self,
session: &crate::transcode::TranscodeSession,
@ -3930,8 +3911,6 @@ impl StorageBackend for PostgresBackend {
Ok(affected)
}
// ===== Session Management =====
async fn create_session(
&self,
session: &crate::storage::SessionData,
@ -4666,10 +4645,6 @@ impl StorageBackend for PostgresBackend {
items
}
// =========================================================================
// Managed Storage
// =========================================================================
async fn insert_managed_media(&self, item: &MediaItem) -> Result<()> {
let client = self.pool.get().await.map_err(|e| {
PinakesError::Database(format!("failed to get connection: {e}"))
@ -4967,10 +4942,6 @@ impl StorageBackend for PostgresBackend {
})
}
// =========================================================================
// Sync Devices
// =========================================================================
async fn register_device(
&self,
device: &crate::sync::SyncDevice,
@ -5188,10 +5159,6 @@ impl StorageBackend for PostgresBackend {
Ok(())
}
// =========================================================================
// Sync Log
// =========================================================================
async fn record_sync_change(
&self,
change: &crate::sync::SyncLogEntry,
@ -5310,10 +5277,6 @@ impl StorageBackend for PostgresBackend {
Ok(result)
}
// =========================================================================
// Device Sync State
// =========================================================================
async fn get_device_sync_state(
&self,
device_id: crate::sync::DeviceId,
@ -5437,10 +5400,6 @@ impl StorageBackend for PostgresBackend {
)
}
// =========================================================================
// Upload Sessions
// =========================================================================
async fn create_upload_session(
&self,
session: &crate::sync::UploadSession,
@ -5618,10 +5577,6 @@ impl StorageBackend for PostgresBackend {
Ok(result)
}
// =========================================================================
// Sync Conflicts
// =========================================================================
async fn record_conflict(
&self,
conflict: &crate::sync::SyncConflict,
@ -5737,10 +5692,6 @@ impl StorageBackend for PostgresBackend {
Ok(())
}
// =========================================================================
// Shares
// =========================================================================
async fn create_share(
&self,
share: &crate::sharing::Share,
@ -6050,10 +6001,10 @@ impl StorageBackend for PostgresBackend {
for share in shares {
// Skip expired shares
if let Some(exp) = share.expires_at {
if exp < now {
continue;
}
if let Some(exp) = share.expires_at
&& exp < now
{
continue;
}
match (&share.recipient, user_id) {
@ -6167,10 +6118,6 @@ impl StorageBackend for PostgresBackend {
Ok(result)
}
// =========================================================================
// Share Activity
// =========================================================================
async fn record_share_activity(
&self,
activity: &crate::sharing::ShareActivity,
@ -6244,10 +6191,6 @@ impl StorageBackend for PostgresBackend {
)
}
// =========================================================================
// Share Notifications
// =========================================================================
async fn create_share_notification(
&self,
notification: &crate::sharing::ShareNotification,
@ -6349,8 +6292,6 @@ impl StorageBackend for PostgresBackend {
Ok(())
}
// ===== File Management =====
async fn rename_media(&self, id: MediaId, new_name: &str) -> Result<String> {
// Validate the new name
if new_name.is_empty() || new_name.contains('/') || new_name.contains('\\')
@ -6468,8 +6409,6 @@ impl StorageBackend for PostgresBackend {
Ok(old_path)
}
// ===== Trash / Soft Delete =====
async fn soft_delete_media(&self, id: MediaId) -> Result<()> {
let client = self
.pool
@ -6671,8 +6610,6 @@ impl StorageBackend for PostgresBackend {
Ok(count as u64)
}
// ===== Markdown Links (Obsidian-style) =====
async fn save_markdown_links(
&self,
media_id: MediaId,

File diff suppressed because it is too large Load diff

View file

@ -204,17 +204,16 @@ impl ChunkedUploadManager {
let mut entries = fs::read_dir(&self.temp_dir).await?;
while let Some(entry) = entries.next_entry().await? {
let path = entry.path();
if path.extension().map(|e| e == "upload").unwrap_or(false) {
if let Ok(metadata) = fs::metadata(&path).await {
if let Ok(modified) = metadata.modified() {
let age = std::time::SystemTime::now()
.duration_since(modified)
.unwrap_or_default();
if age > max_age {
let _ = fs::remove_file(&path).await;
count += 1;
}
}
if path.extension().map(|e| e == "upload").unwrap_or(false)
&& let Ok(metadata) = fs::metadata(&path).await
&& let Ok(modified) = metadata.modified()
{
let age = std::time::SystemTime::now()
.duration_since(modified)
.unwrap_or_default();
if age > max_age {
let _ = fs::remove_file(&path).await;
count += 1;
}
}
}

View file

@ -35,22 +35,19 @@ impl fmt::Display for DeviceId {
}
/// Type of sync device.
///
/// `Other` is the default for clients that don't report a recognized
/// device type. `Default` is derived via the `#[default]` variant
/// attribute instead of a manual `impl Default`.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default,
)]
#[serde(rename_all = "lowercase")]
pub enum DeviceType {
    Desktop,
    Mobile,
    Tablet,
    Server,
    #[default]
    Other,
}
impl fmt::Display for DeviceType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
@ -353,7 +350,7 @@ impl UploadSession {
timeout_hours: u64,
) -> Self {
let now = Utc::now();
let chunk_count = (expected_size + chunk_size - 1) / chunk_size;
let chunk_count = expected_size.div_ceil(chunk_size);
Self {
id: Uuid::now_v7(),
device_id,

View file

@ -6,6 +6,17 @@ use crate::{
storage::DynStorageBackend,
};
/// Creates a new tag.
///
/// # Arguments
///
/// * `storage` - Storage backend
/// * `name` - Tag name
/// * `parent_id` - Optional parent tag for hierarchy
///
/// # Returns
///
/// The created tag
pub async fn create_tag(
storage: &DynStorageBackend,
name: &str,
@ -14,6 +25,17 @@ pub async fn create_tag(
storage.create_tag(name, parent_id).await
}
/// Applies a tag to a media item.
///
/// # Arguments
///
/// * `storage` - Storage backend
/// * `media_id` - Media item to tag
/// * `tag_id` - Tag to apply
///
/// # Returns
///
/// `Ok(())` on success
pub async fn tag_media(
storage: &DynStorageBackend,
media_id: MediaId,
@ -29,6 +51,17 @@ pub async fn tag_media(
.await
}
/// Removes a tag from a media item.
///
/// # Arguments
///
/// * `storage` - Storage backend
/// * `media_id` - Media item to untag
/// * `tag_id` - Tag to remove
///
/// # Returns
///
/// `Ok(())` on success
pub async fn untag_media(
storage: &DynStorageBackend,
media_id: MediaId,
@ -44,6 +77,16 @@ pub async fn untag_media(
.await
}
/// Returns all descendants of a tag in the hierarchy.
///
/// # Arguments
///
/// * `storage` - Storage backend
/// * `tag_id` - Root tag to query
///
/// # Returns
///
/// List of child tags
pub async fn get_tag_tree(
storage: &DynStorageBackend,
tag_id: Uuid,

View file

@ -261,7 +261,7 @@ fn generate_raw_thumbnail(
)));
}
// The extracted preview is typically a JPEG; try loading it
if temp_ppm.exists() {
let result = image::open(&temp_ppm);
let _ = std::fs::remove_file(&temp_ppm);

View file

@ -16,6 +16,7 @@ use crate::{
pub struct UserId(pub Uuid);
impl UserId {
/// Creates a new user ID.
pub fn new() -> Self {
Self(Uuid::now_v7())
}
@ -94,14 +95,17 @@ pub enum LibraryPermission {
}
impl LibraryPermission {
/// Checks if read permission is granted.
pub fn can_read(&self) -> bool {
true
}
/// Checks if write permission is granted.
pub fn can_write(&self) -> bool {
matches!(self, Self::Write | Self::Admin)
}
/// Checks if admin permission is granted.
pub fn can_admin(&self) -> bool {
matches!(self, Self::Admin)
}

View file

@ -1,3 +1,8 @@
// Common test utilities shared across integration tests
// Functions may appear unused in individual test binaries - they're used across
// the test suite
#![allow(dead_code)]
use std::{collections::HashMap, path::PathBuf, sync::Arc};
use pinakes_core::{

View file

@ -1,9 +1,6 @@
use std::{collections::HashMap, sync::Arc};
use std::collections::HashMap;
use pinakes_core::{
model::*,
storage::{StorageBackend, sqlite::SqliteBackend},
};
use pinakes_core::{model::*, storage::StorageBackend};
mod common;
use common::{make_test_media, setup};

View file

@ -136,7 +136,7 @@ pub fn create_router_with_tls(
)
// Webhooks (read)
.route("/webhooks", get(routes::webhooks::list_webhooks))
// Auth endpoints (self-service); login is handled separately with a stricter rate limit
.route("/auth/logout", post(routes::auth::logout))
.route("/auth/me", get(routes::auth::me))
.route("/auth/revoke-all", post(routes::auth::revoke_all_sessions))

View file

@ -721,8 +721,6 @@ impl From<pinakes_core::users::UserLibraryAccess> for UserLibraryResponse {
}
}
// ===== Social (Ratings, Comments, Favorites, Shares) =====
#[derive(Debug, Serialize)]
pub struct RatingResponse {
pub id: String,
@ -816,8 +814,6 @@ impl From<pinakes_core::social::ShareLink> for ShareLinkResponse {
}
}
// ===== Playlists =====
#[derive(Debug, Serialize)]
pub struct PlaylistResponse {
pub id: String,
@ -875,8 +871,6 @@ pub struct ReorderPlaylistRequest {
pub new_position: i32,
}
// ===== Analytics =====
#[derive(Debug, Serialize)]
pub struct UsageEventResponse {
pub id: String,
@ -924,8 +918,6 @@ pub struct WatchProgressResponse {
pub progress_secs: f64,
}
// ===== Subtitles =====
#[derive(Debug, Serialize)]
pub struct SubtitleResponse {
pub id: String,
@ -968,8 +960,6 @@ pub struct UpdateSubtitleOffsetRequest {
pub offset_ms: i64,
}
// ===== Enrichment =====
#[derive(Debug, Serialize)]
pub struct ExternalMetadataResponse {
pub id: String,
@ -1005,8 +995,6 @@ impl From<pinakes_core::enrichment::ExternalMetadata>
}
}
// ===== Transcode =====
#[derive(Debug, Serialize)]
pub struct TranscodeSessionResponse {
pub id: String,
@ -1039,8 +1027,6 @@ pub struct CreateTranscodeRequest {
pub profile: String,
}
// ===== Managed Storage / Upload =====
#[derive(Debug, Serialize)]
pub struct UploadResponse {
pub media_id: String,
@ -1081,8 +1067,6 @@ impl From<pinakes_core::model::ManagedStorageStats>
}
}
// ===== Sync =====
#[derive(Debug, Deserialize)]
pub struct RegisterDeviceRequest {
pub name: String,
@ -1269,8 +1253,6 @@ pub struct AcknowledgeChangesRequest {
pub cursor: i64,
}
// ===== Enhanced Sharing =====
#[derive(Debug, Deserialize)]
pub struct CreateShareRequest {
pub target_type: String,

View file

@ -438,13 +438,123 @@ async fn main() -> Result<()> {
}
},
JobKind::Enrich { media_ids } => {
// Enrichment job placeholder
use pinakes_core::{
enrichment::{
MetadataEnricher,
books::BookEnricher,
lastfm::LastFmEnricher,
musicbrainz::MusicBrainzEnricher,
tmdb::TmdbEnricher,
},
media_type::MediaCategory,
};
let enrich_cfg = &config.enrichment;
let mut enrichers: Vec<Box<dyn MetadataEnricher>> = Vec::new();
if enrich_cfg.enabled {
if enrich_cfg.sources.musicbrainz.enabled {
enrichers.push(Box::new(MusicBrainzEnricher::new()));
}
if let (true, Some(key)) = (
enrich_cfg.sources.tmdb.enabled,
enrich_cfg.sources.tmdb.api_key.clone(),
) {
enrichers.push(Box::new(TmdbEnricher::new(key)));
}
if let (true, Some(key)) = (
enrich_cfg.sources.lastfm.enabled,
enrich_cfg.sources.lastfm.api_key.clone(),
) {
enrichers.push(Box::new(LastFmEnricher::new(key)));
}
// BookEnricher handles documents/epub. No dedicated config
// key is required; the Google Books key is optional.
enrichers.push(Box::new(BookEnricher::new(None)));
}
let total = media_ids.len();
let mut enriched: usize = 0;
let mut errors: usize = 0;
'items: for media_id in media_ids {
if cancel.is_cancelled() {
break 'items;
}
let item = match storage.get_media(media_id).await {
Ok(i) => i,
Err(e) => {
tracing::warn!(
%media_id,
error = %e,
"enrich: failed to fetch media item"
);
errors += 1;
continue;
},
};
// Select enrichers appropriate for this media category.
let category = item.media_type.category();
for enricher in &enrichers {
let source = enricher.source();
use pinakes_core::enrichment::EnrichmentSourceType;
let applicable = match source {
EnrichmentSourceType::MusicBrainz
| EnrichmentSourceType::LastFm => {
category == MediaCategory::Audio
},
EnrichmentSourceType::Tmdb => {
category == MediaCategory::Video
},
EnrichmentSourceType::OpenLibrary
| EnrichmentSourceType::GoogleBooks => {
category == MediaCategory::Document
},
};
if !applicable {
continue;
}
match enricher.enrich(&item).await {
Ok(Some(meta)) => {
if let Err(e) = storage.store_external_metadata(&meta).await
{
tracing::warn!(
%media_id,
%source,
error = %e,
"enrich: failed to store external metadata"
);
errors += 1;
} else {
enriched += 1;
}
},
Ok(None) => {},
Err(e) => {
tracing::warn!(
%media_id,
%source,
error = %e,
"enrich: enricher returned error"
);
errors += 1;
},
}
}
}
JobQueue::complete(
&jobs,
job_id,
serde_json::json!({"media_ids": media_ids.len(), "status": "not_implemented"}),
)
.await;
&jobs,
job_id,
serde_json::json!({
"total": total,
"enriched": enriched,
"errors": errors,
}),
)
.await;
},
JobKind::CleanupAnalytics => {
let before = chrono::Utc::now() - chrono::Duration::days(90);
@ -460,6 +570,27 @@ async fn main() -> Result<()> {
Err(e) => JobQueue::fail(&jobs, job_id, e.to_string()).await,
}
},
JobKind::TrashPurge => {
let retention_days = config.trash.retention_days;
let before = chrono::Utc::now()
- chrono::Duration::days(retention_days as i64);
match storage.purge_old_trash(before).await {
Ok(count) => {
tracing::info!(count, "purged {} items from trash", count);
JobQueue::complete(
&jobs,
job_id,
serde_json::json!({"purged": count, "retention_days": retention_days}),
)
.await;
},
Err(e) => {
tracing::error!(error = %e, "failed to purge trash");
JobQueue::fail(&jobs, job_id, e.to_string()).await;
},
}
},
};
drop(cancel);
})

View file

@ -836,8 +836,6 @@ pub async fn get_media_count(
Ok(Json(MediaCountResponse { count }))
}
// ===== File Management Endpoints =====
pub async fn rename_media(
State(state): State<AppState>,
Path(id): Path<Uuid>,
@ -978,8 +976,6 @@ pub async fn batch_move_media(
}
}
// ===== Trash Endpoints =====
pub async fn soft_delete_media(
State(state): State<AppState>,
Path(id): Path<Uuid>,

View file

@ -25,8 +25,6 @@ use uuid::Uuid;
use crate::{error::ApiError, state::AppState};
// ===== Response DTOs =====
/// Response for backlinks query
#[derive(Debug, Serialize)]
pub struct BacklinksResponse {
@ -200,8 +198,6 @@ pub struct UnresolvedLinksResponse {
pub count: u64,
}
// ===== Handlers =====
/// Get backlinks (incoming links) to a media item.
///
/// GET /api/v1/media/{id}/backlinks

View file

@ -93,7 +93,12 @@ pub async fn create_share(
let recipient = match req.recipient_type.as_str() {
"public_link" => {
let token = generate_share_token();
let password_hash = req.password.as_ref().map(|p| hash_share_password(p));
let password_hash = req
.password
.as_ref()
.map(|p| hash_share_password(p))
.transpose()
.map_err(ApiError)?;
ShareRecipient::PublicLink {
token,
password_hash,
@ -409,35 +414,37 @@ pub async fn access_shared(
.map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?;
// Check expiration
if let Some(expires_at) = share.expires_at {
if Utc::now() > expires_at {
return Err(ApiError::not_found("Share has expired"));
}
if let Some(expires_at) = share.expires_at
&& Utc::now() > expires_at
{
return Err(ApiError::not_found("Share has expired"));
}
// Check password if required
if let ShareRecipient::PublicLink { password_hash, .. } = &share.recipient {
if let Some(hash) = password_hash {
let provided_password = params
.password
.as_ref()
.ok_or_else(|| ApiError::unauthorized("Password required"))?;
if let ShareRecipient::PublicLink {
password_hash: Some(hash),
..
} = &share.recipient
{
let provided_password = params
.password
.as_ref()
.ok_or_else(|| ApiError::unauthorized("Password required"))?;
if !verify_share_password(provided_password, hash) {
// Log failed attempt
let activity = ShareActivity {
id: Uuid::now_v7(),
share_id: share.id,
actor_id: None,
actor_ip: Some(addr.ip().to_string()),
action: ShareActivityAction::PasswordFailed,
details: None,
timestamp: Utc::now(),
};
let _ = state.storage.record_share_activity(&activity).await;
if !verify_share_password(provided_password, hash) {
// Log failed attempt
let activity = ShareActivity {
id: Uuid::now_v7(),
share_id: share.id,
actor_id: None,
actor_ip: Some(addr.ip().to_string()),
action: ShareActivityAction::PasswordFailed,
details: None,
timestamp: Utc::now(),
};
let _ = state.storage.record_share_activity(&activity).await;
return Err(ApiError::unauthorized("Invalid password"));
}
return Err(ApiError::unauthorized("Invalid password"));
}
}
@ -473,8 +480,6 @@ pub async fn access_shared(
Ok(Json(item.into()))
},
_ => {
// For collections/tags, return a placeholder
// Full implementation would return the collection contents
Err(ApiError::bad_request(
"Collection/tag sharing not yet fully implemented",
))

View file

@ -13,8 +13,6 @@ pub struct ShareLinkQuery {
pub password: Option<String>,
}
// ===== Ratings =====
pub async fn rate_media(
State(state): State<AppState>,
Extension(username): Extension<String>,
@ -46,8 +44,6 @@ pub async fn get_media_ratings(
))
}
// ===== Comments =====
pub async fn add_comment(
State(state): State<AppState>,
Extension(username): Extension<String>,
@ -80,8 +76,6 @@ pub async fn get_media_comments(
))
}
// ===== Favorites =====
pub async fn add_favorite(
State(state): State<AppState>,
Extension(username): Extension<String>,
@ -120,8 +114,6 @@ pub async fn list_favorites(
Ok(Json(items.into_iter().map(MediaResponse::from).collect()))
}
// ===== Share Links =====
pub async fn create_share_link(
State(state): State<AppState>,
Extension(username): Extension<String>,

View file

@ -301,7 +301,7 @@ pub async fn report_changes(
if !config.sync.enabled {
return Err(ApiError::bad_request("Sync is not enabled"));
}
let conflict_resolution = config.sync.default_conflict_resolution.clone();
let conflict_resolution = config.sync.default_conflict_resolution;
drop(config);
let mut accepted = Vec::new();
@ -514,7 +514,7 @@ pub async fn create_upload(
.ok_or_else(|| ApiError::unauthorized("Invalid device token"))?;
let chunk_size = req.chunk_size.unwrap_or(DEFAULT_CHUNK_SIZE);
let chunk_count = (req.expected_size + chunk_size - 1) / chunk_size;
let chunk_count = req.expected_size.div_ceil(chunk_size);
let now = Utc::now();
let session = UploadSession {
@ -784,10 +784,10 @@ pub async fn cancel_upload(
})?;
// Clean up temp file if manager is available
if let Some(ref manager) = state.chunked_upload_manager {
if let Err(e) = manager.cancel(id).await {
tracing::warn!(session_id = %id, error = %e, "failed to clean up temp file");
}
if let Some(ref manager) = state.chunked_upload_manager
&& let Err(e) = manager.cancel(id).await
{
tracing::warn!(session_id = %id, error = %e, "failed to clean up temp file");
}
session.status = UploadStatus::Cancelled;
@ -827,38 +827,37 @@ pub async fn download_file(
let file_size = metadata.len();
// Check for Range header
if let Some(range_header) = headers.get(header::RANGE) {
if let Ok(range_str) = range_header.to_str() {
if let Some(range) = parse_range_header(range_str, file_size) {
// Partial content response
let (start, end) = range;
let length = end - start + 1;
if let Some(range_header) = headers.get(header::RANGE)
&& let Ok(range_str) = range_header.to_str()
&& let Some(range) = parse_range_header(range_str, file_size)
{
// Partial content response
let (start, end) = range;
let length = end - start + 1;
let file = tokio::fs::File::open(&item.path).await.map_err(|e| {
ApiError::internal(format!("Failed to reopen file: {}", e))
})?;
let file = tokio::fs::File::open(&item.path).await.map_err(|e| {
ApiError::internal(format!("Failed to reopen file: {}", e))
})?;
let stream = ReaderStream::new(file);
let body = Body::from_stream(stream);
let stream = ReaderStream::new(file);
let body = Body::from_stream(stream);
return Ok(
return Ok(
(
StatusCode::PARTIAL_CONTENT,
[
(header::CONTENT_TYPE, item.media_type.mime_type()),
(header::CONTENT_LENGTH, length.to_string()),
(
StatusCode::PARTIAL_CONTENT,
[
(header::CONTENT_TYPE, item.media_type.mime_type()),
(header::CONTENT_LENGTH, length.to_string()),
(
header::CONTENT_RANGE,
format!("bytes {}-{}/{}", start, end, file_size),
),
(header::ACCEPT_RANGES, "bytes".to_string()),
],
body,
)
.into_response(),
);
}
}
header::CONTENT_RANGE,
format!("bytes {}-{}/{}", start, end, file_size),
),
(header::ACCEPT_RANGES, "bytes".to_string()),
],
body,
)
.into_response(),
);
}
// Full content response

View file

@ -29,6 +29,7 @@ use pinakes_core::{
ThumbnailConfig,
TlsConfig,
TranscodingConfig,
TrashConfig,
UiConfig,
UserAccount,
UserRole,
@ -151,6 +152,7 @@ fn default_config() -> Config {
managed_storage: ManagedStorageConfig::default(),
sync: SyncConfig::default(),
sharing: SharingConfig::default(),
trash: TrashConfig::default(),
}
}
@ -298,10 +300,6 @@ async fn response_body(
serde_json::from_slice(&body).unwrap_or(serde_json::Value::Null)
}
// ===================================================================
// Existing tests (no auth)
// ===================================================================
#[tokio::test]
async fn test_list_media_empty() {
let app = setup_app().await;
@ -515,10 +513,6 @@ async fn test_user_duplicate_username() {
assert_eq!(response.status(), StatusCode::CONFLICT);
}
// ===================================================================
// Authentication tests
// ===================================================================
#[tokio::test]
async fn test_unauthenticated_request_rejected() {
let (app, ..) = setup_app_with_auth().await;
@ -623,10 +617,6 @@ async fn test_logout() {
assert_eq!(response.status(), StatusCode::UNAUTHORIZED);
}
// ===================================================================
// Authorization / RBAC tests
// ===================================================================
#[tokio::test]
async fn test_viewer_cannot_access_editor_routes() {
let (app, _, _, viewer_token) = setup_app_with_auth().await;
@ -713,10 +703,6 @@ async fn test_admin_can_access_all() {
assert_eq!(response.status(), StatusCode::OK);
}
// ===================================================================
// Phase 2 feature tests: Social
// ===================================================================
#[tokio::test]
async fn test_rating_invalid_stars_zero() {
let (app, _, editor_token, _) = setup_app_with_auth().await;
@ -775,10 +761,6 @@ async fn test_favorites_list_empty() {
assert!(body.as_array().unwrap().is_empty());
}
// ===================================================================
// Phase 2 feature tests: Playlists
// ===================================================================
#[tokio::test]
async fn test_playlist_crud() {
let (app, _, editor_token, _) = setup_app_with_auth().await;
@ -860,10 +842,6 @@ async fn test_playlist_empty_name() {
assert_eq!(response.status(), StatusCode::BAD_REQUEST);
}
// ===================================================================
// Phase 2 feature tests: Analytics
// ===================================================================
#[tokio::test]
async fn test_most_viewed_empty() {
let (app, _, _, viewer_token) = setup_app_with_auth().await;
@ -896,10 +874,6 @@ async fn test_record_event_and_query() {
assert_eq!(body["recorded"], true);
}
// ===================================================================
// Phase 2 feature tests: Streaming/Transcode
// ===================================================================
#[tokio::test]
async fn test_transcode_session_not_found() {
let (app, _, _, viewer_token) = setup_app_with_auth().await;
@ -951,10 +925,6 @@ async fn test_hls_segment_no_session() {
);
}
// ===================================================================
// Phase 2 feature tests: Subtitles
// ===================================================================
#[tokio::test]
async fn test_subtitles_list() {
let (app, _, _, viewer_token) = setup_app_with_auth().await;
@ -974,10 +944,6 @@ async fn test_subtitles_list() {
);
}
// ===================================================================
// Health: public access test
// ===================================================================
#[tokio::test]
async fn test_health_public() {
let (app, ..) = setup_app_with_auth().await;
@ -988,10 +954,6 @@ async fn test_health_public() {
assert_eq!(response.status(), StatusCode::OK);
}
// ===================================================================
// Input validation & edge case tests
// ===================================================================
#[tokio::test]
async fn test_invalid_uuid_in_path() {
let (app, _, _, viewer_token) = setup_app_with_auth().await;
@ -1026,7 +988,7 @@ async fn test_share_link_expired() {
// (need real media items). Verify the expire check logic works.
let app = setup_app().await;
// First import a dummy file to get a media_id but we can't without a real
// First import a dummy file to get a media_id, but we can't without a real
// file. So let's test the public share access endpoint with a nonexistent
// token.
let response = app

View file

@ -29,6 +29,7 @@ use pinakes_core::{
ThumbnailConfig,
TlsConfig,
TranscodingConfig,
TrashConfig,
UiConfig,
WebhookConfig,
},
@ -118,6 +119,7 @@ async fn setup_app_with_plugins()
managed_storage: ManagedStorageConfig::default(),
sync: SyncConfig::default(),
sharing: SharingConfig::default(),
trash: TrashConfig::default(),
};
let job_queue =

View file

@ -174,7 +174,7 @@ pub fn BacklinksPanel(
for backlink in &data.backlinks {
BacklinkItemView {
backlink: backlink.clone(),
on_navigate: on_navigate.clone(),
on_navigate: on_navigate,
}
}
}
@ -328,7 +328,7 @@ pub fn OutgoingLinksPanel(
for link in &data.links {
OutgoingLinkItemView {
link: link.clone(),
on_navigate: on_navigate.clone(),
on_navigate: on_navigate,
}
}
}

View file

@ -484,7 +484,7 @@ pub fn Detail(
span { class: "detail-value mono", "{media.content_hash}" }
}
// Editable fields conditional by media category
// Editable fields, conditional by media category
div { class: "detail-field",
label { class: "detail-label", "Title" }
input {

View file

@ -100,7 +100,7 @@ pub fn GraphView(
ForceDirectedGraph {
nodes: graph.nodes.clone(),
edges: graph.edges.clone(),
selected_node: selected_node.clone(),
selected_node: selected_node,
on_node_click: move |id: String| {
selected_node.set(Some(id.clone()));
},
@ -525,7 +525,7 @@ fn ForceDirectedGraph(
},
onwheel: move |evt| {
let delta = if evt.delta().strip_units().y < 0.0 { 1.1 } else { 0.9 };
let new_zoom = (*zoom.read() * delta).max(0.1).min(5.0);
let new_zoom = (*zoom.read() * delta).clamp(0.1, 5.0);
zoom.set(new_zoom);
},

View file

@ -80,12 +80,11 @@ pub fn MarkdownViewer(
})();
"#;
if let Ok(result) = eval(check_js).await {
if let Some(target) = result.as_str() {
if !target.is_empty() {
handler.call(target.to_string());
}
}
if let Ok(result) = eval(check_js).await
&& let Some(target) = result.as_str()
&& !target.is_empty()
{
handler.call(target.to_string());
}
}
});