initial commit

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I4a6b498153eccd5407510dd541b7f4816a6a6964
raf 2026-01-30 22:05:46 +03:00
commit 6a73d11c4b
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
124 changed files with 34856 additions and 0 deletions

pinakes-server/Cargo.toml

@@ -0,0 +1,30 @@
[package]
name = "pinakes-server"
edition.workspace = true
version.workspace = true
license.workspace = true

[dependencies]
pinakes-core = { path = "../pinakes-core" }
tokio = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
toml = { workspace = true }
chrono = { workspace = true }
uuid = { workspace = true }
thiserror = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
axum = { workspace = true }
tower = { workspace = true }
tower-http = { workspace = true }
governor = { workspace = true }
tower_governor = { workspace = true }
tokio-util = { version = "0.7", features = ["io"] }
argon2 = { workspace = true }
rand = "0.9"

[dev-dependencies]
http-body-util = "0.1"

pinakes-server/src/app.rs

@@ -0,0 +1,244 @@
use std::sync::Arc;
use axum::Router;
use axum::extract::DefaultBodyLimit;
use axum::http::{HeaderValue, Method, header};
use axum::middleware;
use axum::routing::{delete, get, patch, post, put};
use tower_governor::GovernorLayer;
use tower_governor::governor::GovernorConfigBuilder;
use tower_http::cors::CorsLayer;
use tower_http::trace::TraceLayer;
use crate::auth;
use crate::routes;
use crate::state::AppState;
pub fn create_router(state: AppState) -> Router {
// Global rate limit per IP: burst of 100, replenishing one request per second
let global_governor = Arc::new(
GovernorConfigBuilder::default()
.per_second(1)
.burst_size(100)
.finish()
.unwrap(),
);
// Strict rate limit for login: 5 requests/min per IP
let login_governor = Arc::new(
GovernorConfigBuilder::default()
.per_second(12) // replenish one every 12 seconds
.burst_size(5)
.finish()
.unwrap(),
);
// Login route with strict rate limiting
let login_route = Router::new()
.route("/auth/login", post(routes::auth::login))
.layer(GovernorLayer {
config: login_governor,
});
// Read-only routes: any authenticated user (Viewer+)
let viewer_routes = Router::new()
.route("/health", get(routes::health::health))
.route("/media/count", get(routes::media::get_media_count))
.route("/media", get(routes::media::list_media))
.route("/media/{id}", get(routes::media::get_media))
.route("/media/{id}/stream", get(routes::media::stream_media))
.route("/media/{id}/thumbnail", get(routes::media::get_thumbnail))
.route("/media/{media_id}/tags", get(routes::tags::get_media_tags))
.route("/search", get(routes::search::search))
.route("/search", post(routes::search::search_post))
.route("/tags", get(routes::tags::list_tags))
.route("/tags/{id}", get(routes::tags::get_tag))
.route("/collections", get(routes::collections::list_collections))
.route(
"/collections/{id}",
get(routes::collections::get_collection),
)
.route(
"/collections/{id}/members",
get(routes::collections::get_members),
)
.route("/audit", get(routes::audit::list_audit))
.route("/scan/status", get(routes::scan::scan_status))
.route("/config", get(routes::config::get_config))
.route("/config/ui", get(routes::config::get_ui_config))
.route("/database/stats", get(routes::database::database_stats))
.route("/duplicates", get(routes::duplicates::list_duplicates))
// Statistics
.route("/statistics", get(routes::statistics::library_statistics))
// Scheduled tasks (read)
.route(
"/tasks/scheduled",
get(routes::scheduled_tasks::list_scheduled_tasks),
)
// Jobs
.route("/jobs", get(routes::jobs::list_jobs))
.route("/jobs/{id}", get(routes::jobs::get_job))
// Saved searches (read)
.route(
"/searches/saved",
get(routes::saved_searches::list_saved_searches),
)
// Webhooks (read)
.route("/webhooks", get(routes::webhooks::list_webhooks))
// Auth endpoints (self-service) — login handled separately with stricter rate limit
.route("/auth/logout", post(routes::auth::logout))
.route("/auth/me", get(routes::auth::me));
// Write routes: Editor+ required
let editor_routes = Router::new()
.route("/media/import", post(routes::media::import_media))
.route(
"/media/import/options",
post(routes::media::import_with_options),
)
.route("/media/import/batch", post(routes::media::batch_import))
.route(
"/media/import/directory",
post(routes::media::import_directory_endpoint),
)
.route(
"/media/import/preview",
post(routes::media::preview_directory),
)
.route("/media/batch/tag", post(routes::media::batch_tag))
.route("/media/batch/delete", post(routes::media::batch_delete))
.route("/media/batch/update", patch(routes::media::batch_update))
.route(
"/media/batch/collection",
post(routes::media::batch_add_to_collection),
)
.route("/media/all", delete(routes::media::delete_all_media))
.route("/media/{id}", patch(routes::media::update_media))
.route("/media/{id}", delete(routes::media::delete_media))
.route("/media/{id}/open", post(routes::media::open_media))
.route(
"/media/{id}/custom-fields",
post(routes::media::set_custom_field),
)
.route(
"/media/{id}/custom-fields/{name}",
delete(routes::media::delete_custom_field),
)
.route("/tags", post(routes::tags::create_tag))
.route("/tags/{id}", delete(routes::tags::delete_tag))
.route("/media/{media_id}/tags", post(routes::tags::tag_media))
.route(
"/media/{media_id}/tags/{tag_id}",
delete(routes::tags::untag_media),
)
.route("/collections", post(routes::collections::create_collection))
.route(
"/collections/{id}",
delete(routes::collections::delete_collection),
)
.route(
"/collections/{id}/members",
post(routes::collections::add_member),
)
.route(
"/collections/{collection_id}/members/{media_id}",
delete(routes::collections::remove_member),
)
.route("/scan", post(routes::scan::trigger_scan))
.route("/jobs/{id}/cancel", post(routes::jobs::cancel_job))
// Saved searches (write)
.route(
"/searches/saved",
post(routes::saved_searches::create_saved_search),
)
.route(
"/searches/saved/{id}",
delete(routes::saved_searches::delete_saved_search),
)
// Integrity
.route(
"/jobs/orphan-detection",
post(routes::integrity::trigger_orphan_detection),
)
.route(
"/jobs/verify-integrity",
post(routes::integrity::trigger_verify_integrity),
)
.route(
"/jobs/cleanup-thumbnails",
post(routes::integrity::trigger_cleanup_thumbnails),
)
.route(
"/jobs/generate-thumbnails",
post(routes::integrity::generate_all_thumbnails),
)
.route("/orphans/resolve", post(routes::integrity::resolve_orphans))
// Export
.route("/jobs/export", post(routes::export::trigger_export))
.route(
"/jobs/export/options",
post(routes::export::trigger_export_with_options),
)
// Scheduled tasks (write)
.route(
"/tasks/scheduled/{id}/toggle",
post(routes::scheduled_tasks::toggle_scheduled_task),
)
.route(
"/tasks/scheduled/{id}/run-now",
post(routes::scheduled_tasks::run_scheduled_task_now),
)
// Webhooks
.route("/webhooks/test", post(routes::webhooks::test_webhook))
.layer(middleware::from_fn(auth::require_editor));
// Admin-only routes: destructive/config operations
let admin_routes = Router::new()
.route(
"/config/scanning",
put(routes::config::update_scanning_config),
)
.route("/config/roots", post(routes::config::add_root))
.route("/config/roots", delete(routes::config::remove_root))
.route("/config/ui", put(routes::config::update_ui_config))
.route("/database/vacuum", post(routes::database::vacuum_database))
.route("/database/clear", post(routes::database::clear_database))
.layer(middleware::from_fn(auth::require_admin));
let api = Router::new()
.merge(login_route)
.merge(viewer_routes)
.merge(editor_routes)
.merge(admin_routes);
// CORS: allow same-origin by default, plus the desktop UI origin
let cors = CorsLayer::new()
.allow_origin([
"http://localhost:3000".parse::<HeaderValue>().unwrap(),
"http://127.0.0.1:3000".parse::<HeaderValue>().unwrap(),
"tauri://localhost".parse::<HeaderValue>().unwrap(),
])
.allow_methods([
Method::GET,
Method::POST,
Method::PUT,
Method::PATCH,
Method::DELETE,
])
.allow_headers([header::CONTENT_TYPE, header::AUTHORIZATION])
.allow_credentials(true);
Router::new()
.nest("/api/v1", api)
.layer(DefaultBodyLimit::max(10 * 1024 * 1024))
.layer(middleware::from_fn_with_state(
state.clone(),
auth::require_auth,
))
.layer(GovernorLayer {
config: global_governor,
})
.layer(TraceLayer::new_for_http())
.layer(cors)
.with_state(state)
}
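
// A minimal, self-contained sketch (not part of this commit) of the same
// tower_governor layering pattern used above, handy for verifying the
// rate-limit semantics in isolation. Assumes the tower_governor version
// pinned by the workspace exposes this same builder/layer API.
#[cfg(test)]
mod governor_sketch {
    use std::sync::Arc;

    use axum::Router;
    use axum::routing::get;
    use tower_governor::GovernorLayer;
    use tower_governor::governor::GovernorConfigBuilder;

    #[test]
    fn builds_rate_limited_router() {
        // Burst of 5, replenishing one permit every 12 seconds (~5/min),
        // mirroring the login limiter in create_router.
        let config = Arc::new(
            GovernorConfigBuilder::default()
                .per_second(12)
                .burst_size(5)
                .finish()
                .unwrap(),
        );
        let _router: Router = Router::new()
            .route("/ping", get(|| async { "pong" }))
            .layer(GovernorLayer { config });
    }
}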

pinakes-server/src/auth.rs

@@ -0,0 +1,164 @@
use axum::extract::{Request, State};
use axum::http::StatusCode;
use axum::middleware::Next;
use axum::response::{IntoResponse, Response};
use pinakes_core::config::UserRole;
use crate::state::AppState;
/// Constant-time string comparison to prevent timing attacks on API keys.
fn constant_time_eq(a: &str, b: &str) -> bool {
if a.len() != b.len() {
return false;
}
a.as_bytes()
.iter()
.zip(b.as_bytes())
.fold(0u8, |acc, (x, y)| acc | (x ^ y))
== 0
}
/// Axum middleware that checks for a valid Bearer token.
///
/// If `accounts.enabled == true`: look up bearer token in session store.
/// If `accounts.enabled == false`: use existing api_key logic (unchanged behavior).
/// Skips authentication for the `/health` and `/auth/login` path suffixes.
pub async fn require_auth(
State(state): State<AppState>,
mut request: Request,
next: Next,
) -> Response {
let path = request.uri().path().to_string();
// Always allow health and login endpoints
if path.ends_with("/health") || path.ends_with("/auth/login") {
return next.run(request).await;
}
let config = state.config.read().await;
if config.accounts.enabled {
// Session-based auth
let token = request
.headers()
.get("authorization")
.and_then(|v| v.to_str().ok())
.and_then(|s| s.strip_prefix("Bearer "))
.map(|s| s.to_string());
drop(config);
let Some(token) = token else {
tracing::debug!(path = %path, "rejected: missing Authorization header");
return unauthorized("missing Authorization header");
};
let sessions = state.sessions.read().await;
let Some(session) = sessions.get(&token) else {
tracing::debug!(path = %path, "rejected: invalid session token");
return unauthorized("invalid or expired session token");
};
// Check session expiry
if session.is_expired() {
let username = session.username.clone();
drop(sessions);
// Remove expired session
let mut sessions_mut = state.sessions.write().await;
sessions_mut.remove(&token);
tracing::info!(username = %username, "session expired");
return unauthorized("session expired");
}
// Inject role and username into request extensions
request.extensions_mut().insert(session.role);
request.extensions_mut().insert(session.username.clone());
} else {
// Legacy API key auth
let api_key = std::env::var("PINAKES_API_KEY")
.ok()
.or_else(|| config.server.api_key.clone());
drop(config);
if let Some(ref expected_key) = api_key {
if expected_key.is_empty() {
// Empty key means no auth required
request.extensions_mut().insert(UserRole::Admin);
return next.run(request).await;
}
let auth_header = request
.headers()
.get("authorization")
.and_then(|v| v.to_str().ok());
match auth_header {
Some(header) if header.starts_with("Bearer ") => {
let token = &header[7..];
if !constant_time_eq(token, expected_key.as_str()) {
tracing::warn!(path = %path, "rejected: invalid API key");
return unauthorized("invalid api key");
}
}
_ => {
return unauthorized(
"missing or malformed Authorization header, expected: Bearer <api_key>",
);
}
}
}
// When no api_key is configured, or key matches, grant admin
request.extensions_mut().insert(UserRole::Admin);
}
next.run(request).await
}
/// Middleware: requires Editor or Admin role.
pub async fn require_editor(request: Request, next: Next) -> Response {
let role = request
.extensions()
.get::<UserRole>()
.copied()
.unwrap_or(UserRole::Viewer);
if role.can_write() {
next.run(request).await
} else {
forbidden("editor role required")
}
}
/// Middleware: requires Admin role.
pub async fn require_admin(request: Request, next: Next) -> Response {
let role = request
.extensions()
.get::<UserRole>()
.copied()
.unwrap_or(UserRole::Viewer);
if role.can_admin() {
next.run(request).await
} else {
forbidden("admin role required")
}
}
fn unauthorized(message: &str) -> Response {
let body = format!(r#"{{"error":"{message}"}}"#);
(
StatusCode::UNAUTHORIZED,
[("content-type", "application/json")],
body,
)
.into_response()
}
fn forbidden(message: &str) -> Response {
let body = format!(r#"{{"error":"{message}"}}"#);
(
StatusCode::FORBIDDEN,
[("content-type", "application/json")],
body,
)
.into_response()
}
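
// Sanity checks for the comparison above (sketch): equal strings match, while
// single-byte differences and length differences are rejected. The early
// return on length mismatch leaks only the length, which is acceptable for
// fixed-length API keys.
#[cfg(test)]
mod constant_time_eq_tests {
    use super::constant_time_eq;

    #[test]
    fn matches_equal_and_rejects_unequal() {
        assert!(constant_time_eq("secret", "secret"));
        assert!(!constant_time_eq("secret", "secrex"));
        assert!(!constant_time_eq("secret", "secret1"));
    }
}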

pinakes-server/src/dto.rs

@@ -0,0 +1,553 @@
use std::collections::HashMap;
use std::path::PathBuf;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
// Media
#[derive(Debug, Serialize)]
pub struct MediaResponse {
pub id: String,
pub path: String,
pub file_name: String,
pub media_type: String,
pub content_hash: String,
pub file_size: u64,
pub title: Option<String>,
pub artist: Option<String>,
pub album: Option<String>,
pub genre: Option<String>,
pub year: Option<i32>,
pub duration_secs: Option<f64>,
pub description: Option<String>,
pub has_thumbnail: bool,
pub custom_fields: HashMap<String, CustomFieldResponse>,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
}
#[derive(Debug, Serialize)]
pub struct CustomFieldResponse {
pub field_type: String,
pub value: String,
}
#[derive(Debug, Deserialize)]
pub struct ImportRequest {
pub path: PathBuf,
}
#[derive(Debug, Serialize)]
pub struct ImportResponse {
pub media_id: String,
pub was_duplicate: bool,
}
#[derive(Debug, Deserialize)]
pub struct UpdateMediaRequest {
pub title: Option<String>,
pub artist: Option<String>,
pub album: Option<String>,
pub genre: Option<String>,
pub year: Option<i32>,
pub description: Option<String>,
}
// Tags
#[derive(Debug, Serialize)]
pub struct TagResponse {
pub id: String,
pub name: String,
pub parent_id: Option<String>,
pub created_at: DateTime<Utc>,
}
#[derive(Debug, Deserialize)]
pub struct CreateTagRequest {
pub name: String,
pub parent_id: Option<Uuid>,
}
#[derive(Debug, Deserialize)]
pub struct TagMediaRequest {
pub tag_id: Uuid,
}
// Collections
#[derive(Debug, Serialize)]
pub struct CollectionResponse {
pub id: String,
pub name: String,
pub description: Option<String>,
pub kind: String,
pub filter_query: Option<String>,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
}
#[derive(Debug, Deserialize)]
pub struct CreateCollectionRequest {
pub name: String,
pub kind: String,
pub description: Option<String>,
pub filter_query: Option<String>,
}
#[derive(Debug, Deserialize)]
pub struct AddMemberRequest {
pub media_id: Uuid,
pub position: Option<i32>,
}
// Search
#[derive(Debug, Deserialize)]
pub struct SearchParams {
pub q: String,
pub sort: Option<String>,
pub offset: Option<u64>,
pub limit: Option<u64>,
}
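
// Sketch: axum's Query extractor feeds the same Deserialize impl exercised
// here (via serde_json for convenience; the actual wire format is
// urlencoded, e.g. `?q=genre%3Ajazz&limit=25&offset=50`).
#[cfg(test)]
mod search_params_tests {
    use super::SearchParams;

    #[test]
    fn deserializes_with_optional_fields() {
        let params: SearchParams =
            serde_json::from_str(r#"{"q":"genre:jazz","limit":25,"offset":50}"#).unwrap();
        assert_eq!(params.q, "genre:jazz");
        assert_eq!(params.limit, Some(25));
        assert_eq!(params.offset, Some(50));
        assert!(params.sort.is_none());
    }
}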
#[derive(Debug, Serialize)]
pub struct SearchResponse {
pub items: Vec<MediaResponse>,
pub total_count: u64,
}
// Audit
#[derive(Debug, Serialize)]
pub struct AuditEntryResponse {
pub id: String,
pub media_id: Option<String>,
pub action: String,
pub details: Option<String>,
pub timestamp: DateTime<Utc>,
}
// Search (POST body)
#[derive(Debug, Deserialize)]
pub struct SearchRequestBody {
pub q: String,
pub sort: Option<String>,
pub offset: Option<u64>,
pub limit: Option<u64>,
}
// Scan
#[derive(Debug, Deserialize)]
pub struct ScanRequest {
pub path: Option<PathBuf>,
}
#[derive(Debug, Serialize)]
pub struct ScanResponse {
pub files_found: usize,
pub files_processed: usize,
pub errors: Vec<String>,
}
#[derive(Debug, Serialize)]
pub struct ScanJobResponse {
pub job_id: String,
}
#[derive(Debug, Serialize)]
pub struct ScanStatusResponse {
pub scanning: bool,
pub files_found: usize,
pub files_processed: usize,
pub error_count: usize,
pub errors: Vec<String>,
}
// Pagination
#[derive(Debug, Deserialize)]
pub struct PaginationParams {
pub offset: Option<u64>,
pub limit: Option<u64>,
pub sort: Option<String>,
}
// Open
#[derive(Debug, Deserialize)]
pub struct OpenRequest {
pub media_id: Uuid,
}
// Config
#[derive(Debug, Serialize)]
pub struct ConfigResponse {
pub backend: String,
pub database_path: Option<String>,
pub roots: Vec<String>,
pub scanning: ScanningConfigResponse,
pub server: ServerConfigResponse,
pub ui: UiConfigResponse,
pub config_path: Option<String>,
pub config_writable: bool,
}
#[derive(Debug, Serialize)]
pub struct ScanningConfigResponse {
pub watch: bool,
pub poll_interval_secs: u64,
pub ignore_patterns: Vec<String>,
}
#[derive(Debug, Serialize)]
pub struct ServerConfigResponse {
pub host: String,
pub port: u16,
}
#[derive(Debug, Deserialize)]
pub struct UpdateScanningRequest {
pub watch: Option<bool>,
pub poll_interval_secs: Option<u64>,
pub ignore_patterns: Option<Vec<String>>,
}
#[derive(Debug, Deserialize)]
pub struct RootDirRequest {
pub path: String,
}
// Enhanced Import
#[derive(Debug, Deserialize)]
pub struct ImportWithOptionsRequest {
pub path: PathBuf,
pub tag_ids: Option<Vec<Uuid>>,
pub new_tags: Option<Vec<String>>,
pub collection_id: Option<Uuid>,
}
#[derive(Debug, Deserialize)]
pub struct BatchImportRequest {
pub paths: Vec<PathBuf>,
pub tag_ids: Option<Vec<Uuid>>,
pub new_tags: Option<Vec<String>>,
pub collection_id: Option<Uuid>,
}
#[derive(Debug, Serialize)]
pub struct BatchImportResponse {
pub results: Vec<BatchImportItemResult>,
pub total: usize,
pub imported: usize,
pub duplicates: usize,
pub errors: usize,
}
#[derive(Debug, Serialize)]
pub struct BatchImportItemResult {
pub path: String,
pub media_id: Option<String>,
pub was_duplicate: bool,
pub error: Option<String>,
}
#[derive(Debug, Deserialize)]
pub struct DirectoryImportRequest {
pub path: PathBuf,
pub tag_ids: Option<Vec<Uuid>>,
pub new_tags: Option<Vec<String>>,
pub collection_id: Option<Uuid>,
}
#[derive(Debug, Serialize)]
pub struct DirectoryPreviewResponse {
pub files: Vec<DirectoryPreviewFile>,
pub total_count: usize,
pub total_size: u64,
}
#[derive(Debug, Serialize)]
pub struct DirectoryPreviewFile {
pub path: String,
pub file_name: String,
pub media_type: String,
pub file_size: u64,
}
// Custom Fields
#[derive(Debug, Deserialize)]
pub struct SetCustomFieldRequest {
pub name: String,
pub field_type: String,
pub value: String,
}
// Media update extended
#[derive(Debug, Deserialize)]
pub struct UpdateMediaFullRequest {
pub title: Option<String>,
pub artist: Option<String>,
pub album: Option<String>,
pub genre: Option<String>,
pub year: Option<i32>,
pub description: Option<String>,
}
// Batch operations
#[derive(Debug, Deserialize)]
pub struct BatchTagRequest {
pub media_ids: Vec<Uuid>,
pub tag_ids: Vec<Uuid>,
}
#[derive(Debug, Deserialize)]
pub struct BatchCollectionRequest {
pub media_ids: Vec<Uuid>,
pub collection_id: Uuid,
}
#[derive(Debug, Deserialize)]
pub struct BatchDeleteRequest {
pub media_ids: Vec<Uuid>,
}
#[derive(Debug, Deserialize)]
pub struct BatchUpdateRequest {
pub media_ids: Vec<Uuid>,
pub title: Option<String>,
pub artist: Option<String>,
pub album: Option<String>,
pub genre: Option<String>,
pub year: Option<i32>,
pub description: Option<String>,
}
#[derive(Debug, Serialize)]
pub struct BatchOperationResponse {
pub processed: usize,
pub errors: Vec<String>,
}
// Search with sort
#[derive(Debug, Serialize)]
pub struct MediaCountResponse {
pub count: u64,
}
// Database management
#[derive(Debug, Serialize)]
pub struct DatabaseStatsResponse {
pub media_count: u64,
pub tag_count: u64,
pub collection_count: u64,
pub audit_count: u64,
pub database_size_bytes: u64,
pub backend_name: String,
}
// UI Config
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct UiConfigResponse {
pub theme: String,
pub default_view: String,
pub default_page_size: usize,
pub default_view_mode: String,
pub auto_play_media: bool,
pub show_thumbnails: bool,
pub sidebar_collapsed: bool,
}
#[derive(Debug, Deserialize)]
pub struct UpdateUiConfigRequest {
pub theme: Option<String>,
pub default_view: Option<String>,
pub default_page_size: Option<usize>,
pub default_view_mode: Option<String>,
pub auto_play_media: Option<bool>,
pub show_thumbnails: Option<bool>,
pub sidebar_collapsed: Option<bool>,
}
impl From<&pinakes_core::config::UiConfig> for UiConfigResponse {
fn from(ui: &pinakes_core::config::UiConfig) -> Self {
Self {
theme: ui.theme.clone(),
default_view: ui.default_view.clone(),
default_page_size: ui.default_page_size,
default_view_mode: ui.default_view_mode.clone(),
auto_play_media: ui.auto_play_media,
show_thumbnails: ui.show_thumbnails,
sidebar_collapsed: ui.sidebar_collapsed,
}
}
}
// Library Statistics
#[derive(Debug, Serialize)]
pub struct LibraryStatisticsResponse {
pub total_media: u64,
pub total_size_bytes: u64,
pub avg_file_size_bytes: u64,
pub media_by_type: Vec<TypeCountResponse>,
pub storage_by_type: Vec<TypeCountResponse>,
pub newest_item: Option<String>,
pub oldest_item: Option<String>,
pub top_tags: Vec<TypeCountResponse>,
pub top_collections: Vec<TypeCountResponse>,
pub total_tags: u64,
pub total_collections: u64,
pub total_duplicates: u64,
}
#[derive(Debug, Serialize)]
pub struct TypeCountResponse {
pub name: String,
pub count: u64,
}
impl From<pinakes_core::storage::LibraryStatistics> for LibraryStatisticsResponse {
fn from(stats: pinakes_core::storage::LibraryStatistics) -> Self {
Self {
total_media: stats.total_media,
total_size_bytes: stats.total_size_bytes,
avg_file_size_bytes: stats.avg_file_size_bytes,
media_by_type: stats
.media_by_type
.into_iter()
.map(|(name, count)| TypeCountResponse { name, count })
.collect(),
storage_by_type: stats
.storage_by_type
.into_iter()
.map(|(name, count)| TypeCountResponse { name, count })
.collect(),
newest_item: stats.newest_item,
oldest_item: stats.oldest_item,
top_tags: stats
.top_tags
.into_iter()
.map(|(name, count)| TypeCountResponse { name, count })
.collect(),
top_collections: stats
.top_collections
.into_iter()
.map(|(name, count)| TypeCountResponse { name, count })
.collect(),
total_tags: stats.total_tags,
total_collections: stats.total_collections,
total_duplicates: stats.total_duplicates,
}
}
}
// Scheduled Tasks
#[derive(Debug, Serialize)]
pub struct ScheduledTaskResponse {
pub id: String,
pub name: String,
pub schedule: String,
pub enabled: bool,
pub last_run: Option<String>,
pub next_run: Option<String>,
pub last_status: Option<String>,
}
// Duplicates
#[derive(Debug, Serialize)]
pub struct DuplicateGroupResponse {
pub content_hash: String,
pub items: Vec<MediaResponse>,
}
// Auth
#[derive(Debug, Deserialize)]
pub struct LoginRequest {
pub username: String,
pub password: String,
}
#[derive(Debug, Serialize)]
pub struct LoginResponse {
pub token: String,
pub username: String,
pub role: String,
}
#[derive(Debug, Serialize)]
pub struct UserInfoResponse {
pub username: String,
pub role: String,
}
// Conversion helpers
impl From<pinakes_core::model::MediaItem> for MediaResponse {
fn from(item: pinakes_core::model::MediaItem) -> Self {
Self {
id: item.id.0.to_string(),
path: item.path.to_string_lossy().to_string(),
file_name: item.file_name,
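            // Round-trip the enum through serde_json to reuse its serialized
            // string form (rather than the Debug representation).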
media_type: serde_json::to_value(item.media_type)
.ok()
.and_then(|v| v.as_str().map(String::from))
.unwrap_or_default(),
content_hash: item.content_hash.0,
file_size: item.file_size,
title: item.title,
artist: item.artist,
album: item.album,
genre: item.genre,
year: item.year,
duration_secs: item.duration_secs,
description: item.description,
has_thumbnail: item.thumbnail_path.is_some(),
custom_fields: item
.custom_fields
.into_iter()
.map(|(k, v)| {
(
k,
CustomFieldResponse {
field_type: format!("{:?}", v.field_type).to_lowercase(),
value: v.value,
},
)
})
.collect(),
created_at: item.created_at,
updated_at: item.updated_at,
}
}
}
impl From<pinakes_core::model::Tag> for TagResponse {
fn from(tag: pinakes_core::model::Tag) -> Self {
Self {
id: tag.id.to_string(),
name: tag.name,
parent_id: tag.parent_id.map(|id| id.to_string()),
created_at: tag.created_at,
}
}
}
impl From<pinakes_core::model::Collection> for CollectionResponse {
fn from(col: pinakes_core::model::Collection) -> Self {
Self {
id: col.id.to_string(),
name: col.name,
description: col.description,
kind: format!("{:?}", col.kind).to_lowercase(),
filter_query: col.filter_query,
created_at: col.created_at,
updated_at: col.updated_at,
}
}
}
impl From<pinakes_core::model::AuditEntry> for AuditEntryResponse {
fn from(entry: pinakes_core::model::AuditEntry) -> Self {
Self {
id: entry.id.to_string(),
media_id: entry.media_id.map(|id| id.0.to_string()),
action: entry.action.to_string(),
details: entry.details,
timestamp: entry.timestamp,
}
}
}

pinakes-server/src/error.rs

@@ -0,0 +1,69 @@
use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};
use serde::Serialize;
#[derive(Debug, Serialize)]
struct ErrorResponse {
error: String,
}
pub struct ApiError(pub pinakes_core::error::PinakesError);
impl IntoResponse for ApiError {
fn into_response(self) -> Response {
use pinakes_core::error::PinakesError;
let (status, message) = match &self.0 {
PinakesError::NotFound(msg) => (StatusCode::NOT_FOUND, msg.clone()),
PinakesError::FileNotFound(path) => {
// Only expose the file name, not the full path
let name = path
.file_name()
.map(|n| n.to_string_lossy().to_string())
.unwrap_or_else(|| "unknown".to_string());
tracing::debug!(path = %path.display(), "file not found");
(StatusCode::NOT_FOUND, format!("file not found: {name}"))
}
PinakesError::TagNotFound(msg) => (StatusCode::NOT_FOUND, msg.clone()),
PinakesError::CollectionNotFound(msg) => (StatusCode::NOT_FOUND, msg.clone()),
PinakesError::DuplicateHash(msg) => (StatusCode::CONFLICT, msg.clone()),
PinakesError::UnsupportedMediaType(path) => {
let name = path
.file_name()
.map(|n| n.to_string_lossy().to_string())
.unwrap_or_else(|| "unknown".to_string());
(
StatusCode::BAD_REQUEST,
format!("unsupported media type: {name}"),
)
}
PinakesError::SearchParse(msg) => (StatusCode::BAD_REQUEST, msg.clone()),
PinakesError::InvalidOperation(msg) => (StatusCode::BAD_REQUEST, msg.clone()),
PinakesError::Config(_) => {
tracing::error!(error = %self.0, "configuration error");
(
StatusCode::INTERNAL_SERVER_ERROR,
"internal configuration error".to_string(),
)
}
_ => {
tracing::error!(error = %self.0, "internal server error");
(
StatusCode::INTERNAL_SERVER_ERROR,
"internal server error".to_string(),
)
}
};
let body = serde_json::to_string(&ErrorResponse {
error: message.clone(),
})
.unwrap_or_else(|_| format!(r#"{{"error":"{}"}}"#, message));
(status, [("content-type", "application/json")], body).into_response()
}
}
impl From<pinakes_core::error::PinakesError> for ApiError {
fn from(e: pinakes_core::error::PinakesError) -> Self {
Self(e)
}
}
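
// Sketch: spot-checking the status mapping above. Assumes
// PinakesError::NotFound carries a String, as its usage here suggests.
#[cfg(test)]
mod api_error_tests {
    use axum::http::StatusCode;
    use axum::response::IntoResponse;

    use super::ApiError;

    #[test]
    fn not_found_maps_to_404() {
        let resp = ApiError(pinakes_core::error::PinakesError::NotFound(
            "missing".to_string(),
        ))
        .into_response();
        assert_eq!(resp.status(), StatusCode::NOT_FOUND);
    }
}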

pinakes-server/src/lib.rs

@@ -0,0 +1,6 @@
pub mod app;
pub mod auth;
pub mod dto;
pub mod error;
pub mod routes;
pub mod state;

pinakes-server/src/main.rs

@@ -0,0 +1,448 @@
use std::path::PathBuf;
use std::sync::Arc;
use anyhow::Result;
use clap::Parser;
use tokio::sync::RwLock;
use tracing::info;
use tracing_subscriber::EnvFilter;
use pinakes_core::config::Config;
use pinakes_core::storage::StorageBackend;
use pinakes_server::app;
use pinakes_server::state::AppState;
/// Pinakes media cataloging server
#[derive(Parser)]
#[command(name = "pinakes-server", version, about)]
struct Cli {
/// Path to configuration file
#[arg(short, long, env = "PINAKES_CONFIG")]
config: Option<PathBuf>,
/// Override listen host
#[arg(long)]
host: Option<String>,
/// Override listen port
#[arg(short, long)]
port: Option<u16>,
/// Set log level (trace, debug, info, warn, error)
#[arg(long, default_value = "info")]
log_level: String,
/// Log output format (compact, full, pretty, json)
#[arg(long, default_value = "compact")]
log_format: String,
/// Run database migrations only, then exit
#[arg(long)]
migrate_only: bool,
}
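
// Example invocations (sketch):
//   pinakes-server --config /etc/pinakes/pinakes.toml --port 8080
//   PINAKES_CONFIG=./pinakes.toml pinakes-server --log-format json
//   pinakes-server --migrate-only

/// Resolve the configuration file path: an explicit `--config`/`PINAKES_CONFIG`
/// value wins, then `./pinakes.toml` in the current directory, then the XDG
/// default from `Config::default_config_path()`.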
fn resolve_config_path(explicit: Option<&std::path::Path>) -> PathBuf {
if let Some(path) = explicit {
return path.to_path_buf();
}
// Check current directory
let local = PathBuf::from("pinakes.toml");
if local.exists() {
return local;
}
// XDG default
Config::default_config_path()
}
#[tokio::main]
async fn main() -> Result<()> {
let cli = Cli::parse();
// Initialize logging
let env_filter = EnvFilter::try_new(&cli.log_level).unwrap_or_else(|_| EnvFilter::new("info"));
match cli.log_format.as_str() {
"json" => {
tracing_subscriber::fmt()
.with_env_filter(env_filter)
.json()
.init();
}
"pretty" => {
tracing_subscriber::fmt()
.with_env_filter(env_filter)
.pretty()
.init();
}
"full" => {
tracing_subscriber::fmt().with_env_filter(env_filter).init();
}
_ => {
tracing_subscriber::fmt()
.with_env_filter(env_filter)
.compact()
.init();
}
}
let config_path = resolve_config_path(cli.config.as_deref());
info!(path = %config_path.display(), "loading configuration");
let mut config = Config::load_or_default(&config_path)?;
config.ensure_dirs()?;
config
.validate()
.map_err(|e| anyhow::anyhow!("invalid configuration: {e}"))?;
// Apply CLI overrides
if let Some(host) = cli.host {
config.server.host = host;
}
if let Some(port) = cli.port {
config.server.port = port;
}
// Storage backend initialization
let storage: pinakes_core::storage::DynStorageBackend = match config.storage.backend {
pinakes_core::config::StorageBackendType::Sqlite => {
let sqlite_config = config.storage.sqlite.as_ref().ok_or_else(|| {
anyhow::anyhow!(
"sqlite storage selected but [storage.sqlite] config section missing"
)
})?;
info!(path = %sqlite_config.path.display(), "initializing sqlite storage");
let backend = pinakes_core::storage::sqlite::SqliteBackend::new(&sqlite_config.path)?;
backend.run_migrations().await?;
Arc::new(backend)
}
pinakes_core::config::StorageBackendType::Postgres => {
let pg_config = config.storage.postgres.as_ref().ok_or_else(|| {
anyhow::anyhow!(
"postgres storage selected but [storage.postgres] config section missing"
)
})?;
info!(host = %pg_config.host, port = pg_config.port, database = %pg_config.database, "initializing postgres storage");
let backend = pinakes_core::storage::postgres::PostgresBackend::new(pg_config).await?;
backend.run_migrations().await?;
Arc::new(backend)
}
};
if cli.migrate_only {
info!("migrations complete, exiting");
return Ok(());
}
// Register root directories
for root in &config.directories.roots {
if root.exists() {
storage.add_root_dir(root.clone()).await?;
info!(path = %root.display(), "registered root directory");
} else {
tracing::warn!(path = %root.display(), "root directory does not exist, skipping");
}
}
// Start filesystem watcher if configured
if config.scanning.watch {
let watch_storage = storage.clone();
let watch_dirs = config.directories.roots.clone();
let watch_ignore = config.scanning.ignore_patterns.clone();
tokio::spawn(async move {
if let Err(e) =
pinakes_core::scan::watch_and_import(watch_storage, watch_dirs, watch_ignore).await
{
tracing::error!(error = %e, "filesystem watcher failed");
}
});
info!("filesystem watcher started");
}
let addr = format!("{}:{}", config.server.host, config.server.port);
// Initialize job queue with executor
let job_storage = storage.clone();
let job_config = config.clone();
let job_queue = pinakes_core::jobs::JobQueue::new(
config.jobs.worker_count,
move |job_id, kind, cancel, jobs| {
let storage = job_storage.clone();
let config = job_config.clone();
tokio::spawn(async move {
use pinakes_core::jobs::{JobKind, JobQueue};
let result = match kind {
JobKind::Scan { path } => {
let ignore = config.scanning.ignore_patterns.clone();
let res = if let Some(p) = path {
pinakes_core::scan::scan_directory(&storage, &p, &ignore).await
} else {
pinakes_core::scan::scan_all_roots(&storage, &ignore)
.await
.map(|statuses| {
let total_found: usize =
statuses.iter().map(|s| s.files_found).sum();
let total_processed: usize =
statuses.iter().map(|s| s.files_processed).sum();
let all_errors: Vec<String> =
statuses.into_iter().flat_map(|s| s.errors).collect();
pinakes_core::scan::ScanStatus {
scanning: false,
files_found: total_found,
files_processed: total_processed,
errors: all_errors,
}
})
};
match res {
Ok(status) => {
JobQueue::complete(
&jobs,
job_id,
serde_json::json!({
"files_found": status.files_found,
"files_processed": status.files_processed,
"errors": status.errors,
}),
)
.await;
}
Err(e) => {
JobQueue::fail(&jobs, job_id, e.to_string()).await;
}
}
}
JobKind::GenerateThumbnails { media_ids } => {
let thumb_dir = pinakes_core::thumbnail::default_thumbnail_dir();
let thumb_config = config.thumbnails.clone();
let total = media_ids.len();
let mut generated = 0usize;
let mut errors = Vec::new();
for (i, mid) in media_ids.iter().enumerate() {
if cancel.is_cancelled() {
break;
}
JobQueue::update_progress(
&jobs,
job_id,
i as f32 / total.max(1) as f32, // guard against NaN on an empty batch
format!("{}/{}", i, total),
)
.await;
match storage.get_media(*mid).await {
Ok(item) => {
let source = item.path.clone();
let mt = item.media_type;
let id = item.id;
let td = thumb_dir.clone();
let tc = thumb_config.clone();
let res = tokio::task::spawn_blocking(move || {
pinakes_core::thumbnail::generate_thumbnail_with_config(
id, &source, mt, &td, &tc,
)
})
.await;
match res {
Ok(Ok(Some(path))) => {
let mut updated = item;
updated.thumbnail_path = Some(path);
let _ = storage.update_media(&updated).await;
generated += 1;
}
Ok(Ok(None)) => {}
Ok(Err(e)) => errors.push(format!("{}: {}", mid, e)),
Err(e) => errors.push(format!("{}: {}", mid, e)),
}
}
Err(e) => errors.push(format!("{}: {}", mid, e)),
}
}
JobQueue::complete(
&jobs,
job_id,
serde_json::json!({
"generated": generated, "errors": errors
}),
)
.await;
}
JobKind::VerifyIntegrity { media_ids } => {
let ids = if media_ids.is_empty() {
None
} else {
Some(media_ids.as_slice())
};
match pinakes_core::integrity::verify_integrity(&storage, ids).await {
Ok(report) => {
JobQueue::complete(
&jobs,
job_id,
serde_json::to_value(&report).unwrap_or_default(),
)
.await;
}
Err(e) => JobQueue::fail(&jobs, job_id, e.to_string()).await,
}
}
JobKind::OrphanDetection => {
match pinakes_core::integrity::detect_orphans(&storage).await {
Ok(report) => {
JobQueue::complete(
&jobs,
job_id,
serde_json::to_value(&report).unwrap_or_default(),
)
.await;
}
Err(e) => JobQueue::fail(&jobs, job_id, e.to_string()).await,
}
}
JobKind::CleanupThumbnails => {
let thumb_dir = pinakes_core::thumbnail::default_thumbnail_dir();
match pinakes_core::integrity::cleanup_orphaned_thumbnails(
&storage, &thumb_dir,
)
.await
{
Ok(removed) => {
JobQueue::complete(
&jobs,
job_id,
serde_json::json!({ "removed": removed }),
)
.await;
}
Err(e) => JobQueue::fail(&jobs, job_id, e.to_string()).await,
}
}
JobKind::Export {
format,
destination,
} => {
match pinakes_core::export::export_library(&storage, &format, &destination)
.await
{
Ok(result) => {
JobQueue::complete(
&jobs,
job_id,
serde_json::to_value(&result).unwrap_or_default(),
)
.await;
}
Err(e) => JobQueue::fail(&jobs, job_id, e.to_string()).await,
}
}
};
let _ = result;
drop(cancel);
})
},
);
// Initialize cache layer
let cache = std::sync::Arc::new(pinakes_core::cache::CacheLayer::new(
config.jobs.cache_ttl_secs,
));
// Initialize scheduler with cancellation support
let shutdown_token = tokio_util::sync::CancellationToken::new();
let config_arc = Arc::new(RwLock::new(config));
let scheduler = pinakes_core::scheduler::TaskScheduler::new(
job_queue.clone(),
shutdown_token.clone(),
config_arc.clone(),
Some(config_path.clone()),
);
let scheduler = Arc::new(scheduler);
// Restore saved scheduler state from config
scheduler.restore_state().await;
// Spawn scheduler background loop
{
let scheduler = scheduler.clone();
tokio::spawn(async move {
scheduler.run().await;
});
}
let state = AppState {
storage: storage.clone(),
config: config_arc,
config_path: Some(config_path),
scan_progress: pinakes_core::scan::ScanProgress::new(),
sessions: Arc::new(RwLock::new(std::collections::HashMap::new())),
job_queue,
cache,
scheduler,
};
// Periodic session cleanup (every 15 minutes)
{
let sessions = state.sessions.clone();
let cancel = shutdown_token.clone();
tokio::spawn(async move {
let mut interval = tokio::time::interval(std::time::Duration::from_secs(15 * 60));
loop {
tokio::select! {
_ = interval.tick() => {
pinakes_server::state::cleanup_expired_sessions(&sessions).await;
}
_ = cancel.cancelled() => {
break;
}
}
}
});
}
let router = app::create_router(state);
info!(addr = %addr, "server listening");
let listener = tokio::net::TcpListener::bind(&addr).await?;
axum::serve(
listener,
router.into_make_service_with_connect_info::<std::net::SocketAddr>(),
)
.with_graceful_shutdown(shutdown_signal())
.await?;
shutdown_token.cancel();
info!("server shut down");
Ok(())
}
async fn shutdown_signal() {
let ctrl_c = async {
match tokio::signal::ctrl_c().await {
Ok(()) => {}
Err(e) => {
tracing::warn!(error = %e, "failed to install Ctrl+C handler");
std::future::pending::<()>().await;
}
}
};
#[cfg(unix)]
let terminate = async {
match tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate()) {
Ok(mut signal) => {
signal.recv().await;
}
Err(e) => {
tracing::warn!(error = %e, "failed to install SIGTERM handler");
std::future::pending::<()>().await;
}
}
};
#[cfg(not(unix))]
let terminate = std::future::pending::<()>();
tokio::select! {
_ = ctrl_c => info!("received Ctrl+C, shutting down"),
_ = terminate => info!("received SIGTERM, shutting down"),
}
}

pinakes-server/src/routes/audit.rs

@@ -0,0 +1,23 @@
use axum::Json;
use axum::extract::{Query, State};
use crate::dto::*;
use crate::error::ApiError;
use crate::state::AppState;
use pinakes_core::model::Pagination;
pub async fn list_audit(
State(state): State<AppState>,
Query(params): Query<PaginationParams>,
) -> Result<Json<Vec<AuditEntryResponse>>, ApiError> {
let pagination = Pagination::new(
params.offset.unwrap_or(0),
params.limit.unwrap_or(50).min(1000),
None,
);
let entries = state.storage.list_audit_entries(None, &pagination).await?;
Ok(Json(
entries.into_iter().map(AuditEntryResponse::from).collect(),
))
}

pinakes-server/src/routes/auth.rs

@@ -0,0 +1,119 @@
use axum::Json;
use axum::extract::State;
use axum::http::{HeaderMap, StatusCode};
use crate::dto::{LoginRequest, LoginResponse, UserInfoResponse};
use crate::state::AppState;
pub async fn login(
State(state): State<AppState>,
Json(req): Json<LoginRequest>,
) -> Result<Json<LoginResponse>, StatusCode> {
// Limit input sizes to prevent DoS
if req.username.len() > 255 || req.password.len() > 1024 {
return Err(StatusCode::BAD_REQUEST);
}
let config = state.config.read().await;
if !config.accounts.enabled {
return Err(StatusCode::NOT_FOUND);
}
let user = config
.accounts
.users
.iter()
.find(|u| u.username == req.username);
let user = match user {
Some(u) => u,
None => {
tracing::warn!(username = %req.username, "login failed: unknown user");
return Err(StatusCode::UNAUTHORIZED);
}
};
// Verify password using argon2
use argon2::password_hash::PasswordVerifier;
let hash = &user.password_hash;
let parsed_hash = argon2::password_hash::PasswordHash::new(hash)
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
let valid = argon2::Argon2::default()
.verify_password(req.password.as_bytes(), &parsed_hash)
.is_ok();
if !valid {
tracing::warn!(username = %req.username, "login failed: invalid password");
return Err(StatusCode::UNAUTHORIZED);
}
// Generate session token
use rand::Rng;
let token: String = rand::rng()
.sample_iter(&rand::distr::Alphanumeric)
.take(48)
.map(char::from)
.collect();
let role = user.role;
let username = user.username.clone();
// Store session
{
let mut sessions = state.sessions.write().await;
sessions.insert(
token.clone(),
crate::state::SessionInfo {
username: username.clone(),
role,
created_at: chrono::Utc::now(),
},
);
}
tracing::info!(username = %username, role = %role, "login successful");
Ok(Json(LoginResponse {
token,
username,
role: role.to_string(),
}))
}
pub async fn logout(State(state): State<AppState>, headers: HeaderMap) -> StatusCode {
if let Some(token) = extract_bearer_token(&headers) {
let mut sessions = state.sessions.write().await;
sessions.remove(token);
}
StatusCode::OK
}
pub async fn me(
State(state): State<AppState>,
headers: HeaderMap,
) -> Result<Json<UserInfoResponse>, StatusCode> {
let config = state.config.read().await;
if !config.accounts.enabled {
// When accounts are not enabled, return a default admin user
return Ok(Json(UserInfoResponse {
username: "admin".to_string(),
role: "admin".to_string(),
}));
}
drop(config);
let token = extract_bearer_token(&headers).ok_or(StatusCode::UNAUTHORIZED)?;
let sessions = state.sessions.read().await;
let session = sessions.get(token).ok_or(StatusCode::UNAUTHORIZED)?;
Ok(Json(UserInfoResponse {
username: session.username.clone(),
role: session.role.to_string(),
}))
}
fn extract_bearer_token(headers: &HeaderMap) -> Option<&str> {
headers
.get("authorization")
.and_then(|v| v.to_str().ok())
.and_then(|s| s.strip_prefix("Bearer "))
}
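
// Sketch: producing a password hash that verify_password above will accept,
// e.g. for seeding user entries in the accounts config. Assumes the argon2
// crate's default features (password-hash with the rand_core re-export).
#[cfg(test)]
mod password_hash_sketch {
    use argon2::Argon2;
    use argon2::password_hash::rand_core::OsRng;
    use argon2::password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString};

    #[test]
    fn hash_round_trips() {
        let salt = SaltString::generate(&mut OsRng);
        let hash = Argon2::default()
            .hash_password(b"hunter2", &salt)
            .unwrap()
            .to_string();
        let parsed = PasswordHash::new(&hash).unwrap();
        assert!(
            Argon2::default()
                .verify_password(b"hunter2", &parsed)
                .is_ok()
        );
    }
}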

pinakes-server/src/routes/collections.rs

@@ -0,0 +1,101 @@
use axum::Json;
use axum::extract::{Path, State};
use uuid::Uuid;
use crate::dto::*;
use crate::error::ApiError;
use crate::state::AppState;
use pinakes_core::model::{CollectionKind, MediaId};
pub async fn create_collection(
State(state): State<AppState>,
Json(req): Json<CreateCollectionRequest>,
) -> Result<Json<CollectionResponse>, ApiError> {
if req.name.is_empty() || req.name.len() > 255 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"collection name must be 1-255 characters".into(),
),
));
}
if let Some(ref desc) = req.description
&& desc.len() > 10_000
{
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"description exceeds 10000 characters".into(),
),
));
}
let kind = match req.kind.as_str() {
"virtual" => CollectionKind::Virtual,
_ => CollectionKind::Manual,
};
let col = pinakes_core::collections::create_collection(
&state.storage,
&req.name,
kind,
req.description.as_deref(),
req.filter_query.as_deref(),
)
.await?;
Ok(Json(CollectionResponse::from(col)))
}
pub async fn list_collections(
State(state): State<AppState>,
) -> Result<Json<Vec<CollectionResponse>>, ApiError> {
let cols = state.storage.list_collections().await?;
Ok(Json(
cols.into_iter().map(CollectionResponse::from).collect(),
))
}
pub async fn get_collection(
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<CollectionResponse>, ApiError> {
let col = state.storage.get_collection(id).await?;
Ok(Json(CollectionResponse::from(col)))
}
pub async fn delete_collection(
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
state.storage.delete_collection(id).await?;
Ok(Json(serde_json::json!({"deleted": true})))
}
pub async fn add_member(
State(state): State<AppState>,
Path(collection_id): Path<Uuid>,
Json(req): Json<AddMemberRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
pinakes_core::collections::add_member(
&state.storage,
collection_id,
MediaId(req.media_id),
req.position.unwrap_or(0),
)
.await?;
Ok(Json(serde_json::json!({"added": true})))
}
pub async fn remove_member(
State(state): State<AppState>,
Path((collection_id, media_id)): Path<(Uuid, Uuid)>,
) -> Result<Json<serde_json::Value>, ApiError> {
pinakes_core::collections::remove_member(&state.storage, collection_id, MediaId(media_id))
.await?;
Ok(Json(serde_json::json!({"removed": true})))
}
pub async fn get_members(
State(state): State<AppState>,
Path(collection_id): Path<Uuid>,
) -> Result<Json<Vec<MediaResponse>>, ApiError> {
let items = pinakes_core::collections::get_members(&state.storage, collection_id).await?;
Ok(Json(items.into_iter().map(MediaResponse::from).collect()))
}
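
// Example request body for POST /api/v1/collections (sketch):
//   {"name":"Favourites","kind":"manual"}
// `kind` may be "virtual" (optionally with a `filter_query`); any other value
// falls back to Manual, per the match in create_collection.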

pinakes-server/src/routes/config.rs

@@ -0,0 +1,217 @@
use axum::Json;
use axum::extract::State;
use crate::dto::*;
use crate::error::ApiError;
use crate::state::AppState;

/// Best-effort check of whether the config file (or its parent directory,
/// when the file does not yet exist) is writable.
fn is_config_writable(path: Option<&std::path::Path>) -> bool {
    match path {
        Some(path) if path.exists() => std::fs::metadata(path)
            .map(|m| !m.permissions().readonly())
            .unwrap_or(false),
        Some(path) => path
            .parent()
            .map(|parent| {
                std::fs::metadata(parent)
                    .map(|m| !m.permissions().readonly())
                    .unwrap_or(false)
            })
            .unwrap_or(false),
        None => false,
    }
}
pub async fn get_config(State(state): State<AppState>) -> Result<Json<ConfigResponse>, ApiError> {
let config = state.config.read().await;
let roots = state.storage.list_root_dirs().await?;
let config_path = state
.config_path
.as_ref()
.map(|p| p.to_string_lossy().to_string());
    let config_writable = is_config_writable(state.config_path.as_deref());
Ok(Json(ConfigResponse {
backend: format!("{:?}", config.storage.backend).to_lowercase(),
database_path: config
.storage
.sqlite
.as_ref()
.map(|s| s.path.to_string_lossy().to_string()),
roots: roots
.iter()
.map(|p| p.to_string_lossy().to_string())
.collect(),
scanning: ScanningConfigResponse {
watch: config.scanning.watch,
poll_interval_secs: config.scanning.poll_interval_secs,
ignore_patterns: config.scanning.ignore_patterns.clone(),
},
server: ServerConfigResponse {
host: config.server.host.clone(),
port: config.server.port,
},
ui: UiConfigResponse::from(&config.ui),
config_path,
config_writable,
}))
}
pub async fn get_ui_config(
State(state): State<AppState>,
) -> Result<Json<UiConfigResponse>, ApiError> {
let config = state.config.read().await;
Ok(Json(UiConfigResponse::from(&config.ui)))
}
pub async fn update_ui_config(
State(state): State<AppState>,
Json(req): Json<UpdateUiConfigRequest>,
) -> Result<Json<UiConfigResponse>, ApiError> {
let mut config = state.config.write().await;
if let Some(theme) = req.theme {
config.ui.theme = theme;
}
if let Some(default_view) = req.default_view {
config.ui.default_view = default_view;
}
if let Some(default_page_size) = req.default_page_size {
config.ui.default_page_size = default_page_size;
}
if let Some(default_view_mode) = req.default_view_mode {
config.ui.default_view_mode = default_view_mode;
}
if let Some(auto_play) = req.auto_play_media {
config.ui.auto_play_media = auto_play;
}
if let Some(show_thumbs) = req.show_thumbnails {
config.ui.show_thumbnails = show_thumbs;
}
if let Some(collapsed) = req.sidebar_collapsed {
config.ui.sidebar_collapsed = collapsed;
}
if let Some(ref path) = state.config_path {
config.save_to_file(path).map_err(ApiError)?;
}
Ok(Json(UiConfigResponse::from(&config.ui)))
}
pub async fn update_scanning_config(
State(state): State<AppState>,
Json(req): Json<UpdateScanningRequest>,
) -> Result<Json<ConfigResponse>, ApiError> {
let mut config = state.config.write().await;
if let Some(watch) = req.watch {
config.scanning.watch = watch;
}
if let Some(interval) = req.poll_interval_secs {
config.scanning.poll_interval_secs = interval;
}
if let Some(patterns) = req.ignore_patterns {
config.scanning.ignore_patterns = patterns;
}
// Persist to disk if we have a config path
if let Some(ref path) = state.config_path {
config.save_to_file(path).map_err(ApiError)?;
}
let roots = state.storage.list_root_dirs().await?;
let config_path = state
.config_path
.as_ref()
.map(|p| p.to_string_lossy().to_string());
    let config_writable = is_config_writable(state.config_path.as_deref());
Ok(Json(ConfigResponse {
backend: format!("{:?}", config.storage.backend).to_lowercase(),
database_path: config
.storage
.sqlite
.as_ref()
.map(|s| s.path.to_string_lossy().to_string()),
roots: roots
.iter()
.map(|p| p.to_string_lossy().to_string())
.collect(),
scanning: ScanningConfigResponse {
watch: config.scanning.watch,
poll_interval_secs: config.scanning.poll_interval_secs,
ignore_patterns: config.scanning.ignore_patterns.clone(),
},
server: ServerConfigResponse {
host: config.server.host.clone(),
port: config.server.port,
},
ui: UiConfigResponse::from(&config.ui),
config_path,
config_writable,
}))
}
pub async fn add_root(
State(state): State<AppState>,
Json(req): Json<RootDirRequest>,
) -> Result<Json<ConfigResponse>, ApiError> {
let path = std::path::PathBuf::from(&req.path);
if !path.exists() {
return Err(ApiError(pinakes_core::error::PinakesError::FileNotFound(
path,
)));
}
state.storage.add_root_dir(path.clone()).await?;
{
let mut config = state.config.write().await;
if !config.directories.roots.contains(&path) {
config.directories.roots.push(path);
}
if let Some(ref config_path) = state.config_path {
config.save_to_file(config_path).map_err(ApiError)?;
}
}
get_config(State(state)).await
}
pub async fn remove_root(
State(state): State<AppState>,
Json(req): Json<RootDirRequest>,
) -> Result<Json<ConfigResponse>, ApiError> {
let path = std::path::PathBuf::from(&req.path);
state.storage.remove_root_dir(&path).await?;
{
let mut config = state.config.write().await;
config.directories.roots.retain(|r| r != &path);
if let Some(ref config_path) = state.config_path {
config.save_to_file(config_path).map_err(ApiError)?;
}
}
get_config(State(state)).await
}

pinakes-server/src/routes/database.rs

@@ -0,0 +1,34 @@
use axum::Json;
use axum::extract::State;
use crate::dto::DatabaseStatsResponse;
use crate::error::ApiError;
use crate::state::AppState;
pub async fn database_stats(
State(state): State<AppState>,
) -> Result<Json<DatabaseStatsResponse>, ApiError> {
let stats = state.storage.database_stats().await?;
Ok(Json(DatabaseStatsResponse {
media_count: stats.media_count,
tag_count: stats.tag_count,
collection_count: stats.collection_count,
audit_count: stats.audit_count,
database_size_bytes: stats.database_size_bytes,
backend_name: stats.backend_name,
}))
}
pub async fn vacuum_database(
State(state): State<AppState>,
) -> Result<Json<serde_json::Value>, ApiError> {
state.storage.vacuum().await?;
Ok(Json(serde_json::json!({"status": "ok"})))
}
pub async fn clear_database(
State(state): State<AppState>,
) -> Result<Json<serde_json::Value>, ApiError> {
state.storage.clear_all_data().await?;
Ok(Json(serde_json::json!({"status": "ok"})))
}

pinakes-server/src/routes/duplicates.rs

@@ -0,0 +1,30 @@
use axum::Json;
use axum::extract::State;
use crate::dto::{DuplicateGroupResponse, MediaResponse};
use crate::error::ApiError;
use crate::state::AppState;
pub async fn list_duplicates(
State(state): State<AppState>,
) -> Result<Json<Vec<DuplicateGroupResponse>>, ApiError> {
let groups = state.storage.find_duplicates().await?;
let response: Vec<DuplicateGroupResponse> = groups
.into_iter()
.map(|items| {
let content_hash = items
.first()
.map(|i| i.content_hash.0.clone())
.unwrap_or_default();
let media_items: Vec<MediaResponse> =
items.into_iter().map(MediaResponse::from).collect();
DuplicateGroupResponse {
content_hash,
items: media_items,
}
})
.collect();
Ok(Json(response))
}

pinakes-server/src/routes/export.rs

@@ -0,0 +1,42 @@
use axum::Json;
use axum::extract::State;
use serde::Deserialize;
use std::path::PathBuf;
use crate::error::ApiError;
use crate::state::AppState;
#[derive(Debug, Deserialize)]
pub struct ExportRequest {
pub format: String,
pub destination: PathBuf,
}
pub async fn trigger_export(
State(state): State<AppState>,
) -> Result<Json<serde_json::Value>, ApiError> {
// Default export to JSON in data dir
let dest = pinakes_core::config::Config::default_data_dir().join("export.json");
let kind = pinakes_core::jobs::JobKind::Export {
format: pinakes_core::jobs::ExportFormat::Json,
destination: dest,
};
let job_id = state.job_queue.submit(kind).await;
Ok(Json(serde_json::json!({ "job_id": job_id.to_string() })))
}
pub async fn trigger_export_with_options(
State(state): State<AppState>,
Json(req): Json<ExportRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
let format = match req.format.as_str() {
"csv" => pinakes_core::jobs::ExportFormat::Csv,
_ => pinakes_core::jobs::ExportFormat::Json,
};
let kind = pinakes_core::jobs::JobKind::Export {
format,
destination: req.destination,
};
let job_id = state.job_queue.submit(kind).await;
Ok(Json(serde_json::json!({ "job_id": job_id.to_string() })))
}

pinakes-server/src/routes/health.rs

@@ -0,0 +1,8 @@
use axum::Json;
pub async fn health() -> Json<serde_json::Value> {
Json(serde_json::json!({
"status": "ok",
"version": env!("CARGO_PKG_VERSION"),
}))
}
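
// Example response body (the version string is read from this crate's
// Cargo.toml at compile time via env!; "0.1.0" below is illustrative):
//   {"status":"ok","version":"0.1.0"}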

pinakes-server/src/routes/integrity.rs

@@ -0,0 +1,99 @@
use axum::Json;
use axum::extract::State;
use serde::Deserialize;
use crate::error::ApiError;
use crate::state::AppState;
#[derive(Debug, Deserialize)]
pub struct OrphanResolveRequest {
pub action: String,
pub ids: Vec<uuid::Uuid>,
}
pub async fn trigger_orphan_detection(
State(state): State<AppState>,
) -> Result<Json<serde_json::Value>, ApiError> {
let kind = pinakes_core::jobs::JobKind::OrphanDetection;
let job_id = state.job_queue.submit(kind).await;
Ok(Json(serde_json::json!({ "job_id": job_id.to_string() })))
}
pub async fn trigger_verify_integrity(
State(state): State<AppState>,
Json(req): Json<VerifyIntegrityRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let media_ids = req
        .media_ids
        .into_iter()
        .map(pinakes_core::model::MediaId)
        .collect();
let kind = pinakes_core::jobs::JobKind::VerifyIntegrity { media_ids };
let job_id = state.job_queue.submit(kind).await;
Ok(Json(serde_json::json!({ "job_id": job_id.to_string() })))
}
#[derive(Debug, Deserialize)]
pub struct VerifyIntegrityRequest {
pub media_ids: Vec<uuid::Uuid>,
}
pub async fn trigger_cleanup_thumbnails(
State(state): State<AppState>,
) -> Result<Json<serde_json::Value>, ApiError> {
let kind = pinakes_core::jobs::JobKind::CleanupThumbnails;
let job_id = state.job_queue.submit(kind).await;
Ok(Json(serde_json::json!({ "job_id": job_id.to_string() })))
}
#[derive(Debug, Deserialize)]
pub struct GenerateThumbnailsRequest {
/// When true, only generate thumbnails for items that don't have one yet.
/// When false (default), regenerate all thumbnails.
#[serde(default)]
pub only_missing: bool,
}
pub async fn generate_all_thumbnails(
State(state): State<AppState>,
body: Option<Json<GenerateThumbnailsRequest>>,
) -> Result<Json<serde_json::Value>, ApiError> {
let only_missing = body.map(|b| b.only_missing).unwrap_or(false);
let media_ids = state
.storage
.list_media_ids_for_thumbnails(only_missing)
.await?;
let count = media_ids.len();
if count == 0 {
return Ok(Json(serde_json::json!({
"job_id": null,
"media_count": 0,
"message": "no media items to process"
})));
}
let kind = pinakes_core::jobs::JobKind::GenerateThumbnails { media_ids };
let job_id = state.job_queue.submit(kind).await;
Ok(Json(serde_json::json!({
"job_id": job_id.to_string(),
"media_count": count
})))
}
pub async fn resolve_orphans(
State(state): State<AppState>,
Json(req): Json<OrphanResolveRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
let action = match req.action.as_str() {
"delete" => pinakes_core::integrity::OrphanAction::Delete,
_ => pinakes_core::integrity::OrphanAction::Ignore,
};
let ids: Vec<pinakes_core::model::MediaId> = req
.ids
.into_iter()
.map(pinakes_core::model::MediaId)
.collect();
    let count = pinakes_core::integrity::resolve_orphans(&state.storage, action, &ids)
        .await
        .map_err(ApiError)?;
Ok(Json(serde_json::json!({ "resolved": count })))
}
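
// Example request body for POST /api/v1/orphans/resolve (sketch):
//   {"action":"delete","ids":["<uuid>","<uuid>"]}
// Any action other than "delete" is treated as Ignore, per the match above.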

pinakes-server/src/routes/jobs.rs

@@ -0,0 +1,34 @@
use axum::Json;
use axum::extract::{Path, State};
use crate::error::ApiError;
use crate::state::AppState;
use pinakes_core::jobs::Job;
pub async fn list_jobs(State(state): State<AppState>) -> Json<Vec<Job>> {
Json(state.job_queue.list().await)
}
pub async fn get_job(
State(state): State<AppState>,
Path(id): Path<uuid::Uuid>,
) -> Result<Json<Job>, ApiError> {
state.job_queue.status(id).await.map(Json).ok_or_else(|| {
pinakes_core::error::PinakesError::NotFound(format!("job not found: {id}")).into()
})
}
pub async fn cancel_job(
State(state): State<AppState>,
Path(id): Path<uuid::Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
let cancelled = state.job_queue.cancel(id).await;
if cancelled {
Ok(Json(serde_json::json!({ "cancelled": true })))
} else {
Err(pinakes_core::error::PinakesError::NotFound(format!(
"job not found or already finished: {id}"
))
.into())
}
}

pinakes-server/src/routes/media.rs

@@ -0,0 +1,795 @@
use axum::Json;
use axum::extract::{Path, Query, State};
use uuid::Uuid;
use crate::dto::*;
use crate::error::ApiError;
use crate::state::AppState;
use pinakes_core::model::{MediaId, Pagination};
use pinakes_core::storage::DynStorageBackend;
/// Apply tags and add to collection after a successful import.
/// Shared logic used by import_with_options, batch_import, and import_directory_endpoint.
async fn apply_import_post_processing(
storage: &DynStorageBackend,
media_id: MediaId,
tag_ids: Option<&[Uuid]>,
new_tags: Option<&[String]>,
collection_id: Option<Uuid>,
) {
if let Some(tag_ids) = tag_ids {
for tid in tag_ids {
if let Err(e) = pinakes_core::tags::tag_media(storage, media_id, *tid).await {
tracing::warn!(error = %e, "failed to apply tag during import");
}
}
}
if let Some(new_tags) = new_tags {
for name in new_tags {
match pinakes_core::tags::create_tag(storage, name, None).await {
Ok(tag) => {
if let Err(e) = pinakes_core::tags::tag_media(storage, media_id, tag.id).await {
tracing::warn!(error = %e, "failed to apply new tag during import");
}
}
Err(e) => {
tracing::warn!(tag_name = %name, error = %e, "failed to create tag during import");
}
}
}
}
if let Some(col_id) = collection_id
&& let Err(e) = pinakes_core::collections::add_member(storage, col_id, media_id, 0).await
{
tracing::warn!(error = %e, "failed to add to collection during import");
}
}
pub async fn import_media(
State(state): State<AppState>,
Json(req): Json<ImportRequest>,
) -> Result<Json<ImportResponse>, ApiError> {
let result = pinakes_core::import::import_file(&state.storage, &req.path).await?;
Ok(Json(ImportResponse {
media_id: result.media_id.0.to_string(),
was_duplicate: result.was_duplicate,
}))
}
pub async fn list_media(
State(state): State<AppState>,
Query(params): Query<PaginationParams>,
) -> Result<Json<Vec<MediaResponse>>, ApiError> {
let pagination = Pagination::new(
params.offset.unwrap_or(0),
params.limit.unwrap_or(50).min(1000),
params.sort,
);
let items = state.storage.list_media(&pagination).await?;
Ok(Json(items.into_iter().map(MediaResponse::from).collect()))
}
pub async fn get_media(
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<MediaResponse>, ApiError> {
let item = state.storage.get_media(MediaId(id)).await?;
Ok(Json(MediaResponse::from(item)))
}
/// Maximum length for short text fields (title, artist, album, genre).
const MAX_SHORT_TEXT: usize = 500;
/// Maximum length for long text fields (description).
const MAX_LONG_TEXT: usize = 10_000;
fn validate_optional_text(field: &Option<String>, name: &str, max: usize) -> Result<(), ApiError> {
if let Some(v) = field
&& v.len() > max
{
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(format!(
"{name} exceeds {max} characters"
)),
));
}
Ok(())
}
pub async fn update_media(
State(state): State<AppState>,
Path(id): Path<Uuid>,
Json(req): Json<UpdateMediaRequest>,
) -> Result<Json<MediaResponse>, ApiError> {
validate_optional_text(&req.title, "title", MAX_SHORT_TEXT)?;
validate_optional_text(&req.artist, "artist", MAX_SHORT_TEXT)?;
validate_optional_text(&req.album, "album", MAX_SHORT_TEXT)?;
validate_optional_text(&req.genre, "genre", MAX_SHORT_TEXT)?;
validate_optional_text(&req.description, "description", MAX_LONG_TEXT)?;
let mut item = state.storage.get_media(MediaId(id)).await?;
if let Some(title) = req.title {
item.title = Some(title);
}
if let Some(artist) = req.artist {
item.artist = Some(artist);
}
if let Some(album) = req.album {
item.album = Some(album);
}
if let Some(genre) = req.genre {
item.genre = Some(genre);
}
if let Some(year) = req.year {
item.year = Some(year);
}
if let Some(description) = req.description {
item.description = Some(description);
}
item.updated_at = chrono::Utc::now();
state.storage.update_media(&item).await?;
pinakes_core::audit::record_action(
&state.storage,
Some(item.id),
pinakes_core::model::AuditAction::Updated,
None,
)
.await?;
Ok(Json(MediaResponse::from(item)))
}
pub async fn delete_media(
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
let media_id = MediaId(id);
// Fetch item first to get thumbnail path for cleanup
let item = state.storage.get_media(media_id).await?;
// Record audit BEFORE delete to avoid FK constraint violation
pinakes_core::audit::record_action(
&state.storage,
Some(media_id),
pinakes_core::model::AuditAction::Deleted,
None,
)
.await?;
state.storage.delete_media(media_id).await?;
// Clean up thumbnail file if it exists
if let Some(ref thumb_path) = item.thumbnail_path
&& let Err(e) = tokio::fs::remove_file(thumb_path).await
&& e.kind() != std::io::ErrorKind::NotFound
{
tracing::warn!(path = %thumb_path.display(), error = %e, "failed to remove thumbnail");
}
Ok(Json(serde_json::json!({"deleted": true})))
}
pub async fn open_media(
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
let item = state.storage.get_media(MediaId(id)).await?;
let opener = pinakes_core::opener::default_opener();
opener.open(&item.path)?;
pinakes_core::audit::record_action(
&state.storage,
Some(item.id),
pinakes_core::model::AuditAction::Opened,
None,
)
.await?;
Ok(Json(serde_json::json!({"opened": true})))
}
pub async fn stream_media(
State(state): State<AppState>,
Path(id): Path<Uuid>,
headers: axum::http::HeaderMap,
) -> Result<axum::response::Response, ApiError> {
use axum::body::Body;
use axum::http::{StatusCode, header};
use tokio::io::{AsyncReadExt, AsyncSeekExt};
use tokio_util::io::ReaderStream;
let item = state.storage.get_media(MediaId(id)).await?;
let file = tokio::fs::File::open(&item.path).await.map_err(|_e| {
ApiError(pinakes_core::error::PinakesError::FileNotFound(
item.path.clone(),
))
})?;
let metadata = file
.metadata()
.await
.map_err(|e| ApiError(pinakes_core::error::PinakesError::Io(e)))?;
let total_size = metadata.len();
    let content_type = item.media_type.mime_type();
    // Sanitize the filename for the Content-Disposition header: embedded
    // quotes or CR/LF would otherwise produce an invalid header value and
    // make the response builder fail.
    let safe_name = item.file_name.replace(['"', '\r', '\n'], "_");
    // Parse Range header
if let Some(range_header) = headers.get(header::RANGE)
&& let Ok(range_str) = range_header.to_str()
&& let Some(range) = parse_range(range_str, total_size)
{
let (start, end) = range;
let content_length = end - start + 1;
let mut file = file;
file.seek(std::io::SeekFrom::Start(start))
.await
.map_err(|e| ApiError(pinakes_core::error::PinakesError::Io(e)))?;
let limited = file.take(content_length);
let stream = ReaderStream::new(limited);
let body = Body::from_stream(stream);
return axum::response::Response::builder()
.status(StatusCode::PARTIAL_CONTENT)
.header(header::CONTENT_TYPE, content_type)
.header(header::CONTENT_LENGTH, content_length)
.header(header::ACCEPT_RANGES, "bytes")
.header(
header::CONTENT_RANGE,
format!("bytes {start}-{end}/{total_size}"),
)
            .header(
                header::CONTENT_DISPOSITION,
                format!("inline; filename=\"{safe_name}\""),
            )
.body(body)
.map_err(|e| {
ApiError(pinakes_core::error::PinakesError::InvalidOperation(
format!("failed to build response: {e}"),
))
});
}
// Full response (no Range header)
let stream = ReaderStream::new(file);
let body = Body::from_stream(stream);
axum::response::Response::builder()
.header(header::CONTENT_TYPE, content_type)
.header(header::CONTENT_LENGTH, total_size)
.header(header::ACCEPT_RANGES, "bytes")
        .header(
            header::CONTENT_DISPOSITION,
            format!("inline; filename=\"{safe_name}\""),
        )
.body(body)
.map_err(|e| {
ApiError(pinakes_core::error::PinakesError::InvalidOperation(
format!("failed to build response: {e}"),
))
})
}
/// Parse a `Range: bytes=START-END` header value.
/// Returns `Some((start, end))` inclusive, or `None` if the header is
/// malformed or the requested range is unsatisfiable.
fn parse_range(header: &str, total_size: u64) -> Option<(u64, u64)> {
    // An empty file has no satisfiable byte range; bail out early so the
    // `total_size - 1` arithmetic below cannot underflow.
    if total_size == 0 {
        return None;
    }
    let bytes_prefix = header.strip_prefix("bytes=")?;
    let (start_str, end_str) = bytes_prefix.split_once('-')?;
    if start_str.is_empty() {
        // Suffix range: bytes=-500 means last 500 bytes. A zero-length
        // suffix is unsatisfiable.
        let suffix_len: u64 = end_str.parse().ok()?;
        if suffix_len == 0 {
            return None;
        }
        let start = total_size.saturating_sub(suffix_len);
        Some((start, total_size - 1))
    } else {
        let start: u64 = start_str.parse().ok()?;
        let end = if end_str.is_empty() {
            total_size - 1
        } else {
            end_str.parse::<u64>().ok()?.min(total_size - 1)
        };
        if start > end || start >= total_size {
            return None;
        }
        Some((start, end))
    }
}
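// A few illustrative cases for parse_range, written against the version
// above (including the zero-size and zero-length-suffix guards); a sketch,
// not an exhaustive RFC 7233 suite.
#[cfg(test)]
mod range_tests {
    use super::parse_range;

    #[test]
    fn parses_common_range_forms() {
        // Explicit inclusive range.
        assert_eq!(parse_range("bytes=0-99", 1000), Some((0, 99)));
        // Open-ended range runs to the last byte.
        assert_eq!(parse_range("bytes=500-", 1000), Some((500, 999)));
        // Suffix range: the last 100 bytes.
        assert_eq!(parse_range("bytes=-100", 1000), Some((900, 999)));
        // End is clamped to the file size.
        assert_eq!(parse_range("bytes=0-5000", 1000), Some((0, 999)));
    }

    #[test]
    fn rejects_unsatisfiable_ranges() {
        // Start beyond the end of the file.
        assert_eq!(parse_range("bytes=1000-", 1000), None);
        // Inverted range.
        assert_eq!(parse_range("bytes=9-5", 1000), None);
        // Missing unit prefix.
        assert_eq!(parse_range("0-99", 1000), None);
        // Empty files and zero-length suffixes have no satisfiable range.
        assert_eq!(parse_range("bytes=0-", 0), None);
        assert_eq!(parse_range("bytes=-0", 1000), None);
    }
}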
pub async fn import_with_options(
State(state): State<AppState>,
Json(req): Json<ImportWithOptionsRequest>,
) -> Result<Json<ImportResponse>, ApiError> {
let result = pinakes_core::import::import_file(&state.storage, &req.path).await?;
if !result.was_duplicate {
apply_import_post_processing(
&state.storage,
result.media_id,
req.tag_ids.as_deref(),
req.new_tags.as_deref(),
req.collection_id,
)
.await;
}
Ok(Json(ImportResponse {
media_id: result.media_id.0.to_string(),
was_duplicate: result.was_duplicate,
}))
}
pub async fn batch_import(
State(state): State<AppState>,
Json(req): Json<BatchImportRequest>,
) -> Result<Json<BatchImportResponse>, ApiError> {
if req.paths.len() > 10_000 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"batch size exceeds limit of 10000".into(),
),
));
}
let mut results = Vec::new();
let mut imported = 0usize;
let mut duplicates = 0usize;
let mut errors = 0usize;
for path in &req.paths {
match pinakes_core::import::import_file(&state.storage, path).await {
Ok(result) => {
if result.was_duplicate {
duplicates += 1;
} else {
imported += 1;
apply_import_post_processing(
&state.storage,
result.media_id,
req.tag_ids.as_deref(),
req.new_tags.as_deref(),
req.collection_id,
)
.await;
}
results.push(BatchImportItemResult {
path: path.to_string_lossy().to_string(),
media_id: Some(result.media_id.0.to_string()),
was_duplicate: result.was_duplicate,
error: None,
});
}
Err(e) => {
errors += 1;
results.push(BatchImportItemResult {
path: path.to_string_lossy().to_string(),
media_id: None,
was_duplicate: false,
error: Some(e.to_string()),
});
}
}
}
let total = results.len();
Ok(Json(BatchImportResponse {
results,
total,
imported,
duplicates,
errors,
}))
}
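// Illustrative response shape for batch_import above, assuming
// BatchImportResponse and BatchImportItemResult serialize with their
// field names (one results entry per input path; counts come from the loop):
//
//   {
//     "results": [
//       { "path": "/library/a.mp3", "media_id": "<uuid>", "was_duplicate": false, "error": null }
//     ],
//     "total": 1, "imported": 1, "duplicates": 0, "errors": 0
//   }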
pub async fn import_directory_endpoint(
State(state): State<AppState>,
Json(req): Json<DirectoryImportRequest>,
) -> Result<Json<BatchImportResponse>, ApiError> {
let config = state.config.read().await;
let ignore_patterns = config.scanning.ignore_patterns.clone();
let concurrency = config.scanning.import_concurrency;
drop(config);
let import_results = pinakes_core::import::import_directory_with_concurrency(
&state.storage,
&req.path,
&ignore_patterns,
concurrency,
)
.await?;
let mut results = Vec::new();
let mut imported = 0usize;
let mut duplicates = 0usize;
let mut errors = 0usize;
for r in import_results {
match r {
Ok(result) => {
if result.was_duplicate {
duplicates += 1;
} else {
imported += 1;
apply_import_post_processing(
&state.storage,
result.media_id,
req.tag_ids.as_deref(),
req.new_tags.as_deref(),
req.collection_id,
)
.await;
}
results.push(BatchImportItemResult {
path: result.path.to_string_lossy().to_string(),
media_id: Some(result.media_id.0.to_string()),
was_duplicate: result.was_duplicate,
error: None,
});
}
Err(e) => {
errors += 1;
results.push(BatchImportItemResult {
                    path: String::new(), // no per-file path is available on the error arm
media_id: None,
was_duplicate: false,
error: Some(e.to_string()),
});
}
}
}
let total = results.len();
Ok(Json(BatchImportResponse {
results,
total,
imported,
duplicates,
errors,
}))
}
pub async fn preview_directory(
State(state): State<AppState>,
Json(req): Json<serde_json::Value>,
) -> Result<Json<DirectoryPreviewResponse>, ApiError> {
let path_str = req.get("path").and_then(|v| v.as_str()).ok_or_else(|| {
pinakes_core::error::PinakesError::InvalidOperation("path required".into())
})?;
let recursive = req
.get("recursive")
.and_then(|v| v.as_bool())
.unwrap_or(true);
let dir = std::path::PathBuf::from(path_str);
if !dir.is_dir() {
return Err(pinakes_core::error::PinakesError::FileNotFound(dir).into());
}
// Validate the directory is under a configured root (if roots are configured)
let roots = state.storage.list_root_dirs().await?;
if !roots.is_empty() {
let canonical = dir.canonicalize().map_err(|_| {
pinakes_core::error::PinakesError::InvalidOperation("cannot resolve path".into())
})?;
let allowed = roots.iter().any(|root| canonical.starts_with(root));
if !allowed {
return Err(pinakes_core::error::PinakesError::InvalidOperation(
"path is not under a configured root directory".into(),
)
.into());
}
}
let files: Vec<DirectoryPreviewFile> = tokio::task::spawn_blocking(move || {
let mut result = Vec::new();
fn walk_dir(
dir: &std::path::Path,
recursive: bool,
result: &mut Vec<DirectoryPreviewFile>,
) {
let Ok(entries) = std::fs::read_dir(dir) else {
return;
};
for entry in entries.flatten() {
let path = entry.path();
// Skip hidden files/dirs
if path
.file_name()
.map(|n| n.to_string_lossy().starts_with('.'))
.unwrap_or(false)
{
continue;
}
if path.is_dir() {
if recursive {
walk_dir(&path, recursive, result);
}
} else if path.is_file()
&& let Some(mt) = pinakes_core::media_type::MediaType::from_path(&path)
{
let size = entry.metadata().ok().map(|m| m.len()).unwrap_or(0);
let file_name = path
.file_name()
.map(|n| n.to_string_lossy().to_string())
.unwrap_or_default();
let media_type = serde_json::to_value(mt)
.ok()
.and_then(|v| v.as_str().map(String::from))
.unwrap_or_default();
result.push(DirectoryPreviewFile {
path: path.to_string_lossy().to_string(),
file_name,
media_type,
file_size: size,
});
}
}
}
walk_dir(&dir, recursive, &mut result);
result
})
.await
.map_err(|e| pinakes_core::error::PinakesError::Io(std::io::Error::other(e)))?;
let total_count = files.len();
let total_size = files.iter().map(|f| f.file_size).sum();
Ok(Json(DirectoryPreviewResponse {
files,
total_count,
total_size,
}))
}
pub async fn set_custom_field(
State(state): State<AppState>,
Path(id): Path<Uuid>,
Json(req): Json<SetCustomFieldRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
if req.name.is_empty() || req.name.len() > 255 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"field name must be 1-255 characters".into(),
),
));
}
    if req.value.len() > MAX_LONG_TEXT {
        return Err(ApiError(
            pinakes_core::error::PinakesError::InvalidOperation(format!(
                "field value exceeds {MAX_LONG_TEXT} characters"
            )),
        ));
    }
use pinakes_core::model::{CustomField, CustomFieldType};
let field_type = match req.field_type.as_str() {
"number" => CustomFieldType::Number,
"date" => CustomFieldType::Date,
"boolean" => CustomFieldType::Boolean,
_ => CustomFieldType::Text,
};
let field = CustomField {
field_type,
value: req.value,
};
state
.storage
.set_custom_field(MediaId(id), &req.name, &field)
.await?;
Ok(Json(serde_json::json!({"set": true})))
}
pub async fn delete_custom_field(
State(state): State<AppState>,
Path((id, name)): Path<(Uuid, String)>,
) -> Result<Json<serde_json::Value>, ApiError> {
state
.storage
.delete_custom_field(MediaId(id), &name)
.await?;
Ok(Json(serde_json::json!({"deleted": true})))
}
pub async fn batch_tag(
State(state): State<AppState>,
Json(req): Json<BatchTagRequest>,
) -> Result<Json<BatchOperationResponse>, ApiError> {
if req.media_ids.len() > 10_000 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"batch size exceeds limit of 10000".into(),
),
));
}
let media_ids: Vec<MediaId> = req.media_ids.iter().map(|id| MediaId(*id)).collect();
match state
.storage
.batch_tag_media(&media_ids, &req.tag_ids)
.await
{
Ok(count) => Ok(Json(BatchOperationResponse {
processed: count as usize,
errors: Vec::new(),
})),
Err(e) => Ok(Json(BatchOperationResponse {
processed: 0,
errors: vec![e.to_string()],
})),
}
}
pub async fn delete_all_media(
State(state): State<AppState>,
) -> Result<Json<BatchOperationResponse>, ApiError> {
// Record audit entry before deletion
if let Err(e) = pinakes_core::audit::record_action(
&state.storage,
None,
pinakes_core::model::AuditAction::Deleted,
Some("delete all media".to_string()),
)
.await
{
tracing::warn!(error = %e, "failed to record audit entry");
}
match state.storage.delete_all_media().await {
Ok(count) => Ok(Json(BatchOperationResponse {
processed: count as usize,
errors: Vec::new(),
})),
Err(e) => Ok(Json(BatchOperationResponse {
processed: 0,
errors: vec![e.to_string()],
})),
}
}
pub async fn batch_delete(
State(state): State<AppState>,
Json(req): Json<BatchDeleteRequest>,
) -> Result<Json<BatchOperationResponse>, ApiError> {
if req.media_ids.len() > 10_000 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"batch size exceeds limit of 10000".into(),
),
));
}
let media_ids: Vec<MediaId> = req.media_ids.iter().map(|id| MediaId(*id)).collect();
// Record audit entries BEFORE delete to avoid FK constraint violation.
// Use None for media_id since they'll be deleted; include ID in details.
for id in &media_ids {
if let Err(e) = pinakes_core::audit::record_action(
&state.storage,
None,
pinakes_core::model::AuditAction::Deleted,
Some(format!("batch delete: media_id={}", id.0)),
)
.await
{
tracing::warn!(error = %e, "failed to record audit entry");
}
}
match state.storage.batch_delete_media(&media_ids).await {
Ok(count) => Ok(Json(BatchOperationResponse {
processed: count as usize,
errors: Vec::new(),
})),
Err(e) => Ok(Json(BatchOperationResponse {
processed: 0,
errors: vec![e.to_string()],
})),
}
}
pub async fn batch_add_to_collection(
State(state): State<AppState>,
Json(req): Json<BatchCollectionRequest>,
) -> Result<Json<BatchOperationResponse>, ApiError> {
if req.media_ids.len() > 10_000 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"batch size exceeds limit of 10000".into(),
),
));
}
let mut processed = 0;
let mut errors = Vec::new();
for (i, media_id) in req.media_ids.iter().enumerate() {
match pinakes_core::collections::add_member(
&state.storage,
req.collection_id,
MediaId(*media_id),
i as i32,
)
.await
{
Ok(_) => processed += 1,
Err(e) => errors.push(format!("{media_id}: {e}")),
}
}
Ok(Json(BatchOperationResponse { processed, errors }))
}
pub async fn batch_update(
State(state): State<AppState>,
Json(req): Json<BatchUpdateRequest>,
) -> Result<Json<BatchOperationResponse>, ApiError> {
if req.media_ids.len() > 10_000 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"batch size exceeds limit of 10000".into(),
),
));
}
let media_ids: Vec<MediaId> = req.media_ids.iter().map(|id| MediaId(*id)).collect();
match state
.storage
.batch_update_media(
&media_ids,
req.title.as_deref(),
req.artist.as_deref(),
req.album.as_deref(),
req.genre.as_deref(),
req.year,
req.description.as_deref(),
)
.await
{
Ok(count) => Ok(Json(BatchOperationResponse {
processed: count as usize,
errors: Vec::new(),
})),
Err(e) => Ok(Json(BatchOperationResponse {
processed: 0,
errors: vec![e.to_string()],
})),
}
}
pub async fn get_thumbnail(
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<axum::response::Response, ApiError> {
use axum::body::Body;
use axum::http::header;
use tokio_util::io::ReaderStream;
let item = state.storage.get_media(MediaId(id)).await?;
let thumb_path = item.thumbnail_path.ok_or_else(|| {
ApiError(pinakes_core::error::PinakesError::NotFound(
"no thumbnail available".into(),
))
})?;
let file = tokio::fs::File::open(&thumb_path)
.await
.map_err(|_e| ApiError(pinakes_core::error::PinakesError::FileNotFound(thumb_path)))?;
let stream = ReaderStream::new(file);
let body = Body::from_stream(stream);
axum::response::Response::builder()
.header(header::CONTENT_TYPE, "image/jpeg")
.header(header::CACHE_CONTROL, "public, max-age=86400")
.body(body)
.map_err(|e| {
ApiError(pinakes_core::error::PinakesError::InvalidOperation(
format!("failed to build response: {e}"),
))
})
}
pub async fn get_media_count(
State(state): State<AppState>,
) -> Result<Json<MediaCountResponse>, ApiError> {
let count = state.storage.count_media().await?;
Ok(Json(MediaCountResponse { count }))
}

View file

@ -0,0 +1,18 @@
pub mod audit;
pub mod auth;
pub mod collections;
pub mod config;
pub mod database;
pub mod duplicates;
pub mod export;
pub mod health;
pub mod integrity;
pub mod jobs;
pub mod media;
pub mod saved_searches;
pub mod scan;
pub mod scheduled_tasks;
pub mod search;
pub mod statistics;
pub mod tags;
pub mod webhooks;

View file

@ -0,0 +1,76 @@
use axum::Json;
use axum::extract::{Path, State};
use serde::{Deserialize, Serialize};
use crate::error::ApiError;
use crate::state::AppState;
#[derive(Debug, Deserialize)]
pub struct CreateSavedSearchRequest {
pub name: String,
pub query: String,
pub sort_order: Option<String>,
}
#[derive(Debug, Serialize)]
pub struct SavedSearchResponse {
pub id: String,
pub name: String,
pub query: String,
pub sort_order: Option<String>,
pub created_at: chrono::DateTime<chrono::Utc>,
}
pub async fn create_saved_search(
State(state): State<AppState>,
Json(req): Json<CreateSavedSearchRequest>,
) -> Result<Json<SavedSearchResponse>, ApiError> {
let id = uuid::Uuid::now_v7();
state
.storage
.save_search(id, &req.name, &req.query, req.sort_order.as_deref())
.await
.map_err(ApiError)?;
Ok(Json(SavedSearchResponse {
id: id.to_string(),
name: req.name,
query: req.query,
sort_order: req.sort_order,
created_at: chrono::Utc::now(),
}))
}
pub async fn list_saved_searches(
State(state): State<AppState>,
) -> Result<Json<Vec<SavedSearchResponse>>, ApiError> {
let searches = state
.storage
.list_saved_searches()
.await
.map_err(ApiError)?;
Ok(Json(
searches
.into_iter()
.map(|s| SavedSearchResponse {
id: s.id.to_string(),
name: s.name,
query: s.query,
sort_order: s.sort_order,
created_at: s.created_at,
})
.collect(),
))
}
pub async fn delete_saved_search(
State(state): State<AppState>,
Path(id): Path<uuid::Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
state
.storage
.delete_saved_search(id)
.await
.map_err(ApiError)?;
Ok(Json(serde_json::json!({ "deleted": true })))
}

View file

@ -0,0 +1,30 @@
use axum::Json;
use axum::extract::State;
use crate::dto::*;
use crate::error::ApiError;
use crate::state::AppState;
/// Trigger a scan as a background job. Returns the job ID immediately.
pub async fn trigger_scan(
State(state): State<AppState>,
Json(req): Json<ScanRequest>,
) -> Result<Json<ScanJobResponse>, ApiError> {
let kind = pinakes_core::jobs::JobKind::Scan { path: req.path };
let job_id = state.job_queue.submit(kind).await;
Ok(Json(ScanJobResponse {
job_id: job_id.to_string(),
}))
}
pub async fn scan_status(State(state): State<AppState>) -> Json<ScanStatusResponse> {
let snapshot = state.scan_progress.snapshot();
let error_count = snapshot.errors.len();
Json(ScanStatusResponse {
scanning: snapshot.scanning,
files_found: snapshot.files_found,
files_processed: snapshot.files_processed,
error_count,
errors: snapshot.errors,
})
}

View file

@ -0,0 +1,55 @@
use axum::Json;
use axum::extract::{Path, State};
use crate::dto::ScheduledTaskResponse;
use crate::error::ApiError;
use crate::state::AppState;
pub async fn list_scheduled_tasks(
State(state): State<AppState>,
) -> Result<Json<Vec<ScheduledTaskResponse>>, ApiError> {
let tasks = state.scheduler.list_tasks().await;
let responses: Vec<ScheduledTaskResponse> = tasks
.into_iter()
.map(|t| ScheduledTaskResponse {
id: t.id,
name: t.name,
schedule: t.schedule.display_string(),
enabled: t.enabled,
last_run: t.last_run.map(|dt| dt.to_rfc3339()),
next_run: t.next_run.map(|dt| dt.to_rfc3339()),
last_status: t.last_status,
})
.collect();
Ok(Json(responses))
}
pub async fn toggle_scheduled_task(
State(state): State<AppState>,
Path(id): Path<String>,
) -> Result<Json<serde_json::Value>, ApiError> {
match state.scheduler.toggle_task(&id).await {
Some(enabled) => Ok(Json(serde_json::json!({
"id": id,
"enabled": enabled,
}))),
None => Err(ApiError(pinakes_core::error::PinakesError::NotFound(
format!("scheduled task not found: {id}"),
))),
}
}
pub async fn run_scheduled_task_now(
State(state): State<AppState>,
Path(id): Path<String>,
) -> Result<Json<serde_json::Value>, ApiError> {
match state.scheduler.run_now(&id).await {
Some(job_id) => Ok(Json(serde_json::json!({
"id": id,
"job_id": job_id,
}))),
None => Err(ApiError(pinakes_core::error::PinakesError::NotFound(
format!("scheduled task not found: {id}"),
))),
}
}

View file

@ -0,0 +1,87 @@
use axum::Json;
use axum::extract::{Query, State};
use crate::dto::*;
use crate::error::ApiError;
use crate::state::AppState;
use pinakes_core::model::Pagination;
use pinakes_core::search::{SearchRequest, SortOrder, parse_search_query};
fn resolve_sort(sort: Option<&str>) -> SortOrder {
match sort {
Some("date_asc") => SortOrder::DateAsc,
Some("date_desc") => SortOrder::DateDesc,
Some("name_asc") => SortOrder::NameAsc,
Some("name_desc") => SortOrder::NameDesc,
Some("size_asc") => SortOrder::SizeAsc,
Some("size_desc") => SortOrder::SizeDesc,
_ => SortOrder::Relevance,
}
}
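// A quick sketch of the fallback behavior above: absent or unrecognized
// sort keys resolve to relevance ordering rather than failing the request.
#[cfg(test)]
mod sort_tests {
    use super::*;

    #[test]
    fn unknown_sort_falls_back_to_relevance() {
        assert!(matches!(resolve_sort(None), SortOrder::Relevance));
        assert!(matches!(resolve_sort(Some("bogus")), SortOrder::Relevance));
        assert!(matches!(resolve_sort(Some("size_desc")), SortOrder::SizeDesc));
    }
}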
pub async fn search(
State(state): State<AppState>,
Query(params): Query<SearchParams>,
) -> Result<Json<SearchResponse>, ApiError> {
if params.q.len() > 2048 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"search query exceeds maximum length of 2048 characters".into(),
),
));
}
let query = parse_search_query(&params.q)?;
let sort = resolve_sort(params.sort.as_deref());
let request = SearchRequest {
query,
sort,
pagination: Pagination::new(
params.offset.unwrap_or(0),
params.limit.unwrap_or(50).min(1000),
None,
),
};
let results = state.storage.search(&request).await?;
Ok(Json(SearchResponse {
items: results.items.into_iter().map(MediaResponse::from).collect(),
total_count: results.total_count,
}))
}
pub async fn search_post(
State(state): State<AppState>,
Json(body): Json<SearchRequestBody>,
) -> Result<Json<SearchResponse>, ApiError> {
if body.q.len() > 2048 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"search query exceeds maximum length of 2048 characters".into(),
),
));
}
let query = parse_search_query(&body.q)?;
let sort = resolve_sort(body.sort.as_deref());
let request = SearchRequest {
query,
sort,
pagination: Pagination::new(
body.offset.unwrap_or(0),
body.limit.unwrap_or(50).min(1000),
None,
),
};
let results = state.storage.search(&request).await?;
Ok(Json(SearchResponse {
items: results.items.into_iter().map(MediaResponse::from).collect(),
total_count: results.total_count,
}))
}

View file

@ -0,0 +1,13 @@
use axum::Json;
use axum::extract::State;
use crate::dto::LibraryStatisticsResponse;
use crate::error::ApiError;
use crate::state::AppState;
pub async fn library_statistics(
State(state): State<AppState>,
) -> Result<Json<LibraryStatisticsResponse>, ApiError> {
let stats = state.storage.library_statistics().await?;
Ok(Json(LibraryStatisticsResponse::from(stats)))
}

View file

@ -0,0 +1,70 @@
use axum::Json;
use axum::extract::{Path, State};
use uuid::Uuid;
use crate::dto::*;
use crate::error::ApiError;
use crate::state::AppState;
use pinakes_core::model::MediaId;
pub async fn create_tag(
State(state): State<AppState>,
Json(req): Json<CreateTagRequest>,
) -> Result<Json<TagResponse>, ApiError> {
if req.name.is_empty() || req.name.len() > 255 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"tag name must be 1-255 characters".into(),
),
));
}
let tag = pinakes_core::tags::create_tag(&state.storage, &req.name, req.parent_id).await?;
Ok(Json(TagResponse::from(tag)))
}
pub async fn list_tags(State(state): State<AppState>) -> Result<Json<Vec<TagResponse>>, ApiError> {
let tags = state.storage.list_tags().await?;
Ok(Json(tags.into_iter().map(TagResponse::from).collect()))
}
pub async fn get_tag(
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<TagResponse>, ApiError> {
let tag = state.storage.get_tag(id).await?;
Ok(Json(TagResponse::from(tag)))
}
pub async fn delete_tag(
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
state.storage.delete_tag(id).await?;
Ok(Json(serde_json::json!({"deleted": true})))
}
pub async fn tag_media(
State(state): State<AppState>,
Path(media_id): Path<Uuid>,
Json(req): Json<TagMediaRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
pinakes_core::tags::tag_media(&state.storage, MediaId(media_id), req.tag_id).await?;
Ok(Json(serde_json::json!({"tagged": true})))
}
pub async fn untag_media(
State(state): State<AppState>,
Path((media_id, tag_id)): Path<(Uuid, Uuid)>,
) -> Result<Json<serde_json::Value>, ApiError> {
pinakes_core::tags::untag_media(&state.storage, MediaId(media_id), tag_id).await?;
Ok(Json(serde_json::json!({"untagged": true})))
}
pub async fn get_media_tags(
State(state): State<AppState>,
Path(media_id): Path<Uuid>,
) -> Result<Json<Vec<TagResponse>>, ApiError> {
let tags = state.storage.get_media_tags(MediaId(media_id)).await?;
Ok(Json(tags.into_iter().map(TagResponse::from).collect()))
}

View file

@ -0,0 +1,40 @@
use axum::Json;
use axum::extract::State;
use serde::Serialize;
use crate::error::ApiError;
use crate::state::AppState;
#[derive(Debug, Serialize)]
pub struct WebhookInfo {
pub url: String,
pub events: Vec<String>,
}
pub async fn list_webhooks(
State(state): State<AppState>,
) -> Result<Json<Vec<WebhookInfo>>, ApiError> {
let config = state.config.read().await;
let hooks: Vec<WebhookInfo> = config
.webhooks
.iter()
.map(|h| WebhookInfo {
url: h.url.clone(),
events: h.events.clone(),
})
.collect();
Ok(Json(hooks))
}
pub async fn test_webhook(
State(state): State<AppState>,
) -> Result<Json<serde_json::Value>, ApiError> {
let config = state.config.read().await;
let count = config.webhooks.len();
    // No webhook delivery happens here yet: this endpoint only reports how
    // many webhooks are configured; actual event delivery is handled by the
    // event bus.
Ok(Json(serde_json::json!({
"webhooks_configured": count,
"test_sent": true
})))
}

View file

@ -0,0 +1,50 @@
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::RwLock;
use pinakes_core::cache::CacheLayer;
use pinakes_core::config::{Config, UserRole};
use pinakes_core::jobs::JobQueue;
use pinakes_core::scan::ScanProgress;
use pinakes_core::scheduler::TaskScheduler;
use pinakes_core::storage::DynStorageBackend;
/// Default session TTL: 24 hours.
pub const SESSION_TTL_SECS: i64 = 24 * 60 * 60;
#[derive(Debug, Clone)]
pub struct SessionInfo {
pub username: String,
pub role: UserRole,
pub created_at: chrono::DateTime<chrono::Utc>,
}
impl SessionInfo {
/// Returns true if this session has exceeded its TTL.
pub fn is_expired(&self) -> bool {
let age = chrono::Utc::now() - self.created_at;
age.num_seconds() > SESSION_TTL_SECS
}
}
pub type SessionStore = Arc<RwLock<HashMap<String, SessionInfo>>>;
/// Remove all expired sessions from the store.
pub async fn cleanup_expired_sessions(sessions: &SessionStore) {
let mut store = sessions.write().await;
store.retain(|_, info| !info.is_expired());
}
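// A minimal sketch of the TTL machinery above; UserRole::Viewer is assumed
// to exist and is used purely as a placeholder role, and tokio's test macro
// is assumed to be enabled for this crate.
#[cfg(test)]
mod session_tests {
    use super::*;

    fn session_created(age_secs: i64) -> SessionInfo {
        SessionInfo {
            username: "demo".to_string(),
            role: UserRole::Viewer, // hypothetical variant, placeholder only
            created_at: chrono::Utc::now() - chrono::Duration::seconds(age_secs),
        }
    }

    #[tokio::test]
    async fn cleanup_removes_only_expired_sessions() {
        let sessions: SessionStore = Arc::new(RwLock::new(HashMap::new()));
        {
            let mut store = sessions.write().await;
            store.insert("fresh".into(), session_created(60));
            store.insert("stale".into(), session_created(SESSION_TTL_SECS + 1));
        }
        cleanup_expired_sessions(&sessions).await;
        let store = sessions.read().await;
        assert!(store.contains_key("fresh"));
        assert!(!store.contains_key("stale"));
    }
}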
#[derive(Clone)]
pub struct AppState {
pub storage: DynStorageBackend,
pub config: Arc<RwLock<Config>>,
pub config_path: Option<PathBuf>,
pub scan_progress: ScanProgress,
pub sessions: SessionStore,
pub job_queue: Arc<JobQueue>,
pub cache: Arc<CacheLayer>,
pub scheduler: Arc<TaskScheduler>,
}

View file

@ -0,0 +1,212 @@
use std::net::SocketAddr;
use std::sync::Arc;
use axum::body::Body;
use axum::extract::ConnectInfo;
use axum::http::{Request, StatusCode};
use http_body_util::BodyExt;
use tokio::sync::RwLock;
use tower::ServiceExt;
use pinakes_core::cache::CacheLayer;
use pinakes_core::config::{
AccountsConfig, Config, DirectoryConfig, JobsConfig, ScanningConfig, ServerConfig,
SqliteConfig, StorageBackendType, StorageConfig, ThumbnailConfig, UiConfig, WebhookConfig,
};
use pinakes_core::jobs::JobQueue;
use pinakes_core::storage::StorageBackend;
use pinakes_core::storage::sqlite::SqliteBackend;
/// Fake socket address for tests (governor needs ConnectInfo<SocketAddr>)
fn test_addr() -> ConnectInfo<SocketAddr> {
ConnectInfo("127.0.0.1:9999".parse().unwrap())
}
/// Build a GET request with ConnectInfo for rate limiter compatibility
fn get(uri: &str) -> Request<Body> {
let mut req = Request::builder().uri(uri).body(Body::empty()).unwrap();
req.extensions_mut().insert(test_addr());
req
}
/// Build a POST request with ConnectInfo
fn post_json(uri: &str, body: &str) -> Request<Body> {
let mut req = Request::builder()
.method("POST")
.uri(uri)
.header("content-type", "application/json")
.body(Body::from(body.to_string()))
.unwrap();
req.extensions_mut().insert(test_addr());
req
}
async fn setup_app() -> axum::Router {
let backend = SqliteBackend::in_memory().expect("in-memory SQLite");
backend.run_migrations().await.expect("migrations");
let storage = Arc::new(backend) as pinakes_core::storage::DynStorageBackend;
let config = Config {
storage: StorageConfig {
backend: StorageBackendType::Sqlite,
sqlite: Some(SqliteConfig {
path: ":memory:".into(),
}),
postgres: None,
},
directories: DirectoryConfig { roots: vec![] },
scanning: ScanningConfig {
watch: false,
poll_interval_secs: 300,
ignore_patterns: vec![],
import_concurrency: 8,
},
server: ServerConfig {
host: "127.0.0.1".to_string(),
port: 3000,
api_key: None,
},
ui: UiConfig::default(),
accounts: AccountsConfig::default(),
jobs: JobsConfig::default(),
thumbnails: ThumbnailConfig::default(),
webhooks: Vec::<WebhookConfig>::new(),
scheduled_tasks: vec![],
};
let job_queue = JobQueue::new(1, |_id, _kind, _cancel, _jobs| tokio::spawn(async {}));
let config = Arc::new(RwLock::new(config));
let scheduler = pinakes_core::scheduler::TaskScheduler::new(
job_queue.clone(),
tokio_util::sync::CancellationToken::new(),
config.clone(),
None,
);
let state = pinakes_server::state::AppState {
storage,
config,
config_path: None,
scan_progress: pinakes_core::scan::ScanProgress::new(),
sessions: Arc::new(RwLock::new(std::collections::HashMap::new())),
job_queue,
cache: Arc::new(CacheLayer::new(60)),
scheduler: Arc::new(scheduler),
};
pinakes_server::app::create_router(state)
}
#[tokio::test]
async fn test_list_media_empty() {
let app = setup_app().await;
let response = app.oneshot(get("/api/v1/media")).await.unwrap();
assert_eq!(response.status(), StatusCode::OK);
let body = response.into_body().collect().await.unwrap().to_bytes();
let items: Vec<serde_json::Value> = serde_json::from_slice(&body).unwrap();
assert_eq!(items.len(), 0);
}
#[tokio::test]
async fn test_create_and_list_tags() {
let app = setup_app().await;
// Create a tag
let response = app
.clone()
.oneshot(post_json("/api/v1/tags", r#"{"name":"Music"}"#))
.await
.unwrap();
assert_eq!(response.status(), StatusCode::OK);
// List tags
let response = app.oneshot(get("/api/v1/tags")).await.unwrap();
assert_eq!(response.status(), StatusCode::OK);
let body = response.into_body().collect().await.unwrap().to_bytes();
let tags: Vec<serde_json::Value> = serde_json::from_slice(&body).unwrap();
assert_eq!(tags.len(), 1);
assert_eq!(tags[0]["name"], "Music");
}
#[tokio::test]
async fn test_search_empty() {
let app = setup_app().await;
let response = app.oneshot(get("/api/v1/search?q=test")).await.unwrap();
assert_eq!(response.status(), StatusCode::OK);
let body = response.into_body().collect().await.unwrap().to_bytes();
let result: serde_json::Value = serde_json::from_slice(&body).unwrap();
assert_eq!(result["total_count"], 0);
}
#[tokio::test]
async fn test_media_not_found() {
let app = setup_app().await;
let response = app
.oneshot(get("/api/v1/media/00000000-0000-0000-0000-000000000000"))
.await
.unwrap();
assert_eq!(response.status(), StatusCode::NOT_FOUND);
}
#[tokio::test]
async fn test_collections_crud() {
let app = setup_app().await;
// Create collection
let response = app
.clone()
.oneshot(post_json(
"/api/v1/collections",
r#"{"name":"Favorites","kind":"manual"}"#,
))
.await
.unwrap();
assert_eq!(response.status(), StatusCode::OK);
// List collections
let response = app.oneshot(get("/api/v1/collections")).await.unwrap();
assert_eq!(response.status(), StatusCode::OK);
let body = response.into_body().collect().await.unwrap().to_bytes();
let cols: Vec<serde_json::Value> = serde_json::from_slice(&body).unwrap();
assert_eq!(cols.len(), 1);
assert_eq!(cols[0]["name"], "Favorites");
}
#[tokio::test]
async fn test_statistics_endpoint() {
let app = setup_app().await;
let response = app.oneshot(get("/api/v1/statistics")).await.unwrap();
assert_eq!(response.status(), StatusCode::OK);
let body = response.into_body().collect().await.unwrap().to_bytes();
let stats: serde_json::Value = serde_json::from_slice(&body).unwrap();
assert_eq!(stats["total_media"], 0);
assert_eq!(stats["total_size_bytes"], 0);
}
#[tokio::test]
async fn test_scheduled_tasks_endpoint() {
let app = setup_app().await;
let response = app.oneshot(get("/api/v1/tasks/scheduled")).await.unwrap();
assert_eq!(response.status(), StatusCode::OK);
let body = response.into_body().collect().await.unwrap().to_bytes();
let tasks: Vec<serde_json::Value> = serde_json::from_slice(&body).unwrap();
assert!(!tasks.is_empty(), "should have default scheduled tasks");
// Verify structure of first task
assert!(tasks[0]["id"].is_string());
assert!(tasks[0]["name"].is_string());
assert!(tasks[0]["schedule"].is_string());
}
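#[tokio::test]
async fn test_tag_not_found() {
    // Mirrors test_media_not_found; this assumes get_tag surfaces a missing
    // UUID as a NotFound error the same way get_media does.
    let app = setup_app().await;
    let response = app
        .oneshot(get("/api/v1/tags/00000000-0000-0000-0000-000000000000"))
        .await
        .unwrap();
    assert_eq!(response.status(), StatusCode::NOT_FOUND);
}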