initial commit

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I4a6b498153eccd5407510dd541b7f4816a6a6964
This commit is contained in:
raf 2026-01-30 22:05:46 +03:00
commit 6a73d11c4b
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
124 changed files with 34856 additions and 0 deletions

View file

@ -0,0 +1,23 @@
use axum::Json;
use axum::extract::{Query, State};
use crate::dto::*;
use crate::error::ApiError;
use crate::state::AppState;
use pinakes_core::model::Pagination;
/// GET handler returning a page of audit log entries.
///
/// Pagination defaults: offset 0, limit 50; the limit is clamped to 1000
/// to bound response size. No filter is applied (both filter args are None).
pub async fn list_audit(
    State(state): State<AppState>,
    Query(params): Query<PaginationParams>,
) -> Result<Json<Vec<AuditEntryResponse>>, ApiError> {
    let offset = params.offset.unwrap_or(0);
    let limit = params.limit.unwrap_or(50).min(1000);
    let pagination = Pagination::new(offset, limit, None);
    let entries = state.storage.list_audit_entries(None, &pagination).await?;
    let response: Vec<AuditEntryResponse> =
        entries.into_iter().map(AuditEntryResponse::from).collect();
    Ok(Json(response))
}

View file

@ -0,0 +1,119 @@
use axum::Json;
use axum::extract::State;
use axum::http::{HeaderMap, StatusCode};
use crate::dto::{LoginRequest, LoginResponse, UserInfoResponse};
use crate::state::AppState;
/// POST /login — authenticate against the configured account list and mint
/// an in-memory session token.
///
/// Responses: 400 for oversized credentials, 404 when accounts are disabled,
/// 401 for unknown user or bad password, 500 when a stored hash fails to parse.
///
/// NOTE(review): an unknown username returns before any argon2 work, while a
/// known one pays the full hash cost — the timing difference enables username
/// enumeration; consider verifying against a dummy hash on the miss path.
/// NOTE(review): argon2 verification is CPU-heavy and runs directly on the
/// async executor here — consider `tokio::task::spawn_blocking`.
pub async fn login(
    State(state): State<AppState>,
    Json(req): Json<LoginRequest>,
) -> Result<Json<LoginResponse>, StatusCode> {
    // Limit input sizes to prevent DoS
    if req.username.len() > 255 || req.password.len() > 1024 {
        return Err(StatusCode::BAD_REQUEST);
    }
    let config = state.config.read().await;
    // Hide the endpoint entirely when account support is turned off.
    if !config.accounts.enabled {
        return Err(StatusCode::NOT_FOUND);
    }
    // Linear scan over the configured users (exact username match).
    let user = config
        .accounts
        .users
        .iter()
        .find(|u| u.username == req.username);
    let user = match user {
        Some(u) => u,
        None => {
            tracing::warn!(username = %req.username, "login failed: unknown user");
            return Err(StatusCode::UNAUTHORIZED);
        }
    };
    // Verify password using argon2
    use argon2::password_hash::PasswordVerifier;
    let hash = &user.password_hash;
    // An unparsable stored hash is a server-side configuration problem, not
    // a client error, hence 500 rather than 401.
    let parsed_hash = argon2::password_hash::PasswordHash::new(hash)
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
    let valid = argon2::Argon2::default()
        .verify_password(req.password.as_bytes(), &parsed_hash)
        .is_ok();
    if !valid {
        tracing::warn!(username = %req.username, "login failed: invalid password");
        return Err(StatusCode::UNAUTHORIZED);
    }
    // Generate session token
    use rand::Rng;
    // 48 alphanumeric chars from the thread RNG.
    let token: String = rand::rng()
        .sample_iter(&rand::distr::Alphanumeric)
        .take(48)
        .map(char::from)
        .collect();
    let role = user.role;
    let username = user.username.clone();
    // Store session
    // Sessions live in the in-process map guarded by an RwLock; they are not
    // persisted, so they do not survive a server restart.
    {
        let mut sessions = state.sessions.write().await;
        sessions.insert(
            token.clone(),
            crate::state::SessionInfo {
                username: username.clone(),
                role,
                created_at: chrono::Utc::now(),
            },
        );
    }
    tracing::info!(username = %username, role = %role, "login successful");
    Ok(Json(LoginResponse {
        token,
        username,
        role: role.to_string(),
    }))
}
/// POST /logout — drop the session identified by the bearer token.
///
/// Always answers 200, even when no token was supplied or it matched no
/// session, making logout idempotent.
pub async fn logout(State(state): State<AppState>, headers: HeaderMap) -> StatusCode {
    match extract_bearer_token(&headers) {
        Some(token) => {
            state.sessions.write().await.remove(token);
            StatusCode::OK
        }
        None => StatusCode::OK,
    }
}
/// GET /me — report the identity behind the current request.
///
/// With accounts disabled every caller is treated as the default "admin"
/// user; otherwise the bearer token must map to a live session or 401 is
/// returned.
pub async fn me(
    State(state): State<AppState>,
    headers: HeaderMap,
) -> Result<Json<UserInfoResponse>, StatusCode> {
    // Read the flag in a narrow scope so the config lock is released before
    // we touch the session map.
    let accounts_enabled = state.config.read().await.accounts.enabled;
    if !accounts_enabled {
        // When accounts are not enabled, return a default admin user
        return Ok(Json(UserInfoResponse {
            username: "admin".to_string(),
            role: "admin".to_string(),
        }));
    }
    let token = extract_bearer_token(&headers).ok_or(StatusCode::UNAUTHORIZED)?;
    let sessions = state.sessions.read().await;
    match sessions.get(token) {
        Some(session) => Ok(Json(UserInfoResponse {
            username: session.username.clone(),
            role: session.role.to_string(),
        })),
        None => Err(StatusCode::UNAUTHORIZED),
    }
}
/// Extract the token from an `Authorization: Bearer <token>` header.
///
/// Per RFC 7235 the auth-scheme is case-insensitive, so `bearer`/`BEARER`
/// are accepted as well as `Bearer` (the previous implementation only
/// matched the exact string `"Bearer "`). Returns `None` when the header is
/// absent, is not valid UTF-8, or does not use the Bearer scheme.
fn extract_bearer_token(headers: &HeaderMap) -> Option<&str> {
    let value = headers.get("authorization")?.to_str().ok()?;
    let (scheme, token) = value.split_once(' ')?;
    if scheme.eq_ignore_ascii_case("bearer") {
        Some(token)
    } else {
        None
    }
}

View file

@ -0,0 +1,101 @@
use axum::Json;
use axum::extract::{Path, State};
use uuid::Uuid;
use crate::dto::*;
use crate::error::ApiError;
use crate::state::AppState;
use pinakes_core::model::{CollectionKind, MediaId};
pub async fn create_collection(
State(state): State<AppState>,
Json(req): Json<CreateCollectionRequest>,
) -> Result<Json<CollectionResponse>, ApiError> {
if req.name.is_empty() || req.name.len() > 255 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"collection name must be 1-255 characters".into(),
),
));
}
if let Some(ref desc) = req.description
&& desc.len() > 10_000
{
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"description exceeds 10000 characters".into(),
),
));
}
let kind = match req.kind.as_str() {
"virtual" => CollectionKind::Virtual,
_ => CollectionKind::Manual,
};
let col = pinakes_core::collections::create_collection(
&state.storage,
&req.name,
kind,
req.description.as_deref(),
req.filter_query.as_deref(),
)
.await?;
Ok(Json(CollectionResponse::from(col)))
}
/// GET handler listing every collection.
pub async fn list_collections(
    State(state): State<AppState>,
) -> Result<Json<Vec<CollectionResponse>>, ApiError> {
    let cols = state.storage.list_collections().await?;
    let response: Vec<CollectionResponse> =
        cols.into_iter().map(CollectionResponse::from).collect();
    Ok(Json(response))
}
/// GET handler fetching a single collection by id.
pub async fn get_collection(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> Result<Json<CollectionResponse>, ApiError> {
    let col = state.storage.get_collection(id).await?;
    Ok(Json(col.into()))
}
/// DELETE handler removing a collection by id.
pub async fn delete_collection(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
    state.storage.delete_collection(id).await?;
    let body = serde_json::json!({"deleted": true});
    Ok(Json(body))
}
/// POST handler adding a media item to a collection.
///
/// `position` defaults to 0 when omitted from the request.
pub async fn add_member(
    State(state): State<AppState>,
    Path(collection_id): Path<Uuid>,
    Json(req): Json<AddMemberRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let position = req.position.unwrap_or(0);
    let media_id = MediaId(req.media_id);
    pinakes_core::collections::add_member(&state.storage, collection_id, media_id, position)
        .await?;
    Ok(Json(serde_json::json!({"added": true})))
}
/// DELETE handler removing a media item from a collection.
pub async fn remove_member(
    State(state): State<AppState>,
    Path((collection_id, media_id)): Path<(Uuid, Uuid)>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let member = MediaId(media_id);
    pinakes_core::collections::remove_member(&state.storage, collection_id, member).await?;
    Ok(Json(serde_json::json!({"removed": true})))
}
/// GET handler listing the media items that belong to a collection.
pub async fn get_members(
    State(state): State<AppState>,
    Path(collection_id): Path<Uuid>,
) -> Result<Json<Vec<MediaResponse>>, ApiError> {
    let items = pinakes_core::collections::get_members(&state.storage, collection_id).await?;
    let response: Vec<MediaResponse> = items.into_iter().map(MediaResponse::from).collect();
    Ok(Json(response))
}

View file

@ -0,0 +1,217 @@
use axum::Json;
use axum::extract::State;
use crate::dto::*;
use crate::error::ApiError;
use crate::state::AppState;
/// GET handler returning the full effective configuration plus the storage
/// root list and whether the config file (or its parent directory, if the
/// file does not exist yet) appears writable.
pub async fn get_config(State(state): State<AppState>) -> Result<Json<ConfigResponse>, ApiError> {
    // True when the path itself (or, for a not-yet-created file, its parent
    // directory) is not marked read-only. Any metadata failure counts as
    // "not writable".
    fn writable(path: &std::path::Path) -> bool {
        let probe = if path.exists() { Some(path) } else { path.parent() };
        probe
            .and_then(|p| std::fs::metadata(p).ok())
            .map(|m| !m.permissions().readonly())
            .unwrap_or(false)
    }
    let config = state.config.read().await;
    let roots = state.storage.list_root_dirs().await?;
    let config_path = state
        .config_path
        .as_ref()
        .map(|p| p.to_string_lossy().to_string());
    let config_writable = state
        .config_path
        .as_deref()
        .map(writable)
        .unwrap_or(false);
    Ok(Json(ConfigResponse {
        backend: format!("{:?}", config.storage.backend).to_lowercase(),
        database_path: config
            .storage
            .sqlite
            .as_ref()
            .map(|s| s.path.to_string_lossy().to_string()),
        roots: roots
            .iter()
            .map(|p| p.to_string_lossy().to_string())
            .collect(),
        scanning: ScanningConfigResponse {
            watch: config.scanning.watch,
            poll_interval_secs: config.scanning.poll_interval_secs,
            ignore_patterns: config.scanning.ignore_patterns.clone(),
        },
        server: ServerConfigResponse {
            host: config.server.host.clone(),
            port: config.server.port,
        },
        ui: UiConfigResponse::from(&config.ui),
        config_path,
        config_writable,
    }))
}
/// GET handler exposing only the UI portion of the configuration.
pub async fn get_ui_config(
    State(state): State<AppState>,
) -> Result<Json<UiConfigResponse>, ApiError> {
    let ui = UiConfigResponse::from(&state.config.read().await.ui);
    Ok(Json(ui))
}
/// PATCH-style handler updating UI preferences; only fields present in the
/// request are changed. When a config file path is known the merged config
/// is persisted to disk before responding.
pub async fn update_ui_config(
    State(state): State<AppState>,
    Json(req): Json<UpdateUiConfigRequest>,
) -> Result<Json<UiConfigResponse>, ApiError> {
    let mut config = state.config.write().await;
    let ui = &mut config.ui;
    if let Some(v) = req.theme {
        ui.theme = v;
    }
    if let Some(v) = req.default_view {
        ui.default_view = v;
    }
    if let Some(v) = req.default_page_size {
        ui.default_page_size = v;
    }
    if let Some(v) = req.default_view_mode {
        ui.default_view_mode = v;
    }
    if let Some(v) = req.auto_play_media {
        ui.auto_play_media = v;
    }
    if let Some(v) = req.show_thumbnails {
        ui.show_thumbnails = v;
    }
    if let Some(v) = req.sidebar_collapsed {
        ui.sidebar_collapsed = v;
    }
    // Persist only when the server knows where its config file lives.
    if let Some(ref path) = state.config_path {
        config.save_to_file(path).map_err(ApiError)?;
    }
    Ok(Json(UiConfigResponse::from(&config.ui)))
}
pub async fn update_scanning_config(
State(state): State<AppState>,
Json(req): Json<UpdateScanningRequest>,
) -> Result<Json<ConfigResponse>, ApiError> {
let mut config = state.config.write().await;
if let Some(watch) = req.watch {
config.scanning.watch = watch;
}
if let Some(interval) = req.poll_interval_secs {
config.scanning.poll_interval_secs = interval;
}
if let Some(patterns) = req.ignore_patterns {
config.scanning.ignore_patterns = patterns;
}
// Persist to disk if we have a config path
if let Some(ref path) = state.config_path {
config.save_to_file(path).map_err(ApiError)?;
}
let roots = state.storage.list_root_dirs().await?;
let config_path = state
.config_path
.as_ref()
.map(|p| p.to_string_lossy().to_string());
let config_writable = match &state.config_path {
Some(path) => {
if path.exists() {
std::fs::metadata(path)
.map(|m| !m.permissions().readonly())
.unwrap_or(false)
} else {
path.parent()
.map(|parent| {
std::fs::metadata(parent)
.map(|m| !m.permissions().readonly())
.unwrap_or(false)
})
.unwrap_or(false)
}
}
None => false,
};
Ok(Json(ConfigResponse {
backend: format!("{:?}", config.storage.backend).to_lowercase(),
database_path: config
.storage
.sqlite
.as_ref()
.map(|s| s.path.to_string_lossy().to_string()),
roots: roots
.iter()
.map(|p| p.to_string_lossy().to_string())
.collect(),
scanning: ScanningConfigResponse {
watch: config.scanning.watch,
poll_interval_secs: config.scanning.poll_interval_secs,
ignore_patterns: config.scanning.ignore_patterns.clone(),
},
server: ServerConfigResponse {
host: config.server.host.clone(),
port: config.server.port,
},
ui: UiConfigResponse::from(&config.ui),
config_path,
config_writable,
}))
}
pub async fn add_root(
State(state): State<AppState>,
Json(req): Json<RootDirRequest>,
) -> Result<Json<ConfigResponse>, ApiError> {
let path = std::path::PathBuf::from(&req.path);
if !path.exists() {
return Err(ApiError(pinakes_core::error::PinakesError::FileNotFound(
path,
)));
}
state.storage.add_root_dir(path.clone()).await?;
{
let mut config = state.config.write().await;
if !config.directories.roots.contains(&path) {
config.directories.roots.push(path);
}
if let Some(ref config_path) = state.config_path {
config.save_to_file(config_path).map_err(ApiError)?;
}
}
get_config(State(state)).await
}
/// POST handler unregistering a root directory from storage, the in-memory
/// config, and (when persisted) the config file. Returns the refreshed
/// config.
pub async fn remove_root(
    State(state): State<AppState>,
    Json(req): Json<RootDirRequest>,
) -> Result<Json<ConfigResponse>, ApiError> {
    let path = std::path::PathBuf::from(&req.path);
    state.storage.remove_root_dir(&path).await?;
    {
        let mut config = state.config.write().await;
        config.directories.roots.retain(|root| *root != path);
        if let Some(ref config_path) = state.config_path {
            config.save_to_file(config_path).map_err(ApiError)?;
        }
    }
    get_config(State(state)).await
}

View file

@ -0,0 +1,34 @@
use axum::Json;
use axum::extract::State;
use crate::dto::DatabaseStatsResponse;
use crate::error::ApiError;
use crate::state::AppState;
/// GET handler reporting row counts, on-disk size, and backend name for the
/// active storage backend.
pub async fn database_stats(
    State(state): State<AppState>,
) -> Result<Json<DatabaseStatsResponse>, ApiError> {
    let s = state.storage.database_stats().await?;
    let response = DatabaseStatsResponse {
        media_count: s.media_count,
        tag_count: s.tag_count,
        collection_count: s.collection_count,
        audit_count: s.audit_count,
        database_size_bytes: s.database_size_bytes,
        backend_name: s.backend_name,
    };
    Ok(Json(response))
}
/// POST handler asking the storage backend to vacuum/compact itself.
pub async fn vacuum_database(
    State(state): State<AppState>,
) -> Result<Json<serde_json::Value>, ApiError> {
    state.storage.vacuum().await?;
    let body = serde_json::json!({"status": "ok"});
    Ok(Json(body))
}
/// POST handler that wipes ALL data from the storage backend.
///
/// Destructive and irreversible. NOTE(review): no authorization check is
/// visible here — confirm this route is admin-gated at the router level.
pub async fn clear_database(
    State(state): State<AppState>,
) -> Result<Json<serde_json::Value>, ApiError> {
    state.storage.clear_all_data().await?;
    let body = serde_json::json!({"status": "ok"});
    Ok(Json(body))
}

View file

@ -0,0 +1,30 @@
use axum::Json;
use axum::extract::State;
use crate::dto::{DuplicateGroupResponse, MediaResponse};
use crate::error::ApiError;
use crate::state::AppState;
/// GET handler grouping media items that share a content hash.
///
/// Each group reports the common hash (taken from its first item; empty
/// string for an empty group) plus every duplicate entry.
pub async fn list_duplicates(
    State(state): State<AppState>,
) -> Result<Json<Vec<DuplicateGroupResponse>>, ApiError> {
    let groups = state.storage.find_duplicates().await?;
    let mut response = Vec::with_capacity(groups.len());
    for items in groups {
        let content_hash = items
            .first()
            .map(|i| i.content_hash.0.clone())
            .unwrap_or_default();
        let media_items: Vec<MediaResponse> =
            items.into_iter().map(MediaResponse::from).collect();
        response.push(DuplicateGroupResponse {
            content_hash,
            items: media_items,
        });
    }
    Ok(Json(response))
}

View file

@ -0,0 +1,42 @@
use axum::Json;
use axum::extract::State;
use serde::Deserialize;
use std::path::PathBuf;
use crate::error::ApiError;
use crate::state::AppState;
/// Request body for `trigger_export_with_options`.
#[derive(Debug, Deserialize)]
pub struct ExportRequest {
    // Format selector: "csv" is recognized; any other value falls back to JSON.
    pub format: String,
    // Destination file path for the export job.
    // NOTE(review): client-supplied path used as a write target without
    // validation — confirm whether callers of this API are trusted.
    pub destination: PathBuf,
}
/// POST handler queueing a JSON export to the default data directory
/// (`<data dir>/export.json`).
pub async fn trigger_export(
    State(state): State<AppState>,
) -> Result<Json<serde_json::Value>, ApiError> {
    // Default export to JSON in data dir
    let destination = pinakes_core::config::Config::default_data_dir().join("export.json");
    let kind = pinakes_core::jobs::JobKind::Export {
        format: pinakes_core::jobs::ExportFormat::Json,
        destination,
    };
    let job_id = state.job_queue.submit(kind).await;
    Ok(Json(serde_json::json!({ "job_id": job_id.to_string() })))
}
/// POST handler queueing an export job with caller-chosen format and
/// destination. "csv" selects CSV output; any other format string silently
/// falls back to JSON.
///
/// NOTE(review): `req.destination` is a client-supplied filesystem path
/// passed to the export job unvalidated — a caller can direct the write
/// anywhere the server process can reach. Confirm whether this endpoint is
/// trusted-only or needs path restrictions.
pub async fn trigger_export_with_options(
    State(state): State<AppState>,
    Json(req): Json<ExportRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let format = match req.format.as_str() {
        "csv" => pinakes_core::jobs::ExportFormat::Csv,
        _ => pinakes_core::jobs::ExportFormat::Json,
    };
    let kind = pinakes_core::jobs::JobKind::Export {
        format,
        destination: req.destination,
    };
    let job_id = state.job_queue.submit(kind).await;
    Ok(Json(serde_json::json!({ "job_id": job_id.to_string() })))
}

View file

@ -0,0 +1,8 @@
use axum::Json;
/// Liveness probe: always reports OK plus the crate version baked in at
/// compile time.
pub async fn health() -> Json<serde_json::Value> {
    let body = serde_json::json!({
        "status": "ok",
        "version": env!("CARGO_PKG_VERSION"),
    });
    Json(body)
}

View file

@ -0,0 +1,99 @@
use axum::Json;
use axum::extract::State;
use serde::Deserialize;
use crate::error::ApiError;
use crate::state::AppState;
/// Request body for `resolve_orphans`.
#[derive(Debug, Deserialize)]
pub struct OrphanResolveRequest {
    // "delete" removes the orphaned entries; any other value ignores them.
    pub action: String,
    // Media ids to resolve.
    pub ids: Vec<uuid::Uuid>,
}
/// POST handler queueing a background orphan-detection job.
pub async fn trigger_orphan_detection(
    State(state): State<AppState>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let job_id = state
        .job_queue
        .submit(pinakes_core::jobs::JobKind::OrphanDetection)
        .await;
    Ok(Json(serde_json::json!({ "job_id": job_id.to_string() })))
}
/// POST handler queueing an integrity-verification job for the given media
/// ids.
pub async fn trigger_verify_integrity(
    State(state): State<AppState>,
    Json(req): Json<VerifyIntegrityRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
    // Wrap raw UUIDs in the MediaId newtype; pass the tuple-struct
    // constructor directly instead of a redundant closure (clippy:
    // redundant_closure).
    let media_ids = req
        .media_ids
        .into_iter()
        .map(pinakes_core::model::MediaId)
        .collect();
    let kind = pinakes_core::jobs::JobKind::VerifyIntegrity { media_ids };
    let job_id = state.job_queue.submit(kind).await;
    Ok(Json(serde_json::json!({ "job_id": job_id.to_string() })))
}
/// Request body for `trigger_verify_integrity`: the media ids whose files
/// should be checked.
#[derive(Debug, Deserialize)]
pub struct VerifyIntegrityRequest {
    pub media_ids: Vec<uuid::Uuid>,
}
/// POST handler queueing a thumbnail-cleanup job.
pub async fn trigger_cleanup_thumbnails(
    State(state): State<AppState>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let job_id = state
        .job_queue
        .submit(pinakes_core::jobs::JobKind::CleanupThumbnails)
        .await;
    Ok(Json(serde_json::json!({ "job_id": job_id.to_string() })))
}
/// Optional request body for `generate_all_thumbnails`.
#[derive(Debug, Deserialize)]
pub struct GenerateThumbnailsRequest {
    /// When true, only generate thumbnails for items that don't have one yet.
    /// When false (the default, and the behavior when the field or the whole
    /// body is omitted), regenerate all thumbnails.
    #[serde(default)]
    pub only_missing: bool,
}
/// POST handler queueing thumbnail generation.
///
/// With `only_missing: true` in the optional body only items lacking a
/// thumbnail are processed; otherwise every item is regenerated. When
/// nothing qualifies, no job is queued and `job_id` is null in the response.
pub async fn generate_all_thumbnails(
    State(state): State<AppState>,
    body: Option<Json<GenerateThumbnailsRequest>>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let only_missing = body.is_some_and(|b| b.only_missing);
    let media_ids = state
        .storage
        .list_media_ids_for_thumbnails(only_missing)
        .await?;
    if media_ids.is_empty() {
        return Ok(Json(serde_json::json!({
            "job_id": null,
            "media_count": 0,
            "message": "no media items to process"
        })));
    }
    let count = media_ids.len();
    let kind = pinakes_core::jobs::JobKind::GenerateThumbnails { media_ids };
    let job_id = state.job_queue.submit(kind).await;
    Ok(Json(serde_json::json!({
        "job_id": job_id.to_string(),
        "media_count": count
    })))
}
pub async fn resolve_orphans(
State(state): State<AppState>,
Json(req): Json<OrphanResolveRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
let action = match req.action.as_str() {
"delete" => pinakes_core::integrity::OrphanAction::Delete,
_ => pinakes_core::integrity::OrphanAction::Ignore,
};
let ids: Vec<pinakes_core::model::MediaId> = req
.ids
.into_iter()
.map(pinakes_core::model::MediaId)
.collect();
let count = pinakes_core::integrity::resolve_orphans(&state.storage, action, &ids)
.await
.map_err(|e| ApiError(e))?;
Ok(Json(serde_json::json!({ "resolved": count })))
}

View file

@ -0,0 +1,34 @@
use axum::Json;
use axum::extract::{Path, State};
use crate::error::ApiError;
use crate::state::AppState;
use pinakes_core::jobs::Job;
/// GET handler returning every job known to the in-process queue.
pub async fn list_jobs(State(state): State<AppState>) -> Json<Vec<Job>> {
    let jobs = state.job_queue.list().await;
    Json(jobs)
}
pub async fn get_job(
State(state): State<AppState>,
Path(id): Path<uuid::Uuid>,
) -> Result<Json<Job>, ApiError> {
state.job_queue.status(id).await.map(Json).ok_or_else(|| {
pinakes_core::error::PinakesError::NotFound(format!("job not found: {id}")).into()
})
}
pub async fn cancel_job(
State(state): State<AppState>,
Path(id): Path<uuid::Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
let cancelled = state.job_queue.cancel(id).await;
if cancelled {
Ok(Json(serde_json::json!({ "cancelled": true })))
} else {
Err(pinakes_core::error::PinakesError::NotFound(format!(
"job not found or already finished: {id}"
))
.into())
}
}

View file

@ -0,0 +1,795 @@
use axum::Json;
use axum::extract::{Path, Query, State};
use uuid::Uuid;
use crate::dto::*;
use crate::error::ApiError;
use crate::state::AppState;
use pinakes_core::model::{MediaId, Pagination};
use pinakes_core::storage::DynStorageBackend;
/// Apply tags and add to collection after a successful import.
/// Shared logic used by import_with_options, batch_import, and import_directory_endpoint.
///
/// Every step is best-effort: failures are logged at warn level and never
/// abort the import itself.
async fn apply_import_post_processing(
    storage: &DynStorageBackend,
    media_id: MediaId,
    tag_ids: Option<&[Uuid]>,
    new_tags: Option<&[String]>,
    collection_id: Option<Uuid>,
) {
    // Attach pre-existing tags by id.
    for tid in tag_ids.unwrap_or_default() {
        if let Err(e) = pinakes_core::tags::tag_media(storage, media_id, *tid).await {
            tracing::warn!(error = %e, "failed to apply tag during import");
        }
    }
    // Create brand-new tags by name, then attach each to the item.
    for name in new_tags.unwrap_or_default() {
        match pinakes_core::tags::create_tag(storage, name, None).await {
            Ok(tag) => {
                if let Err(e) = pinakes_core::tags::tag_media(storage, media_id, tag.id).await {
                    tracing::warn!(error = %e, "failed to apply new tag during import");
                }
            }
            Err(e) => {
                tracing::warn!(tag_name = %name, error = %e, "failed to create tag during import");
            }
        }
    }
    // Optionally file the item into a collection at position 0.
    if let Some(col_id) = collection_id {
        if let Err(e) = pinakes_core::collections::add_member(storage, col_id, media_id, 0).await {
            tracing::warn!(error = %e, "failed to add to collection during import");
        }
    }
}
/// POST handler importing a single file by path.
pub async fn import_media(
    State(state): State<AppState>,
    Json(req): Json<ImportRequest>,
) -> Result<Json<ImportResponse>, ApiError> {
    let result = pinakes_core::import::import_file(&state.storage, &req.path).await?;
    let response = ImportResponse {
        media_id: result.media_id.0.to_string(),
        was_duplicate: result.was_duplicate,
    };
    Ok(Json(response))
}
/// GET handler returning a page of media items.
///
/// Pagination defaults: offset 0, limit 50 (clamped to 1000); an optional
/// sort key is passed through to storage.
pub async fn list_media(
    State(state): State<AppState>,
    Query(params): Query<PaginationParams>,
) -> Result<Json<Vec<MediaResponse>>, ApiError> {
    let offset = params.offset.unwrap_or(0);
    let limit = params.limit.unwrap_or(50).min(1000);
    let pagination = Pagination::new(offset, limit, params.sort);
    let items = state.storage.list_media(&pagination).await?;
    let response: Vec<MediaResponse> = items.into_iter().map(MediaResponse::from).collect();
    Ok(Json(response))
}
/// GET handler fetching one media item by id.
pub async fn get_media(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> Result<Json<MediaResponse>, ApiError> {
    let item = state.storage.get_media(MediaId(id)).await?;
    Ok(Json(item.into()))
}
/// Maximum length for short text fields (title, artist, album, genre).
/// Measured in bytes (`str::len`), not characters.
const MAX_SHORT_TEXT: usize = 500;
/// Maximum length for long text fields (description).
/// Also reused as the cap for custom field values in `set_custom_field`.
const MAX_LONG_TEXT: usize = 10_000;
/// Reject an optional text field whose byte length exceeds `max`.
///
/// `name` and `max` are interpolated into the client-visible error message.
/// Absent fields always pass.
fn validate_optional_text(field: &Option<String>, name: &str, max: usize) -> Result<(), ApiError> {
    match field.as_deref() {
        Some(v) if v.len() > max => Err(ApiError(
            pinakes_core::error::PinakesError::InvalidOperation(format!(
                "{name} exceeds {max} characters"
            )),
        )),
        _ => Ok(()),
    }
}
/// PATCH-style handler updating a media item's editable metadata.
///
/// Only fields present in the request are changed; a field can be set but
/// never cleared (the API has no explicit-null reset). Text lengths are
/// validated up front, `updated_at` is refreshed, and an `Updated` audit
/// entry is recorded after the write.
pub async fn update_media(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
    Json(req): Json<UpdateMediaRequest>,
) -> Result<Json<MediaResponse>, ApiError> {
    // Validate all lengths before touching storage.
    validate_optional_text(&req.title, "title", MAX_SHORT_TEXT)?;
    validate_optional_text(&req.artist, "artist", MAX_SHORT_TEXT)?;
    validate_optional_text(&req.album, "album", MAX_SHORT_TEXT)?;
    validate_optional_text(&req.genre, "genre", MAX_SHORT_TEXT)?;
    validate_optional_text(&req.description, "description", MAX_LONG_TEXT)?;
    let mut item = state.storage.get_media(MediaId(id)).await?;
    // Merge: a Some(..) in the request replaces the stored value, a None
    // keeps it.
    item.title = req.title.or(item.title.take());
    item.artist = req.artist.or(item.artist.take());
    item.album = req.album.or(item.album.take());
    item.genre = req.genre.or(item.genre.take());
    item.year = req.year.or(item.year);
    item.description = req.description.or(item.description.take());
    item.updated_at = chrono::Utc::now();
    state.storage.update_media(&item).await?;
    pinakes_core::audit::record_action(
        &state.storage,
        Some(item.id),
        pinakes_core::model::AuditAction::Updated,
        None,
    )
    .await?;
    Ok(Json(MediaResponse::from(item)))
}
/// DELETE handler removing a media item and its thumbnail file.
///
/// The audit entry is written before the row is deleted so its reference to
/// the media id stays valid (FK constraint). Thumbnail removal is
/// best-effort: a missing file is ignored, other I/O errors are logged.
pub async fn delete_media(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let media_id = MediaId(id);
    // Grab the record up front — the thumbnail path is needed after deletion.
    let item = state.storage.get_media(media_id).await?;
    pinakes_core::audit::record_action(
        &state.storage,
        Some(media_id),
        pinakes_core::model::AuditAction::Deleted,
        None,
    )
    .await?;
    state.storage.delete_media(media_id).await?;
    if let Some(ref thumb_path) = item.thumbnail_path {
        if let Err(e) = tokio::fs::remove_file(thumb_path).await {
            if e.kind() != std::io::ErrorKind::NotFound {
                tracing::warn!(path = %thumb_path.display(), error = %e, "failed to remove thumbnail");
            }
        }
    }
    Ok(Json(serde_json::json!({"deleted": true})))
}
/// POST handler opening a media file with the platform's default opener and
/// recording an `Opened` audit entry on success.
pub async fn open_media(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let item = state.storage.get_media(MediaId(id)).await?;
    pinakes_core::opener::default_opener().open(&item.path)?;
    pinakes_core::audit::record_action(
        &state.storage,
        Some(item.id),
        pinakes_core::model::AuditAction::Opened,
        None,
    )
    .await?;
    Ok(Json(serde_json::json!({"opened": true})))
}
/// GET handler streaming a media file, with HTTP Range support for seeking.
///
/// With a valid `Range: bytes=...` header the response is 206 Partial
/// Content covering just the requested slice; otherwise the whole file is
/// streamed with `Accept-Ranges: bytes` advertised so clients know they can
/// seek. The file is read lazily through `ReaderStream`, so large files are
/// never buffered in memory in full.
pub async fn stream_media(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
    headers: axum::http::HeaderMap,
) -> Result<axum::response::Response, ApiError> {
    use axum::body::Body;
    use axum::http::{StatusCode, header};
    use tokio::io::{AsyncReadExt, AsyncSeekExt};
    use tokio_util::io::ReaderStream;
    let item = state.storage.get_media(MediaId(id)).await?;
    // An unopenable file is reported as FileNotFound regardless of the
    // underlying I/O error kind.
    let file = tokio::fs::File::open(&item.path).await.map_err(|_e| {
        ApiError(pinakes_core::error::PinakesError::FileNotFound(
            item.path.clone(),
        ))
    })?;
    let metadata = file
        .metadata()
        .await
        .map_err(|e| ApiError(pinakes_core::error::PinakesError::Io(e)))?;
    let total_size = metadata.len();
    let content_type = item.media_type.mime_type();
    // Parse Range header
    // A malformed or unsatisfiable Range falls through to the full-file
    // response below rather than producing an error.
    if let Some(range_header) = headers.get(header::RANGE)
        && let Ok(range_str) = range_header.to_str()
        && let Some(range) = parse_range(range_str, total_size)
    {
        let (start, end) = range;
        // Bounds are inclusive, hence the +1.
        let content_length = end - start + 1;
        let mut file = file;
        file.seek(std::io::SeekFrom::Start(start))
            .await
            .map_err(|e| ApiError(pinakes_core::error::PinakesError::Io(e)))?;
        // `take` caps the stream at exactly the requested slice.
        let limited = file.take(content_length);
        let stream = ReaderStream::new(limited);
        let body = Body::from_stream(stream);
        return axum::response::Response::builder()
            .status(StatusCode::PARTIAL_CONTENT)
            .header(header::CONTENT_TYPE, content_type)
            .header(header::CONTENT_LENGTH, content_length)
            .header(header::ACCEPT_RANGES, "bytes")
            .header(
                header::CONTENT_RANGE,
                format!("bytes {start}-{end}/{total_size}"),
            )
            .header(
                header::CONTENT_DISPOSITION,
                format!("inline; filename=\"{}\"", item.file_name),
            )
            .body(body)
            .map_err(|e| {
                ApiError(pinakes_core::error::PinakesError::InvalidOperation(
                    format!("failed to build response: {e}"),
                ))
            });
    }
    // Full response (no Range header)
    let stream = ReaderStream::new(file);
    let body = Body::from_stream(stream);
    axum::response::Response::builder()
        .header(header::CONTENT_TYPE, content_type)
        .header(header::CONTENT_LENGTH, total_size)
        .header(header::ACCEPT_RANGES, "bytes")
        .header(
            header::CONTENT_DISPOSITION,
            format!("inline; filename=\"{}\"", item.file_name),
        )
        .body(body)
        .map_err(|e| {
            ApiError(pinakes_core::error::PinakesError::InvalidOperation(
                format!("failed to build response: {e}"),
            ))
        })
}
/// Parse a `Range: bytes=START-END` header value against a resource of
/// `total_size` bytes.
///
/// Returns `Some((start, end))` with both bounds inclusive, or `None` when
/// the header is malformed or the range is unsatisfiable (empty resource,
/// zero-length suffix, start past EOF).
fn parse_range(header: &str, total_size: u64) -> Option<(u64, u64)> {
    // An empty resource cannot satisfy any byte range; bail out early so
    // the `total_size - 1` arithmetic below can never underflow (previously
    // this panicked in debug builds for an empty file).
    if total_size == 0 {
        return None;
    }
    let bytes_prefix = header.strip_prefix("bytes=")?;
    let (start_str, end_str) = bytes_prefix.split_once('-')?;
    if start_str.is_empty() {
        // Suffix range: bytes=-500 means last 500 bytes
        let suffix_len: u64 = end_str.parse().ok()?;
        if suffix_len == 0 {
            // `bytes=-0` is unsatisfiable (RFC 7233); previously it produced
            // an inverted range whose length underflowed in the caller.
            return None;
        }
        let start = total_size.saturating_sub(suffix_len);
        Some((start, total_size - 1))
    } else {
        let start: u64 = start_str.parse().ok()?;
        let end = if end_str.is_empty() {
            // Open-ended range: bytes=500- runs to the last byte.
            total_size - 1
        } else {
            // Clamp an end past EOF to the last byte, per RFC 7233.
            end_str.parse::<u64>().ok()?.min(total_size - 1)
        };
        if start > end || start >= total_size {
            return None;
        }
        Some((start, end))
    }
}
/// POST handler importing one file and, for newly imported items, applying
/// the requested tags/new tags/collection via
/// `apply_import_post_processing`. Duplicates skip post-processing.
pub async fn import_with_options(
    State(state): State<AppState>,
    Json(req): Json<ImportWithOptionsRequest>,
) -> Result<Json<ImportResponse>, ApiError> {
    let result = pinakes_core::import::import_file(&state.storage, &req.path).await?;
    if !result.was_duplicate {
        apply_import_post_processing(
            &state.storage,
            result.media_id,
            req.tag_ids.as_deref(),
            req.new_tags.as_deref(),
            req.collection_id,
        )
        .await;
    }
    let response = ImportResponse {
        media_id: result.media_id.0.to_string(),
        was_duplicate: result.was_duplicate,
    };
    Ok(Json(response))
}
/// POST handler importing a list of files in one request (max 10_000).
///
/// Paths are imported sequentially; an individual failure is recorded in
/// that item's result instead of aborting the batch. Request-level tags /
/// new tags / collection are applied only to freshly imported items
/// (duplicates are counted but left untouched). In the response,
/// `imported + duplicates + errors == total`.
pub async fn batch_import(
    State(state): State<AppState>,
    Json(req): Json<BatchImportRequest>,
) -> Result<Json<BatchImportResponse>, ApiError> {
    // Hard cap bounds request processing time and response size.
    if req.paths.len() > 10_000 {
        return Err(ApiError(
            pinakes_core::error::PinakesError::InvalidOperation(
                "batch size exceeds limit of 10000".into(),
            ),
        ));
    }
    let mut results = Vec::new();
    let mut imported = 0usize;
    let mut duplicates = 0usize;
    let mut errors = 0usize;
    for path in &req.paths {
        match pinakes_core::import::import_file(&state.storage, path).await {
            Ok(result) => {
                if result.was_duplicate {
                    duplicates += 1;
                } else {
                    imported += 1;
                    // Best-effort: post-processing failures are logged inside,
                    // not surfaced per item.
                    apply_import_post_processing(
                        &state.storage,
                        result.media_id,
                        req.tag_ids.as_deref(),
                        req.new_tags.as_deref(),
                        req.collection_id,
                    )
                    .await;
                }
                results.push(BatchImportItemResult {
                    path: path.to_string_lossy().to_string(),
                    media_id: Some(result.media_id.0.to_string()),
                    was_duplicate: result.was_duplicate,
                    error: None,
                });
            }
            Err(e) => {
                errors += 1;
                results.push(BatchImportItemResult {
                    path: path.to_string_lossy().to_string(),
                    media_id: None,
                    was_duplicate: false,
                    error: Some(e.to_string()),
                });
            }
        }
    }
    let total = results.len();
    Ok(Json(BatchImportResponse {
        results,
        total,
        imported,
        duplicates,
        errors,
    }))
}
/// POST handler importing every recognized media file under a directory.
///
/// Ignore patterns and the concurrency level come from the scanning config;
/// the config read lock is released before the import begins. Request-level
/// tags / new tags / collection are applied only to newly imported items.
/// Failed entries carry an empty `path` because the error variant does not
/// retain the originating file path.
pub async fn import_directory_endpoint(
    State(state): State<AppState>,
    Json(req): Json<DirectoryImportRequest>,
) -> Result<Json<BatchImportResponse>, ApiError> {
    // Snapshot what we need, then drop the lock before the long-running import.
    let config = state.config.read().await;
    let ignore_patterns = config.scanning.ignore_patterns.clone();
    let concurrency = config.scanning.import_concurrency;
    drop(config);
    let import_results = pinakes_core::import::import_directory_with_concurrency(
        &state.storage,
        &req.path,
        &ignore_patterns,
        concurrency,
    )
    .await?;
    let mut results = Vec::new();
    let mut imported = 0usize;
    let mut duplicates = 0usize;
    let mut errors = 0usize;
    for r in import_results {
        match r {
            Ok(result) => {
                if result.was_duplicate {
                    duplicates += 1;
                } else {
                    imported += 1;
                    // Best-effort: failures are logged inside, not surfaced
                    // per item.
                    apply_import_post_processing(
                        &state.storage,
                        result.media_id,
                        req.tag_ids.as_deref(),
                        req.new_tags.as_deref(),
                        req.collection_id,
                    )
                    .await;
                }
                results.push(BatchImportItemResult {
                    path: result.path.to_string_lossy().to_string(),
                    media_id: Some(result.media_id.0.to_string()),
                    was_duplicate: result.was_duplicate,
                    error: None,
                });
            }
            Err(e) => {
                errors += 1;
                // The error type carries no path, so this item's path is empty.
                results.push(BatchImportItemResult {
                    path: String::new(),
                    media_id: None,
                    was_duplicate: false,
                    error: Some(e.to_string()),
                });
            }
        }
    }
    let total = results.len();
    Ok(Json(BatchImportResponse {
        results,
        total,
        imported,
        duplicates,
        errors,
    }))
}
/// POST handler listing the importable media files under a directory
/// without importing anything.
///
/// Body is free-form JSON: `path` (required) and `recursive` (default
/// true). When any root directories are configured, the target must resolve
/// (via `canonicalize`) to a path under one of them. Hidden entries
/// (dot-prefixed) are skipped; only files with a recognized media type are
/// reported. The filesystem walk runs on a blocking thread.
///
/// NOTE(review): `canonicalize` is applied to the requested directory only,
/// not to entries found during the walk — symlinks inside the tree could
/// point outside the configured roots; confirm that is acceptable.
pub async fn preview_directory(
    State(state): State<AppState>,
    Json(req): Json<serde_json::Value>,
) -> Result<Json<DirectoryPreviewResponse>, ApiError> {
    let path_str = req.get("path").and_then(|v| v.as_str()).ok_or_else(|| {
        pinakes_core::error::PinakesError::InvalidOperation("path required".into())
    })?;
    let recursive = req
        .get("recursive")
        .and_then(|v| v.as_bool())
        .unwrap_or(true);
    let dir = std::path::PathBuf::from(path_str);
    if !dir.is_dir() {
        return Err(pinakes_core::error::PinakesError::FileNotFound(dir).into());
    }
    // Validate the directory is under a configured root (if roots are configured)
    let roots = state.storage.list_root_dirs().await?;
    if !roots.is_empty() {
        let canonical = dir.canonicalize().map_err(|_| {
            pinakes_core::error::PinakesError::InvalidOperation("cannot resolve path".into())
        })?;
        let allowed = roots.iter().any(|root| canonical.starts_with(root));
        if !allowed {
            return Err(pinakes_core::error::PinakesError::InvalidOperation(
                "path is not under a configured root directory".into(),
            )
            .into());
        }
    }
    // Blocking directory walk moved off the async executor.
    let files: Vec<DirectoryPreviewFile> = tokio::task::spawn_blocking(move || {
        let mut result = Vec::new();
        // Recursive walker: pushes one entry per recognized media file.
        // Unreadable directories are silently skipped.
        fn walk_dir(
            dir: &std::path::Path,
            recursive: bool,
            result: &mut Vec<DirectoryPreviewFile>,
        ) {
            let Ok(entries) = std::fs::read_dir(dir) else {
                return;
            };
            for entry in entries.flatten() {
                let path = entry.path();
                // Skip hidden files/dirs
                if path
                    .file_name()
                    .map(|n| n.to_string_lossy().starts_with('.'))
                    .unwrap_or(false)
                {
                    continue;
                }
                if path.is_dir() {
                    if recursive {
                        walk_dir(&path, recursive, result);
                    }
                } else if path.is_file()
                    && let Some(mt) = pinakes_core::media_type::MediaType::from_path(&path)
                {
                    // Metadata failures degrade to a reported size of 0.
                    let size = entry.metadata().ok().map(|m| m.len()).unwrap_or(0);
                    let file_name = path
                        .file_name()
                        .map(|n| n.to_string_lossy().to_string())
                        .unwrap_or_default();
                    // Serialize the media type enum to its string form.
                    let media_type = serde_json::to_value(mt)
                        .ok()
                        .and_then(|v| v.as_str().map(String::from))
                        .unwrap_or_default();
                    result.push(DirectoryPreviewFile {
                        path: path.to_string_lossy().to_string(),
                        file_name,
                        media_type,
                        file_size: size,
                    });
                }
            }
        }
        walk_dir(&dir, recursive, &mut result);
        result
    })
    .await
    .map_err(|e| pinakes_core::error::PinakesError::Io(std::io::Error::other(e)))?;
    let total_count = files.len();
    let total_size = files.iter().map(|f| f.file_size).sum();
    Ok(Json(DirectoryPreviewResponse {
        files,
        total_count,
        total_size,
    }))
}
/// Set (create or overwrite) a custom metadata field on a media item.
///
/// Rejects empty or over-long (>255 chars) field names and values longer
/// than `MAX_LONG_TEXT`. Unrecognized `field_type` strings fall back to
/// `Text`.
pub async fn set_custom_field(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
    Json(req): Json<SetCustomFieldRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let name_len = req.name.len();
    if name_len == 0 || name_len > 255 {
        return Err(ApiError(
            pinakes_core::error::PinakesError::InvalidOperation(
                "field name must be 1-255 characters".into(),
            ),
        ));
    }
    if req.value.len() > MAX_LONG_TEXT {
        return Err(ApiError(
            pinakes_core::error::PinakesError::InvalidOperation(format!(
                "field value exceeds {} characters",
                MAX_LONG_TEXT
            )),
        ));
    }
    use pinakes_core::model::{CustomField, CustomFieldType};
    // Map the wire string onto the typed enum; anything unknown is Text.
    let field_type = match req.field_type.as_str() {
        "number" => CustomFieldType::Number,
        "date" => CustomFieldType::Date,
        "boolean" => CustomFieldType::Boolean,
        _ => CustomFieldType::Text,
    };
    let field = CustomField {
        field_type,
        value: req.value,
    };
    state
        .storage
        .set_custom_field(MediaId(id), &req.name, &field)
        .await?;
    Ok(Json(serde_json::json!({"set": true})))
}
/// Remove a single custom metadata field from a media item.
pub async fn delete_custom_field(
    State(state): State<AppState>,
    Path((id, name)): Path<(Uuid, String)>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let media_id = MediaId(id);
    state.storage.delete_custom_field(media_id, &name).await?;
    Ok(Json(serde_json::json!({"deleted": true})))
}
pub async fn batch_tag(
State(state): State<AppState>,
Json(req): Json<BatchTagRequest>,
) -> Result<Json<BatchOperationResponse>, ApiError> {
if req.media_ids.len() > 10_000 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"batch size exceeds limit of 10000".into(),
),
));
}
let media_ids: Vec<MediaId> = req.media_ids.iter().map(|id| MediaId(*id)).collect();
match state
.storage
.batch_tag_media(&media_ids, &req.tag_ids)
.await
{
Ok(count) => Ok(Json(BatchOperationResponse {
processed: count as usize,
errors: Vec::new(),
})),
Err(e) => Ok(Json(BatchOperationResponse {
processed: 0,
errors: vec![e.to_string()],
})),
}
}
/// Delete every media item in the library.
///
/// A single audit entry is recorded first, best-effort: if recording fails
/// it is logged and the deletion proceeds anyway. Storage failures are
/// surfaced in the response's `errors` list instead of an HTTP error.
pub async fn delete_all_media(
    State(state): State<AppState>,
) -> Result<Json<BatchOperationResponse>, ApiError> {
    let audit_result = pinakes_core::audit::record_action(
        &state.storage,
        None,
        pinakes_core::model::AuditAction::Deleted,
        Some("delete all media".to_string()),
    )
    .await;
    if let Err(e) = audit_result {
        tracing::warn!(error = %e, "failed to record audit entry");
    }
    let response = match state.storage.delete_all_media().await {
        Ok(count) => BatchOperationResponse {
            processed: count as usize,
            errors: Vec::new(),
        },
        Err(e) => BatchOperationResponse {
            processed: 0,
            errors: vec![e.to_string()],
        },
    };
    Ok(Json(response))
}
pub async fn batch_delete(
State(state): State<AppState>,
Json(req): Json<BatchDeleteRequest>,
) -> Result<Json<BatchOperationResponse>, ApiError> {
if req.media_ids.len() > 10_000 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"batch size exceeds limit of 10000".into(),
),
));
}
let media_ids: Vec<MediaId> = req.media_ids.iter().map(|id| MediaId(*id)).collect();
// Record audit entries BEFORE delete to avoid FK constraint violation.
// Use None for media_id since they'll be deleted; include ID in details.
for id in &media_ids {
if let Err(e) = pinakes_core::audit::record_action(
&state.storage,
None,
pinakes_core::model::AuditAction::Deleted,
Some(format!("batch delete: media_id={}", id.0)),
)
.await
{
tracing::warn!(error = %e, "failed to record audit entry");
}
}
match state.storage.batch_delete_media(&media_ids).await {
Ok(count) => Ok(Json(BatchOperationResponse {
processed: count as usize,
errors: Vec::new(),
})),
Err(e) => Ok(Json(BatchOperationResponse {
processed: 0,
errors: vec![e.to_string()],
})),
}
}
/// Add many media items to a collection, preserving request order.
///
/// Each item's index in the request becomes its position in the collection.
/// Per-item failures are collected into `errors`; the loop continues so one
/// bad id does not abort the rest.
pub async fn batch_add_to_collection(
    State(state): State<AppState>,
    Json(req): Json<BatchCollectionRequest>,
) -> Result<Json<BatchOperationResponse>, ApiError> {
    if req.media_ids.len() > 10_000 {
        return Err(ApiError(
            pinakes_core::error::PinakesError::InvalidOperation(
                "batch size exceeds limit of 10000".into(),
            ),
        ));
    }
    let mut processed = 0;
    let mut errors = Vec::new();
    for (position, media_id) in req.media_ids.iter().enumerate() {
        let outcome = pinakes_core::collections::add_member(
            &state.storage,
            req.collection_id,
            MediaId(*media_id),
            position as i32,
        )
        .await;
        if let Err(e) = outcome {
            errors.push(format!("{media_id}: {e}"));
        } else {
            processed += 1;
        }
    }
    Ok(Json(BatchOperationResponse { processed, errors }))
}
pub async fn batch_update(
State(state): State<AppState>,
Json(req): Json<BatchUpdateRequest>,
) -> Result<Json<BatchOperationResponse>, ApiError> {
if req.media_ids.len() > 10_000 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"batch size exceeds limit of 10000".into(),
),
));
}
let media_ids: Vec<MediaId> = req.media_ids.iter().map(|id| MediaId(*id)).collect();
match state
.storage
.batch_update_media(
&media_ids,
req.title.as_deref(),
req.artist.as_deref(),
req.album.as_deref(),
req.genre.as_deref(),
req.year,
req.description.as_deref(),
)
.await
{
Ok(count) => Ok(Json(BatchOperationResponse {
processed: count as usize,
errors: Vec::new(),
})),
Err(e) => Ok(Json(BatchOperationResponse {
processed: 0,
errors: vec![e.to_string()],
})),
}
}
/// Stream a media item's thumbnail image with a one-day cache header.
///
/// Errors with NotFound when the item has no thumbnail, and FileNotFound
/// when the recorded thumbnail path is missing on disk.
///
/// Fix: the Content-Type was hard-coded to `image/jpeg`, which mislabelled
/// any thumbnail stored with a different extension. It is now derived from
/// the file extension, still defaulting to `image/jpeg` for unknown ones.
pub async fn get_thumbnail(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> Result<axum::response::Response, ApiError> {
    use axum::body::Body;
    use axum::http::header;
    use tokio_util::io::ReaderStream;
    let item = state.storage.get_media(MediaId(id)).await?;
    let thumb_path = item.thumbnail_path.ok_or_else(|| {
        ApiError(pinakes_core::error::PinakesError::NotFound(
            "no thumbnail available".into(),
        ))
    })?;
    // Derive MIME type from the extension (case-insensitive); JPEG remains
    // the default since that is what this endpoint historically advertised.
    let content_type = match thumb_path
        .extension()
        .and_then(|e| e.to_str())
        .map(|e| e.to_ascii_lowercase())
        .as_deref()
    {
        Some("png") => "image/png",
        Some("webp") => "image/webp",
        Some("gif") => "image/gif",
        _ => "image/jpeg",
    };
    let file = tokio::fs::File::open(&thumb_path)
        .await
        .map_err(|_e| ApiError(pinakes_core::error::PinakesError::FileNotFound(thumb_path)))?;
    let stream = ReaderStream::new(file);
    let body = Body::from_stream(stream);
    axum::response::Response::builder()
        .header(header::CONTENT_TYPE, content_type)
        .header(header::CACHE_CONTROL, "public, max-age=86400")
        .body(body)
        .map_err(|e| {
            ApiError(pinakes_core::error::PinakesError::InvalidOperation(
                format!("failed to build response: {e}"),
            ))
        })
}
pub async fn get_media_count(
State(state): State<AppState>,
) -> Result<Json<MediaCountResponse>, ApiError> {
let count = state.storage.count_media().await?;
Ok(Json(MediaCountResponse { count }))
}

View file

@ -0,0 +1,18 @@
pub mod audit;
pub mod auth;
pub mod collections;
pub mod config;
pub mod database;
pub mod duplicates;
pub mod export;
pub mod health;
pub mod integrity;
pub mod jobs;
pub mod media;
pub mod saved_searches;
pub mod scan;
pub mod scheduled_tasks;
pub mod search;
pub mod statistics;
pub mod tags;
pub mod webhooks;

View file

@ -0,0 +1,76 @@
use axum::Json;
use axum::extract::{Path, State};
use serde::{Deserialize, Serialize};
use crate::error::ApiError;
use crate::state::AppState;
/// Request body for creating a saved search.
#[derive(Debug, Deserialize)]
pub struct CreateSavedSearchRequest {
    // Display name for the saved search.
    pub name: String,
    // Query string, stored verbatim by the handler.
    pub query: String,
    // Optional sort key; presumably the same keys as the search endpoints
    // ("date_desc" etc.) — TODO confirm against the search UI.
    pub sort_order: Option<String>,
}
/// Serialized view of a saved search returned by the API.
#[derive(Debug, Serialize)]
pub struct SavedSearchResponse {
    // UUID rendered as a string.
    pub id: String,
    pub name: String,
    pub query: String,
    pub sort_order: Option<String>,
    // Creation timestamp (UTC).
    pub created_at: chrono::DateTime<chrono::Utc>,
}
pub async fn create_saved_search(
State(state): State<AppState>,
Json(req): Json<CreateSavedSearchRequest>,
) -> Result<Json<SavedSearchResponse>, ApiError> {
let id = uuid::Uuid::now_v7();
state
.storage
.save_search(id, &req.name, &req.query, req.sort_order.as_deref())
.await
.map_err(ApiError)?;
Ok(Json(SavedSearchResponse {
id: id.to_string(),
name: req.name,
query: req.query,
sort_order: req.sort_order,
created_at: chrono::Utc::now(),
}))
}
/// List all saved searches.
pub async fn list_saved_searches(
    State(state): State<AppState>,
) -> Result<Json<Vec<SavedSearchResponse>>, ApiError> {
    let searches = state
        .storage
        .list_saved_searches()
        .await
        .map_err(ApiError)?;
    let mut responses = Vec::with_capacity(searches.len());
    for s in searches {
        responses.push(SavedSearchResponse {
            id: s.id.to_string(),
            name: s.name,
            query: s.query,
            sort_order: s.sort_order,
            created_at: s.created_at,
        });
    }
    Ok(Json(responses))
}
/// Delete a saved search by id.
pub async fn delete_saved_search(
    State(state): State<AppState>,
    Path(id): Path<uuid::Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let outcome = state.storage.delete_saved_search(id).await;
    outcome.map_err(ApiError)?;
    Ok(Json(serde_json::json!({ "deleted": true })))
}

View file

@ -0,0 +1,30 @@
use axum::Json;
use axum::extract::State;
use crate::dto::*;
use crate::error::ApiError;
use crate::state::AppState;
/// Queue a library scan as a background job; returns the job ID immediately
/// rather than waiting for the scan to finish.
pub async fn trigger_scan(
    State(state): State<AppState>,
    Json(req): Json<ScanRequest>,
) -> Result<Json<ScanJobResponse>, ApiError> {
    let job_id = state
        .job_queue
        .submit(pinakes_core::jobs::JobKind::Scan { path: req.path })
        .await;
    Ok(Json(ScanJobResponse {
        job_id: job_id.to_string(),
    }))
}
/// Report the current scan-progress snapshot.
pub async fn scan_status(State(state): State<AppState>) -> Json<ScanStatusResponse> {
    let snapshot = state.scan_progress.snapshot();
    Json(ScanStatusResponse {
        scanning: snapshot.scanning,
        files_found: snapshot.files_found,
        files_processed: snapshot.files_processed,
        // Count taken before `errors` is moved into the response.
        error_count: snapshot.errors.len(),
        errors: snapshot.errors,
    })
}

View file

@ -0,0 +1,55 @@
use axum::Json;
use axum::extract::{Path, State};
use crate::dto::ScheduledTaskResponse;
use crate::error::ApiError;
use crate::state::AppState;
/// List all scheduled tasks with their timing and last-run status.
pub async fn list_scheduled_tasks(
    State(state): State<AppState>,
) -> Result<Json<Vec<ScheduledTaskResponse>>, ApiError> {
    let mut responses = Vec::new();
    for t in state.scheduler.list_tasks().await {
        responses.push(ScheduledTaskResponse {
            id: t.id,
            name: t.name,
            schedule: t.schedule.display_string(),
            enabled: t.enabled,
            // Timestamps are serialized as RFC 3339 strings.
            last_run: t.last_run.map(|dt| dt.to_rfc3339()),
            next_run: t.next_run.map(|dt| dt.to_rfc3339()),
            last_status: t.last_status,
        });
    }
    Ok(Json(responses))
}
pub async fn toggle_scheduled_task(
State(state): State<AppState>,
Path(id): Path<String>,
) -> Result<Json<serde_json::Value>, ApiError> {
match state.scheduler.toggle_task(&id).await {
Some(enabled) => Ok(Json(serde_json::json!({
"id": id,
"enabled": enabled,
}))),
None => Err(ApiError(pinakes_core::error::PinakesError::NotFound(
format!("scheduled task not found: {id}"),
))),
}
}
pub async fn run_scheduled_task_now(
State(state): State<AppState>,
Path(id): Path<String>,
) -> Result<Json<serde_json::Value>, ApiError> {
match state.scheduler.run_now(&id).await {
Some(job_id) => Ok(Json(serde_json::json!({
"id": id,
"job_id": job_id,
}))),
None => Err(ApiError(pinakes_core::error::PinakesError::NotFound(
format!("scheduled task not found: {id}"),
))),
}
}

View file

@ -0,0 +1,87 @@
use axum::Json;
use axum::extract::{Query, State};
use crate::dto::*;
use crate::error::ApiError;
use crate::state::AppState;
use pinakes_core::model::Pagination;
use pinakes_core::search::{SearchRequest, SortOrder, parse_search_query};
/// Map an optional sort key from the request onto a `SortOrder`.
/// A missing or unrecognized key falls back to relevance ordering.
fn resolve_sort(sort: Option<&str>) -> SortOrder {
    let Some(key) = sort else {
        return SortOrder::Relevance;
    };
    match key {
        "date_asc" => SortOrder::DateAsc,
        "date_desc" => SortOrder::DateDesc,
        "name_asc" => SortOrder::NameAsc,
        "name_desc" => SortOrder::NameDesc,
        "size_asc" => SortOrder::SizeAsc,
        "size_desc" => SortOrder::SizeDesc,
        _ => SortOrder::Relevance,
    }
}
pub async fn search(
State(state): State<AppState>,
Query(params): Query<SearchParams>,
) -> Result<Json<SearchResponse>, ApiError> {
if params.q.len() > 2048 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"search query exceeds maximum length of 2048 characters".into(),
),
));
}
let query = parse_search_query(&params.q)?;
let sort = resolve_sort(params.sort.as_deref());
let request = SearchRequest {
query,
sort,
pagination: Pagination::new(
params.offset.unwrap_or(0),
params.limit.unwrap_or(50).min(1000),
None,
),
};
let results = state.storage.search(&request).await?;
Ok(Json(SearchResponse {
items: results.items.into_iter().map(MediaResponse::from).collect(),
total_count: results.total_count,
}))
}
pub async fn search_post(
State(state): State<AppState>,
Json(body): Json<SearchRequestBody>,
) -> Result<Json<SearchResponse>, ApiError> {
if body.q.len() > 2048 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"search query exceeds maximum length of 2048 characters".into(),
),
));
}
let query = parse_search_query(&body.q)?;
let sort = resolve_sort(body.sort.as_deref());
let request = SearchRequest {
query,
sort,
pagination: Pagination::new(
body.offset.unwrap_or(0),
body.limit.unwrap_or(50).min(1000),
None,
),
};
let results = state.storage.search(&request).await?;
Ok(Json(SearchResponse {
items: results.items.into_iter().map(MediaResponse::from).collect(),
total_count: results.total_count,
}))
}

View file

@ -0,0 +1,13 @@
use axum::Json;
use axum::extract::State;
use crate::dto::LibraryStatisticsResponse;
use crate::error::ApiError;
use crate::state::AppState;
/// Return aggregate statistics for the whole library.
pub async fn library_statistics(
    State(state): State<AppState>,
) -> Result<Json<LibraryStatisticsResponse>, ApiError> {
    let stats = state.storage.library_statistics().await?;
    let response = LibraryStatisticsResponse::from(stats);
    Ok(Json(response))
}

View file

@ -0,0 +1,70 @@
use axum::Json;
use axum::extract::{Path, State};
use uuid::Uuid;
use crate::dto::*;
use crate::error::ApiError;
use crate::state::AppState;
use pinakes_core::model::MediaId;
/// Create a tag, optionally nested under a parent tag.
///
/// Tag names must be 1-255 characters.
pub async fn create_tag(
    State(state): State<AppState>,
    Json(req): Json<CreateTagRequest>,
) -> Result<Json<TagResponse>, ApiError> {
    if !(1..=255).contains(&req.name.len()) {
        return Err(ApiError(
            pinakes_core::error::PinakesError::InvalidOperation(
                "tag name must be 1-255 characters".into(),
            ),
        ));
    }
    let tag = pinakes_core::tags::create_tag(&state.storage, &req.name, req.parent_id).await?;
    Ok(Json(TagResponse::from(tag)))
}
/// List every tag in the library.
pub async fn list_tags(State(state): State<AppState>) -> Result<Json<Vec<TagResponse>>, ApiError> {
    let responses: Vec<TagResponse> = state
        .storage
        .list_tags()
        .await?
        .into_iter()
        .map(TagResponse::from)
        .collect();
    Ok(Json(responses))
}
/// Fetch a single tag by id.
pub async fn get_tag(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> Result<Json<TagResponse>, ApiError> {
    Ok(Json(TagResponse::from(state.storage.get_tag(id).await?)))
}
/// Delete a tag by id.
pub async fn delete_tag(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
    state.storage.delete_tag(id).await?;
    let body = serde_json::json!({"deleted": true});
    Ok(Json(body))
}
/// Attach an existing tag to a media item.
pub async fn tag_media(
    State(state): State<AppState>,
    Path(media_id): Path<Uuid>,
    Json(req): Json<TagMediaRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let target = MediaId(media_id);
    pinakes_core::tags::tag_media(&state.storage, target, req.tag_id).await?;
    Ok(Json(serde_json::json!({"tagged": true})))
}
/// Detach a tag from a media item.
pub async fn untag_media(
    State(state): State<AppState>,
    Path((media_id, tag_id)): Path<(Uuid, Uuid)>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let target = MediaId(media_id);
    pinakes_core::tags::untag_media(&state.storage, target, tag_id).await?;
    Ok(Json(serde_json::json!({"untagged": true})))
}
/// List the tags attached to a media item.
pub async fn get_media_tags(
    State(state): State<AppState>,
    Path(media_id): Path<Uuid>,
) -> Result<Json<Vec<TagResponse>>, ApiError> {
    let responses: Vec<TagResponse> = state
        .storage
        .get_media_tags(MediaId(media_id))
        .await?
        .into_iter()
        .map(TagResponse::from)
        .collect();
    Ok(Json(responses))
}

View file

@ -0,0 +1,40 @@
use axum::Json;
use axum::extract::State;
use serde::Serialize;
use crate::error::ApiError;
use crate::state::AppState;
/// Serialized view of one webhook endpoint from the application config.
#[derive(Debug, Serialize)]
pub struct WebhookInfo {
    // Destination URL, copied from the configuration.
    pub url: String,
    // Event names this webhook is subscribed to.
    pub events: Vec<String>,
}
/// List the webhook endpoints present in the current configuration.
pub async fn list_webhooks(
    State(state): State<AppState>,
) -> Result<Json<Vec<WebhookInfo>>, ApiError> {
    let config = state.config.read().await;
    let mut hooks = Vec::with_capacity(config.webhooks.len());
    for h in &config.webhooks {
        hooks.push(WebhookInfo {
            url: h.url.clone(),
            events: h.events.clone(),
        });
    }
    Ok(Json(hooks))
}
/// Report how many webhooks are configured.
///
/// NOTE(review): no delivery happens here — per the original comment, the
/// event bus handles delivery in production; this endpoint only reads the
/// configuration, yet still reports `test_sent: true`.
pub async fn test_webhook(
    State(state): State<AppState>,
) -> Result<Json<serde_json::Value>, ApiError> {
    let webhook_count = state.config.read().await.webhooks.len();
    Ok(Json(serde_json::json!({
        "webhooks_configured": webhook_count,
        "test_sent": true
    })))
}