pinakes/crates/pinakes-server/src/routes/media.rs

use axum::Json;
use axum::extract::{Path, Query, State};
use uuid::Uuid;
use crate::dto::*;
use crate::error::ApiError;
use crate::state::AppState;
use pinakes_core::model::{MediaId, Pagination};
use pinakes_core::storage::DynStorageBackend;
/// Apply tags and add to a collection after a successful import.
/// Shared logic used by import_with_options, batch_import, and import_directory_endpoint.
/// Failures are logged as warnings and never fail the surrounding import.
async fn apply_import_post_processing(
storage: &DynStorageBackend,
media_id: MediaId,
tag_ids: Option<&[Uuid]>,
new_tags: Option<&[String]>,
collection_id: Option<Uuid>,
) {
if let Some(tag_ids) = tag_ids {
for tid in tag_ids {
if let Err(e) = pinakes_core::tags::tag_media(storage, media_id, *tid).await {
tracing::warn!(error = %e, "failed to apply tag during import");
}
}
}
if let Some(new_tags) = new_tags {
for name in new_tags {
match pinakes_core::tags::create_tag(storage, name, None).await {
Ok(tag) => {
if let Err(e) = pinakes_core::tags::tag_media(storage, media_id, tag.id).await {
tracing::warn!(error = %e, "failed to apply new tag during import");
}
}
Err(e) => {
tracing::warn!(tag_name = %name, error = %e, "failed to create tag during import");
}
}
}
}
if let Some(col_id) = collection_id
&& let Err(e) = pinakes_core::collections::add_member(storage, col_id, media_id, 0).await
{
tracing::warn!(error = %e, "failed to add to collection during import");
}
}
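/// Import a single media file from a filesystem path, returning its id and duplicate status.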
pub async fn import_media(
State(state): State<AppState>,
Json(req): Json<ImportRequest>,
) -> Result<Json<ImportResponse>, ApiError> {
let result = pinakes_core::import::import_file(&state.storage, &req.path).await?;
Ok(Json(ImportResponse {
media_id: result.media_id.0.to_string(),
was_duplicate: result.was_duplicate,
}))
}
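/// List media items with pagination; the limit defaults to 50 and is capped at 1000.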
pub async fn list_media(
State(state): State<AppState>,
Query(params): Query<PaginationParams>,
) -> Result<Json<Vec<MediaResponse>>, ApiError> {
let pagination = Pagination::new(
params.offset.unwrap_or(0),
params.limit.unwrap_or(50).min(1000),
params.sort,
);
let items = state.storage.list_media(&pagination).await?;
Ok(Json(items.into_iter().map(MediaResponse::from).collect()))
}
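/// Fetch a single media item by id.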
pub async fn get_media(
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<MediaResponse>, ApiError> {
let item = state.storage.get_media(MediaId(id)).await?;
Ok(Json(MediaResponse::from(item)))
}
/// Maximum length for short text fields (title, artist, album, genre).
const MAX_SHORT_TEXT: usize = 500;
/// Maximum length for long text fields (description).
const MAX_LONG_TEXT: usize = 10_000;
fn validate_optional_text(field: &Option<String>, name: &str, max: usize) -> Result<(), ApiError> {
if let Some(v) = field
&& v.len() > max
{
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(format!(
"{name} exceeds {max} characters"
)),
));
}
Ok(())
}
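/// Update metadata on a media item; only the fields present in the request are changed.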
pub async fn update_media(
State(state): State<AppState>,
Path(id): Path<Uuid>,
Json(req): Json<UpdateMediaRequest>,
) -> Result<Json<MediaResponse>, ApiError> {
validate_optional_text(&req.title, "title", MAX_SHORT_TEXT)?;
validate_optional_text(&req.artist, "artist", MAX_SHORT_TEXT)?;
validate_optional_text(&req.album, "album", MAX_SHORT_TEXT)?;
validate_optional_text(&req.genre, "genre", MAX_SHORT_TEXT)?;
validate_optional_text(&req.description, "description", MAX_LONG_TEXT)?;
let mut item = state.storage.get_media(MediaId(id)).await?;
if let Some(title) = req.title {
item.title = Some(title);
}
if let Some(artist) = req.artist {
item.artist = Some(artist);
}
if let Some(album) = req.album {
item.album = Some(album);
}
if let Some(genre) = req.genre {
item.genre = Some(genre);
}
if let Some(year) = req.year {
item.year = Some(year);
}
if let Some(description) = req.description {
item.description = Some(description);
}
item.updated_at = chrono::Utc::now();
state.storage.update_media(&item).await?;
pinakes_core::audit::record_action(
&state.storage,
Some(item.id),
pinakes_core::model::AuditAction::Updated,
None,
)
.await?;
Ok(Json(MediaResponse::from(item)))
}
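/// Hard-delete a media item, recording the audit entry first and removing its thumbnail file.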
pub async fn delete_media(
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
let media_id = MediaId(id);
// Fetch item first to get thumbnail path for cleanup
let item = state.storage.get_media(media_id).await?;
// Record audit BEFORE delete to avoid FK constraint violation
pinakes_core::audit::record_action(
&state.storage,
Some(media_id),
pinakes_core::model::AuditAction::Deleted,
None,
)
.await?;
state.storage.delete_media(media_id).await?;
// Clean up thumbnail file if it exists
if let Some(ref thumb_path) = item.thumbnail_path
&& let Err(e) = tokio::fs::remove_file(thumb_path).await
&& e.kind() != std::io::ErrorKind::NotFound
{
tracing::warn!(path = %thumb_path.display(), error = %e, "failed to remove thumbnail");
}
Ok(Json(serde_json::json!({"deleted": true})))
}
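/// Open the media file with the host's default opener and record an `Opened` audit action.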
pub async fn open_media(
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
let item = state.storage.get_media(MediaId(id)).await?;
let opener = pinakes_core::opener::default_opener();
opener.open(&item.path)?;
pinakes_core::audit::record_action(
&state.storage,
Some(item.id),
pinakes_core::model::AuditAction::Opened,
None,
)
.await?;
Ok(Json(serde_json::json!({"opened": true})))
}
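/// Stream the media file, honoring `Range` requests with `206 Partial Content` responses.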
pub async fn stream_media(
State(state): State<AppState>,
Path(id): Path<Uuid>,
headers: axum::http::HeaderMap,
) -> Result<axum::response::Response, ApiError> {
use axum::body::Body;
use axum::http::{StatusCode, header};
use tokio::io::{AsyncReadExt, AsyncSeekExt};
use tokio_util::io::ReaderStream;
let item = state.storage.get_media(MediaId(id)).await?;
let file = tokio::fs::File::open(&item.path).await.map_err(|_e| {
ApiError(pinakes_core::error::PinakesError::FileNotFound(
item.path.clone(),
))
})?;
let metadata = file
.metadata()
.await
.map_err(|e| ApiError(pinakes_core::error::PinakesError::Io(e)))?;
let total_size = metadata.len();
let content_type = item.media_type.mime_type();
// Parse Range header
if let Some(range_header) = headers.get(header::RANGE)
&& let Ok(range_str) = range_header.to_str()
&& let Some(range) = parse_range(range_str, total_size)
{
let (start, end) = range;
let content_length = end - start + 1;
let mut file = file;
file.seek(std::io::SeekFrom::Start(start))
.await
.map_err(|e| ApiError(pinakes_core::error::PinakesError::Io(e)))?;
let limited = file.take(content_length);
let stream = ReaderStream::new(limited);
let body = Body::from_stream(stream);
return axum::response::Response::builder()
.status(StatusCode::PARTIAL_CONTENT)
.header(header::CONTENT_TYPE, content_type)
.header(header::CONTENT_LENGTH, content_length)
.header(header::ACCEPT_RANGES, "bytes")
.header(
header::CONTENT_RANGE,
format!("bytes {start}-{end}/{total_size}"),
)
.header(
header::CONTENT_DISPOSITION,
format!("inline; filename=\"{}\"", item.file_name),
)
.body(body)
.map_err(|e| {
ApiError(pinakes_core::error::PinakesError::InvalidOperation(
format!("failed to build response: {e}"),
))
});
}
// Full response (no Range header)
let stream = ReaderStream::new(file);
let body = Body::from_stream(stream);
axum::response::Response::builder()
.header(header::CONTENT_TYPE, content_type)
.header(header::CONTENT_LENGTH, total_size)
.header(header::ACCEPT_RANGES, "bytes")
.header(
header::CONTENT_DISPOSITION,
format!("inline; filename=\"{}\"", item.file_name),
)
.body(body)
.map_err(|e| {
ApiError(pinakes_core::error::PinakesError::InvalidOperation(
format!("failed to build response: {e}"),
))
})
}
/// Parse a `Range: bytes=START-END` header value.
/// Returns `Some((start, end))` as inclusive byte offsets, or `None` if the header is
/// malformed or the range cannot be satisfied. See the `tests` module below for examples.
fn parse_range(header: &str, total_size: u64) -> Option<(u64, u64)> {
// An empty file cannot satisfy any byte range; bail out early so the
// `total_size - 1` arithmetic below cannot underflow.
if total_size == 0 {
return None;
}
let bytes_prefix = header.strip_prefix("bytes=")?;
let (start_str, end_str) = bytes_prefix.split_once('-')?;
if start_str.is_empty() {
// Suffix range: bytes=-500 means the last 500 bytes.
// A zero-length suffix (`bytes=-0`) is not satisfiable.
let suffix_len: u64 = end_str.parse().ok()?;
if suffix_len == 0 {
return None;
}
let start = total_size.saturating_sub(suffix_len);
Some((start, total_size - 1))
} else {
let start: u64 = start_str.parse().ok()?;
let end = if end_str.is_empty() {
total_size - 1
} else {
end_str.parse::<u64>().ok()?.min(total_size - 1)
};
if start > end || start >= total_size {
return None;
}
Some((start, end))
}
}
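/// Import a single file and, if it is new, apply the requested tags and collection membership.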
pub async fn import_with_options(
State(state): State<AppState>,
Json(req): Json<ImportWithOptionsRequest>,
) -> Result<Json<ImportResponse>, ApiError> {
let result = pinakes_core::import::import_file(&state.storage, &req.path).await?;
if !result.was_duplicate {
apply_import_post_processing(
&state.storage,
result.media_id,
req.tag_ids.as_deref(),
req.new_tags.as_deref(),
req.collection_id,
)
.await;
}
Ok(Json(ImportResponse {
media_id: result.media_id.0.to_string(),
was_duplicate: result.was_duplicate,
}))
}
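/// Import up to 10,000 paths in one request, applying import options to each newly imported item.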
pub async fn batch_import(
State(state): State<AppState>,
Json(req): Json<BatchImportRequest>,
) -> Result<Json<BatchImportResponse>, ApiError> {
if req.paths.len() > 10_000 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"batch size exceeds limit of 10000".into(),
),
));
}
let mut results = Vec::new();
let mut imported = 0usize;
let mut duplicates = 0usize;
let mut errors = 0usize;
for path in &req.paths {
match pinakes_core::import::import_file(&state.storage, path).await {
Ok(result) => {
if result.was_duplicate {
duplicates += 1;
} else {
imported += 1;
apply_import_post_processing(
&state.storage,
result.media_id,
req.tag_ids.as_deref(),
req.new_tags.as_deref(),
req.collection_id,
)
.await;
}
results.push(BatchImportItemResult {
path: path.to_string_lossy().to_string(),
media_id: Some(result.media_id.0.to_string()),
was_duplicate: result.was_duplicate,
error: None,
});
}
Err(e) => {
errors += 1;
results.push(BatchImportItemResult {
path: path.to_string_lossy().to_string(),
media_id: None,
was_duplicate: false,
error: Some(e.to_string()),
});
}
}
}
let total = results.len();
Ok(Json(BatchImportResponse {
results,
total,
imported,
duplicates,
errors,
}))
}
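/// Import a directory using the configured ignore patterns and concurrency, returning per-file results.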
pub async fn import_directory_endpoint(
State(state): State<AppState>,
Json(req): Json<DirectoryImportRequest>,
) -> Result<Json<BatchImportResponse>, ApiError> {
let config = state.config.read().await;
let ignore_patterns = config.scanning.ignore_patterns.clone();
let concurrency = config.scanning.import_concurrency;
drop(config);
let import_results = pinakes_core::import::import_directory_with_concurrency(
&state.storage,
&req.path,
&ignore_patterns,
concurrency,
)
.await?;
let mut results = Vec::new();
let mut imported = 0usize;
let mut duplicates = 0usize;
let mut errors = 0usize;
for r in import_results {
match r {
Ok(result) => {
if result.was_duplicate {
duplicates += 1;
} else {
imported += 1;
apply_import_post_processing(
&state.storage,
result.media_id,
req.tag_ids.as_deref(),
req.new_tags.as_deref(),
req.collection_id,
)
.await;
}
results.push(BatchImportItemResult {
path: result.path.to_string_lossy().to_string(),
media_id: Some(result.media_id.0.to_string()),
was_duplicate: result.was_duplicate,
error: None,
});
}
Err(e) => {
errors += 1;
results.push(BatchImportItemResult {
path: String::new(),
media_id: None,
was_duplicate: false,
error: Some(e.to_string()),
});
}
}
}
let total = results.len();
Ok(Json(BatchImportResponse {
results,
total,
imported,
duplicates,
errors,
}))
}
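/// Preview which media files a directory import would pick up, without importing anything.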
pub async fn preview_directory(
State(state): State<AppState>,
Json(req): Json<serde_json::Value>,
) -> Result<Json<DirectoryPreviewResponse>, ApiError> {
let path_str = req.get("path").and_then(|v| v.as_str()).ok_or_else(|| {
pinakes_core::error::PinakesError::InvalidOperation("path required".into())
})?;
let recursive = req
.get("recursive")
.and_then(|v| v.as_bool())
.unwrap_or(true);
let dir = std::path::PathBuf::from(path_str);
if !dir.is_dir() {
return Err(pinakes_core::error::PinakesError::FileNotFound(dir).into());
}
// Validate the directory is under a configured root (if roots are configured)
let roots = state.storage.list_root_dirs().await?;
if !roots.is_empty() {
let canonical = dir.canonicalize().map_err(|_| {
pinakes_core::error::PinakesError::InvalidOperation("cannot resolve path".into())
})?;
let allowed = roots.iter().any(|root| canonical.starts_with(root));
if !allowed {
return Err(pinakes_core::error::PinakesError::InvalidOperation(
"path is not under a configured root directory".into(),
)
.into());
}
}
let files: Vec<DirectoryPreviewFile> = tokio::task::spawn_blocking(move || {
let mut result = Vec::new();
fn walk_dir(
dir: &std::path::Path,
recursive: bool,
result: &mut Vec<DirectoryPreviewFile>,
) {
let Ok(entries) = std::fs::read_dir(dir) else {
return;
};
for entry in entries.flatten() {
let path = entry.path();
// Skip hidden files/dirs
if path
.file_name()
.map(|n| n.to_string_lossy().starts_with('.'))
.unwrap_or(false)
{
continue;
}
if path.is_dir() {
if recursive {
walk_dir(&path, recursive, result);
}
} else if path.is_file()
&& let Some(mt) = pinakes_core::media_type::MediaType::from_path(&path)
{
let size = entry.metadata().ok().map(|m| m.len()).unwrap_or(0);
let file_name = path
.file_name()
.map(|n| n.to_string_lossy().to_string())
.unwrap_or_default();
let media_type = serde_json::to_value(mt)
.ok()
.and_then(|v| v.as_str().map(String::from))
.unwrap_or_default();
result.push(DirectoryPreviewFile {
path: path.to_string_lossy().to_string(),
file_name,
media_type,
file_size: size,
});
}
}
}
walk_dir(&dir, recursive, &mut result);
result
})
.await
.map_err(|e| pinakes_core::error::PinakesError::Io(std::io::Error::other(e)))?;
let total_count = files.len();
let total_size = files.iter().map(|f| f.file_size).sum();
Ok(Json(DirectoryPreviewResponse {
files,
total_count,
total_size,
}))
}
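/// Set or overwrite a named custom field on a media item.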
pub async fn set_custom_field(
State(state): State<AppState>,
Path(id): Path<Uuid>,
Json(req): Json<SetCustomFieldRequest>,
) -> Result<Json<serde_json::Value>, ApiError> {
if req.name.is_empty() || req.name.len() > 255 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"field name must be 1-255 characters".into(),
),
));
}
if req.value.len() > MAX_LONG_TEXT {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(format!(
"field value exceeds {} characters",
MAX_LONG_TEXT
)),
));
}
use pinakes_core::model::{CustomField, CustomFieldType};
let field_type = match req.field_type.as_str() {
"number" => CustomFieldType::Number,
"date" => CustomFieldType::Date,
"boolean" => CustomFieldType::Boolean,
_ => CustomFieldType::Text,
};
let field = CustomField {
field_type,
value: req.value,
};
state
.storage
.set_custom_field(MediaId(id), &req.name, &field)
.await?;
Ok(Json(serde_json::json!({"set": true})))
}
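/// Remove a named custom field from a media item.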
pub async fn delete_custom_field(
State(state): State<AppState>,
Path((id, name)): Path<(Uuid, String)>,
) -> Result<Json<serde_json::Value>, ApiError> {
state
.storage
.delete_custom_field(MediaId(id), &name)
.await?;
Ok(Json(serde_json::json!({"deleted": true})))
}
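/// Apply a set of tags to up to 10,000 media items in a single operation.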
pub async fn batch_tag(
State(state): State<AppState>,
Json(req): Json<BatchTagRequest>,
) -> Result<Json<BatchOperationResponse>, ApiError> {
if req.media_ids.len() > 10_000 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"batch size exceeds limit of 10000".into(),
),
));
}
let media_ids: Vec<MediaId> = req.media_ids.iter().map(|id| MediaId(*id)).collect();
match state
.storage
.batch_tag_media(&media_ids, &req.tag_ids)
.await
{
Ok(count) => Ok(Json(BatchOperationResponse {
processed: count as usize,
errors: Vec::new(),
})),
Err(e) => Ok(Json(BatchOperationResponse {
processed: 0,
errors: vec![e.to_string()],
})),
}
}
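/// Delete every media item in the library, recording a single audit entry beforehand.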
pub async fn delete_all_media(
State(state): State<AppState>,
) -> Result<Json<BatchOperationResponse>, ApiError> {
// Record audit entry before deletion
if let Err(e) = pinakes_core::audit::record_action(
&state.storage,
None,
pinakes_core::model::AuditAction::Deleted,
Some("delete all media".to_string()),
)
.await
{
tracing::warn!(error = %e, "failed to record audit entry");
}
match state.storage.delete_all_media().await {
Ok(count) => Ok(Json(BatchOperationResponse {
processed: count as usize,
errors: Vec::new(),
})),
Err(e) => Ok(Json(BatchOperationResponse {
processed: 0,
errors: vec![e.to_string()],
})),
}
}
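/// Hard-delete up to 10,000 media items, recording audit entries before deletion.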
pub async fn batch_delete(
State(state): State<AppState>,
Json(req): Json<BatchDeleteRequest>,
) -> Result<Json<BatchOperationResponse>, ApiError> {
if req.media_ids.len() > 10_000 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"batch size exceeds limit of 10000".into(),
),
));
}
let media_ids: Vec<MediaId> = req.media_ids.iter().map(|id| MediaId(*id)).collect();
// Record audit entries BEFORE delete to avoid FK constraint violation.
// Use None for media_id since they'll be deleted; include ID in details.
for id in &media_ids {
if let Err(e) = pinakes_core::audit::record_action(
&state.storage,
None,
pinakes_core::model::AuditAction::Deleted,
Some(format!("batch delete: media_id={}", id.0)),
)
.await
{
tracing::warn!(error = %e, "failed to record audit entry");
}
}
match state.storage.batch_delete_media(&media_ids).await {
Ok(count) => Ok(Json(BatchOperationResponse {
processed: count as usize,
errors: Vec::new(),
})),
Err(e) => Ok(Json(BatchOperationResponse {
processed: 0,
errors: vec![e.to_string()],
})),
}
}
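/// Add up to 10,000 media items to a collection, using request order as the member position.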
pub async fn batch_add_to_collection(
State(state): State<AppState>,
Json(req): Json<BatchCollectionRequest>,
) -> Result<Json<BatchOperationResponse>, ApiError> {
if req.media_ids.len() > 10_000 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"batch size exceeds limit of 10000".into(),
),
));
}
let mut processed = 0;
let mut errors = Vec::new();
for (i, media_id) in req.media_ids.iter().enumerate() {
match pinakes_core::collections::add_member(
&state.storage,
req.collection_id,
MediaId(*media_id),
i as i32,
)
.await
{
Ok(_) => processed += 1,
Err(e) => errors.push(format!("{media_id}: {e}")),
}
}
Ok(Json(BatchOperationResponse { processed, errors }))
}
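/// Apply the same metadata values to up to 10,000 media items at once.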
pub async fn batch_update(
State(state): State<AppState>,
Json(req): Json<BatchUpdateRequest>,
) -> Result<Json<BatchOperationResponse>, ApiError> {
if req.media_ids.len() > 10_000 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"batch size exceeds limit of 10000".into(),
),
));
}
let media_ids: Vec<MediaId> = req.media_ids.iter().map(|id| MediaId(*id)).collect();
match state
.storage
.batch_update_media(
&media_ids,
req.title.as_deref(),
req.artist.as_deref(),
req.album.as_deref(),
req.genre.as_deref(),
req.year,
req.description.as_deref(),
)
.await
{
Ok(count) => Ok(Json(BatchOperationResponse {
processed: count as usize,
errors: Vec::new(),
})),
Err(e) => Ok(Json(BatchOperationResponse {
processed: 0,
errors: vec![e.to_string()],
})),
}
}
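/// Serve a media item's cached thumbnail as JPEG; errors if no thumbnail is available.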
pub async fn get_thumbnail(
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<axum::response::Response, ApiError> {
use axum::body::Body;
use axum::http::header;
use tokio_util::io::ReaderStream;
let item = state.storage.get_media(MediaId(id)).await?;
let thumb_path = item.thumbnail_path.ok_or_else(|| {
ApiError(pinakes_core::error::PinakesError::NotFound(
"no thumbnail available".into(),
))
})?;
let file = tokio::fs::File::open(&thumb_path)
.await
.map_err(|_e| ApiError(pinakes_core::error::PinakesError::FileNotFound(thumb_path)))?;
let stream = ReaderStream::new(file);
let body = Body::from_stream(stream);
axum::response::Response::builder()
.header(header::CONTENT_TYPE, "image/jpeg")
.header(header::CACHE_CONTROL, "public, max-age=86400")
.body(body)
.map_err(|e| {
ApiError(pinakes_core::error::PinakesError::InvalidOperation(
format!("failed to build response: {e}"),
))
})
}
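/// Return the total number of media items.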
pub async fn get_media_count(
State(state): State<AppState>,
) -> Result<Json<MediaCountResponse>, ApiError> {
let count = state.storage.count_media().await?;
Ok(Json(MediaCountResponse { count }))
}
// ===== File Management Endpoints =====
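/// Rename the media file on disk, recording the change in the sync log and audit trail.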
pub async fn rename_media(
State(state): State<AppState>,
Path(id): Path<Uuid>,
Json(req): Json<RenameMediaRequest>,
) -> Result<Json<MediaResponse>, ApiError> {
let media_id = MediaId(id);
// Perform the rename
let old_path = state.storage.rename_media(media_id, &req.new_name).await?;
// Record in sync log
let item = state.storage.get_media(media_id).await?;
let change = pinakes_core::sync::SyncLogEntry {
id: uuid::Uuid::now_v7(),
sequence: 0,
change_type: pinakes_core::sync::SyncChangeType::Moved,
media_id: Some(media_id),
path: item.path.to_string_lossy().to_string(),
content_hash: Some(item.content_hash.clone()),
file_size: Some(item.file_size),
metadata_json: Some(serde_json::json!({ "old_path": old_path }).to_string()),
changed_by_device: None,
timestamp: chrono::Utc::now(),
};
let _ = state.storage.record_sync_change(&change).await;
// Record audit
pinakes_core::audit::record_action(
&state.storage,
Some(media_id),
pinakes_core::model::AuditAction::Updated,
Some(format!("renamed from {} to {}", old_path, req.new_name)),
)
.await?;
Ok(Json(MediaResponse::from(item)))
}
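/// Move the media file to a new destination, recording the change in the sync log and audit trail.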
pub async fn move_media_endpoint(
State(state): State<AppState>,
Path(id): Path<Uuid>,
Json(req): Json<MoveMediaRequest>,
) -> Result<Json<MediaResponse>, ApiError> {
let media_id = MediaId(id);
// Perform the move
let old_path = state.storage.move_media(media_id, &req.destination).await?;
// Record in sync log
let item = state.storage.get_media(media_id).await?;
let change = pinakes_core::sync::SyncLogEntry {
id: uuid::Uuid::now_v7(),
sequence: 0,
change_type: pinakes_core::sync::SyncChangeType::Moved,
media_id: Some(media_id),
path: item.path.to_string_lossy().to_string(),
content_hash: Some(item.content_hash.clone()),
file_size: Some(item.file_size),
metadata_json: Some(serde_json::json!({ "old_path": old_path }).to_string()),
changed_by_device: None,
timestamp: chrono::Utc::now(),
};
let _ = state.storage.record_sync_change(&change).await;
// Record audit
pinakes_core::audit::record_action(
&state.storage,
Some(media_id),
pinakes_core::model::AuditAction::Updated,
Some(format!(
"moved from {} to {}",
old_path,
req.destination.display()
)),
)
.await?;
Ok(Json(MediaResponse::from(item)))
}
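/// Move up to 10,000 media files to a destination, recording a sync-log entry per moved item.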
pub async fn batch_move_media(
State(state): State<AppState>,
Json(req): Json<BatchMoveRequest>,
) -> Result<Json<BatchOperationResponse>, ApiError> {
if req.media_ids.len() > 10_000 {
return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation(
"batch size exceeds limit of 10000".into(),
),
));
}
let media_ids: Vec<MediaId> = req.media_ids.iter().map(|id| MediaId(*id)).collect();
match state
.storage
.batch_move_media(&media_ids, &req.destination)
.await
{
Ok(results) => {
// Record sync changes for each moved item
for (media_id, old_path) in &results {
if let Ok(item) = state.storage.get_media(*media_id).await {
let change = pinakes_core::sync::SyncLogEntry {
id: uuid::Uuid::now_v7(),
sequence: 0,
change_type: pinakes_core::sync::SyncChangeType::Moved,
media_id: Some(*media_id),
path: item.path.to_string_lossy().to_string(),
content_hash: Some(item.content_hash.clone()),
file_size: Some(item.file_size),
metadata_json: Some(
serde_json::json!({ "old_path": old_path }).to_string(),
),
changed_by_device: None,
timestamp: chrono::Utc::now(),
};
let _ = state.storage.record_sync_change(&change).await;
}
}
Ok(Json(BatchOperationResponse {
processed: results.len(),
errors: Vec::new(),
}))
}
Err(e) => Ok(Json(BatchOperationResponse {
processed: 0,
errors: vec![e.to_string()],
})),
}
}
// ===== Trash Endpoints =====
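/// Move a media item to the trash (soft delete), recording sync-log and audit entries.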
pub async fn soft_delete_media(
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<serde_json::Value>, ApiError> {
let media_id = MediaId(id);
// Get item info before soft delete
let item = state.storage.get_media(media_id).await?;
// Perform soft delete
state.storage.soft_delete_media(media_id).await?;
// Record in sync log
let change = pinakes_core::sync::SyncLogEntry {
id: uuid::Uuid::now_v7(),
sequence: 0,
change_type: pinakes_core::sync::SyncChangeType::Deleted,
media_id: Some(media_id),
path: item.path.to_string_lossy().to_string(),
content_hash: Some(item.content_hash.clone()),
file_size: Some(item.file_size),
metadata_json: None,
changed_by_device: None,
timestamp: chrono::Utc::now(),
};
let _ = state.storage.record_sync_change(&change).await;
// Record audit
pinakes_core::audit::record_action(
&state.storage,
Some(media_id),
pinakes_core::model::AuditAction::Deleted,
Some("moved to trash".to_string()),
)
.await?;
Ok(Json(serde_json::json!({"deleted": true, "trashed": true})))
}
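/// Restore a media item from the trash, recording sync-log and audit entries.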
pub async fn restore_media(
State(state): State<AppState>,
Path(id): Path<Uuid>,
) -> Result<Json<MediaResponse>, ApiError> {
let media_id = MediaId(id);
// Perform restore
state.storage.restore_media(media_id).await?;
// Get updated item
let item = state.storage.get_media(media_id).await?;
// Record in sync log
let change = pinakes_core::sync::SyncLogEntry {
id: uuid::Uuid::now_v7(),
sequence: 0,
change_type: pinakes_core::sync::SyncChangeType::Created,
media_id: Some(media_id),
path: item.path.to_string_lossy().to_string(),
content_hash: Some(item.content_hash.clone()),
file_size: Some(item.file_size),
metadata_json: None,
changed_by_device: None,
timestamp: chrono::Utc::now(),
};
let _ = state.storage.record_sync_change(&change).await;
// Record audit
pinakes_core::audit::record_action(
&state.storage,
Some(media_id),
pinakes_core::model::AuditAction::Updated,
Some("restored from trash".to_string()),
)
.await?;
Ok(Json(MediaResponse::from(item)))
}
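/// List trashed media items with pagination, along with the total trash count.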
pub async fn list_trash(
State(state): State<AppState>,
Query(params): Query<PaginationParams>,
) -> Result<Json<TrashResponse>, ApiError> {
let pagination = Pagination::new(
params.offset.unwrap_or(0),
params.limit.unwrap_or(50).min(1000),
params.sort,
);
let items = state.storage.list_trash(&pagination).await?;
let count = state.storage.count_trash().await?;
Ok(Json(TrashResponse {
items: items.into_iter().map(MediaResponse::from).collect(),
total_count: count,
}))
}
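/// Return the number of items currently in the trash.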
pub async fn trash_info(
State(state): State<AppState>,
) -> Result<Json<TrashInfoResponse>, ApiError> {
let count = state.storage.count_trash().await?;
Ok(Json(TrashInfoResponse { count }))
}
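/// Permanently delete all trashed items, recording an audit entry first.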
pub async fn empty_trash(
State(state): State<AppState>,
) -> Result<Json<EmptyTrashResponse>, ApiError> {
// Record audit before emptying
pinakes_core::audit::record_action(
&state.storage,
None,
pinakes_core::model::AuditAction::Deleted,
Some("emptied trash".to_string()),
)
.await?;
let deleted_count = state.storage.empty_trash().await?;
Ok(Json(EmptyTrashResponse { deleted_count }))
}
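/// Delete a trashed item: hard-delete when `permanent=true` is passed, otherwise soft-delete.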
pub async fn permanent_delete_media(
State(state): State<AppState>,
Path(id): Path<Uuid>,
Query(params): Query<std::collections::HashMap<String, String>>,
) -> Result<Json<serde_json::Value>, ApiError> {
let media_id = MediaId(id);
let permanent = params
.get("permanent")
.map(|v| v == "true")
.unwrap_or(false);
if permanent {
// Get item info before delete
let item = state.storage.get_media(media_id).await?;
// Record audit BEFORE delete
pinakes_core::audit::record_action(
&state.storage,
Some(media_id),
pinakes_core::model::AuditAction::Deleted,
Some("permanently deleted".to_string()),
)
.await?;
// Perform hard delete
state.storage.delete_media(media_id).await?;
// Record in sync log
let change = pinakes_core::sync::SyncLogEntry {
id: uuid::Uuid::now_v7(),
sequence: 0,
change_type: pinakes_core::sync::SyncChangeType::Deleted,
media_id: Some(media_id),
path: item.path.to_string_lossy().to_string(),
content_hash: Some(item.content_hash.clone()),
file_size: Some(item.file_size),
metadata_json: Some(serde_json::json!({"permanent": true}).to_string()),
changed_by_device: None,
timestamp: chrono::Utc::now(),
};
let _ = state.storage.record_sync_change(&change).await;
// Clean up thumbnail
if let Some(ref thumb_path) = item.thumbnail_path
&& let Err(e) = tokio::fs::remove_file(thumb_path).await
&& e.kind() != std::io::ErrorKind::NotFound
{
tracing::warn!(path = %thumb_path.display(), error = %e, "failed to remove thumbnail");
}
Ok(Json(
serde_json::json!({"deleted": true, "permanent": true}),
))
} else {
// Soft delete (move to trash)
soft_delete_media(State(state), Path(id)).await
}
}
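// A minimal, illustrative set of unit tests for the private helpers in this module
// (`parse_range` and `validate_optional_text`). This is a sketch of the expected
// behavior, not an exhaustive suite.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_range_handles_common_forms() {
        // Explicit range, clamped end, open-ended range, and suffix range.
        assert_eq!(parse_range("bytes=0-499", 1000), Some((0, 499)));
        assert_eq!(parse_range("bytes=0-2000", 1000), Some((0, 999)));
        assert_eq!(parse_range("bytes=500-", 1000), Some((500, 999)));
        assert_eq!(parse_range("bytes=-500", 1000), Some((500, 999)));
    }

    #[test]
    fn parse_range_rejects_malformed_or_unsatisfiable_ranges() {
        // Wrong unit, inverted range, start past end of file, empty file, empty suffix.
        assert_eq!(parse_range("items=0-10", 1000), None);
        assert_eq!(parse_range("bytes=5-2", 1000), None);
        assert_eq!(parse_range("bytes=1000-", 1000), None);
        assert_eq!(parse_range("bytes=0-", 0), None);
        assert_eq!(parse_range("bytes=-0", 1000), None);
    }

    #[test]
    fn validate_optional_text_enforces_limits() {
        // Absent and in-range values pass; over-limit values are rejected.
        assert!(validate_optional_text(&None, "title", MAX_SHORT_TEXT).is_ok());
        assert!(validate_optional_text(&Some("ok".to_string()), "title", MAX_SHORT_TEXT).is_ok());
        let too_long = "x".repeat(MAX_SHORT_TEXT + 1);
        assert!(validate_optional_text(&Some(too_long), "title", MAX_SHORT_TEXT).is_err());
    }
}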