pinakes-server: validate rename/move destinations; cap import tag arrays; consolidate batch_delete audit entry

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: I02b585a9fb5fc3a2b5fa40f9aac3a7b66a6a6964
This commit is contained in:
raf 2026-03-12 20:48:29 +03:00
commit b86ff5f6ab
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF

View file

@ -2,10 +2,7 @@ use axum::{
Json, Json,
extract::{Path, Query, State}, extract::{Path, Query, State},
}; };
use pinakes_core::{ use pinakes_core::{model::MediaId, storage::DynStorageBackend};
model::{MediaId, Pagination},
storage::DynStorageBackend,
};
use uuid::Uuid; use uuid::Uuid;
use crate::{ use crate::{
@ -40,6 +37,24 @@ use crate::{
state::AppState, state::AppState,
}; };
/// Validates that a destination path is absolute, free of parent-directory
/// (`..`) components, and contained within one of the configured library
/// roots.
///
/// # Errors
///
/// Returns `ApiError::bad_request` when the path is relative, contains a
/// `..` component, or lies outside every configured root.
fn validate_destination_path(
    destination: &std::path::Path,
    roots: &[std::path::PathBuf],
) -> Result<(), ApiError> {
    if !destination.is_absolute() {
        return Err(ApiError::bad_request(
            "destination must be an absolute path",
        ));
    }
    // `Path::starts_with` compares components literally and does not
    // normalize `..`, so `/root/../etc` would otherwise pass the root
    // check below while escaping the root on disk. Reject traversal
    // components outright.
    if destination
        .components()
        .any(|c| matches!(c, std::path::Component::ParentDir))
    {
        return Err(ApiError::bad_request(
            "destination must not contain parent-directory components",
        ));
    }
    if !roots.iter().any(|root| destination.starts_with(root)) {
        return Err(ApiError::bad_request(
            "destination must be within a configured library root",
        ));
    }
    Ok(())
}
/// Apply tags and add to collection after a successful import. /// Apply tags and add to collection after a successful import.
/// Shared logic used by `import_with_options`, `batch_import`, and /// Shared logic used by `import_with_options`, `batch_import`, and
/// `import_directory_endpoint`. /// `import_directory_endpoint`.
@ -114,11 +129,7 @@ pub async fn list_media(
State(state): State<AppState>, State(state): State<AppState>,
Query(params): Query<PaginationParams>, Query(params): Query<PaginationParams>,
) -> Result<Json<Vec<MediaResponse>>, ApiError> { ) -> Result<Json<Vec<MediaResponse>>, ApiError> {
let pagination = Pagination::new( let pagination = params.to_pagination();
params.offset.unwrap_or(0),
params.limit.unwrap_or(50).min(1000),
params.sort,
);
let items = state.storage.list_media(&pagination).await?; let items = state.storage.list_media(&pagination).await?;
let roots = state.config.read().await.directories.roots.clone(); let roots = state.config.read().await.directories.roots.clone();
Ok(Json( Ok(Json(
@ -387,6 +398,12 @@ pub async fn import_with_options(
State(state): State<AppState>, State(state): State<AppState>,
Json(req): Json<ImportWithOptionsRequest>, Json(req): Json<ImportWithOptionsRequest>,
) -> Result<Json<ImportResponse>, ApiError> { ) -> Result<Json<ImportResponse>, ApiError> {
if req.tag_ids.as_ref().is_some_and(|v| v.len() > 100) {
return Err(ApiError::bad_request("tag_ids must not exceed 100 items"));
}
if req.new_tags.as_ref().is_some_and(|v| v.len() > 100) {
return Err(ApiError::bad_request("new_tags must not exceed 100 items"));
}
let result = pinakes_core::import::import_file( let result = pinakes_core::import::import_file(
&state.storage, &state.storage,
&req.path, &req.path,
@ -415,6 +432,12 @@ pub async fn batch_import(
State(state): State<AppState>, State(state): State<AppState>,
Json(req): Json<BatchImportRequest>, Json(req): Json<BatchImportRequest>,
) -> Result<Json<BatchImportResponse>, ApiError> { ) -> Result<Json<BatchImportResponse>, ApiError> {
if req.tag_ids.as_ref().is_some_and(|v| v.len() > 100) {
return Err(ApiError::bad_request("tag_ids must not exceed 100 items"));
}
if req.new_tags.as_ref().is_some_and(|v| v.len() > 100) {
return Err(ApiError::bad_request("new_tags must not exceed 100 items"));
}
if req.paths.len() > 10_000 { if req.paths.len() > 10_000 {
return Err(ApiError( return Err(ApiError(
pinakes_core::error::PinakesError::InvalidOperation( pinakes_core::error::PinakesError::InvalidOperation(
@ -776,19 +799,17 @@ pub async fn batch_delete(
let media_ids: Vec<MediaId> = let media_ids: Vec<MediaId> =
req.media_ids.iter().map(|id| MediaId(*id)).collect(); req.media_ids.iter().map(|id| MediaId(*id)).collect();
// Record audit entries BEFORE delete to avoid FK constraint violation. // Record a single audit entry before delete to avoid FK constraint
// Use None for media_id since they'll be deleted; include ID in details. // violations. One entry for the whole batch is sufficient.
for id in &media_ids { if let Err(e) = pinakes_core::audit::record_action(
if let Err(e) = pinakes_core::audit::record_action( &state.storage,
&state.storage, None,
None, pinakes_core::model::AuditAction::Deleted,
pinakes_core::model::AuditAction::Deleted, Some(format!("batch delete: {} items", media_ids.len())),
Some(format!("batch delete: media_id={}", id.0)), )
) .await
.await {
{ tracing::warn!(error = %e, "failed to record audit entry");
tracing::warn!(error = %e, "failed to record audit entry");
}
} }
match state.storage.batch_delete_media(&media_ids).await { match state.storage.batch_delete_media(&media_ids).await {
@ -924,6 +945,15 @@ pub async fn rename_media(
Path(id): Path<Uuid>, Path(id): Path<Uuid>,
Json(req): Json<RenameMediaRequest>, Json(req): Json<RenameMediaRequest>,
) -> Result<Json<MediaResponse>, ApiError> { ) -> Result<Json<MediaResponse>, ApiError> {
let name_len = req.new_name.chars().count();
if name_len == 0 || name_len > 255 {
return Err(ApiError::bad_request("new_name must be 1-255 characters"));
}
if req.new_name.contains('\0') || req.new_name.contains('/') {
return Err(ApiError::bad_request(
"new_name must not contain null bytes or path separators",
));
}
let media_id = MediaId(id); let media_id = MediaId(id);
// Perform the rename // Perform the rename
@ -967,6 +997,8 @@ pub async fn move_media_endpoint(
Path(id): Path<Uuid>, Path(id): Path<Uuid>,
Json(req): Json<MoveMediaRequest>, Json(req): Json<MoveMediaRequest>,
) -> Result<Json<MediaResponse>, ApiError> { ) -> Result<Json<MediaResponse>, ApiError> {
let roots = state.config.read().await.directories.roots.clone();
validate_destination_path(&req.destination, &roots)?;
let media_id = MediaId(id); let media_id = MediaId(id);
// Perform the move // Perform the move
@ -1020,6 +1052,8 @@ pub async fn batch_move_media(
), ),
)); ));
} }
let roots = state.config.read().await.directories.roots.clone();
validate_destination_path(&req.destination, &roots)?;
let media_ids: Vec<MediaId> = let media_ids: Vec<MediaId> =
req.media_ids.iter().map(|id| MediaId(*id)).collect(); req.media_ids.iter().map(|id| MediaId(*id)).collect();
@ -1030,26 +1064,35 @@ pub async fn batch_move_media(
.await .await
{ {
Ok(results) => { Ok(results) => {
// Record sync changes for each moved item // Record sync changes for each moved item. Derive the new path from
// the destination and old filename to avoid N extra get_media calls.
for (media_id, old_path) in &results { for (media_id, old_path) in &results {
if let Ok(item) = state.storage.get_media(*media_id).await { let Some(file_name) =
let change = pinakes_core::sync::SyncLogEntry { std::path::Path::new(old_path.as_str()).file_name()
id: uuid::Uuid::now_v7(), else {
sequence: 0, tracing::warn!(
change_type: pinakes_core::sync::SyncChangeType::Moved, old_path = %old_path,
media_id: Some(*media_id), "skipping sync log entry: no filename in old_path"
path: item.path.to_string_lossy().to_string(), );
content_hash: Some(item.content_hash.clone()), continue;
file_size: Some(item.file_size), };
metadata_json: Some( let new_path = req.destination.join(file_name);
serde_json::json!({ "old_path": old_path }).to_string(), let change = pinakes_core::sync::SyncLogEntry {
), id: uuid::Uuid::now_v7(),
changed_by_device: None, sequence: 0,
timestamp: chrono::Utc::now(), change_type: pinakes_core::sync::SyncChangeType::Moved,
}; media_id: Some(*media_id),
if let Err(e) = state.storage.record_sync_change(&change).await { path: new_path.to_string_lossy().to_string(),
tracing::warn!(error = %e, "failed to record sync change"); content_hash: None,
} file_size: None,
metadata_json: Some(
serde_json::json!({ "old_path": old_path }).to_string(),
),
changed_by_device: None,
timestamp: chrono::Utc::now(),
};
if let Err(e) = state.storage.record_sync_change(&change).await {
tracing::warn!(error = %e, "failed to record sync change");
} }
} }
@ -1164,11 +1207,7 @@ pub async fn list_trash(
State(state): State<AppState>, State(state): State<AppState>,
Query(params): Query<PaginationParams>, Query(params): Query<PaginationParams>,
) -> Result<Json<TrashResponse>, ApiError> { ) -> Result<Json<TrashResponse>, ApiError> {
let pagination = Pagination::new( let pagination = params.to_pagination();
params.offset.unwrap_or(0),
params.limit.unwrap_or(50).min(1000),
params.sort,
);
let items = state.storage.list_trash(&pagination).await?; let items = state.storage.list_trash(&pagination).await?;
let count = state.storage.count_trash().await?; let count = state.storage.count_trash().await?;