pinakes-server: validate rename/move destinations; cap import tag arrays; consolidate batch_delete audit entry
Signed-off-by: NotAShelf <raf@notashelf.dev> Change-Id: I02b585a9fb5fc3a2b5fa40f9aac3a7b66a6a6964
This commit is contained in:
parent
1f7d7ea925
commit
b86ff5f6ab
1 changed file with 85 additions and 46 deletions
|
|
@ -2,10 +2,7 @@ use axum::{
|
|||
Json,
|
||||
extract::{Path, Query, State},
|
||||
};
|
||||
use pinakes_core::{
|
||||
model::{MediaId, Pagination},
|
||||
storage::DynStorageBackend,
|
||||
};
|
||||
use pinakes_core::{model::MediaId, storage::DynStorageBackend};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
|
|
@ -40,6 +37,24 @@ use crate::{
|
|||
state::AppState,
|
||||
};
|
||||
|
||||
/// Validates that a destination path is absolute and within a configured root.
|
||||
fn validate_destination_path(
|
||||
destination: &std::path::Path,
|
||||
roots: &[std::path::PathBuf],
|
||||
) -> Result<(), ApiError> {
|
||||
if !destination.is_absolute() {
|
||||
return Err(ApiError::bad_request(
|
||||
"destination must be an absolute path",
|
||||
));
|
||||
}
|
||||
if !roots.iter().any(|root| destination.starts_with(root)) {
|
||||
return Err(ApiError::bad_request(
|
||||
"destination must be within a configured library root",
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Apply tags and add to collection after a successful import.
|
||||
/// Shared logic used by `import_with_options`, `batch_import`, and
|
||||
/// `import_directory_endpoint`.
|
||||
|
|
@ -114,11 +129,7 @@ pub async fn list_media(
|
|||
State(state): State<AppState>,
|
||||
Query(params): Query<PaginationParams>,
|
||||
) -> Result<Json<Vec<MediaResponse>>, ApiError> {
|
||||
let pagination = Pagination::new(
|
||||
params.offset.unwrap_or(0),
|
||||
params.limit.unwrap_or(50).min(1000),
|
||||
params.sort,
|
||||
);
|
||||
let pagination = params.to_pagination();
|
||||
let items = state.storage.list_media(&pagination).await?;
|
||||
let roots = state.config.read().await.directories.roots.clone();
|
||||
Ok(Json(
|
||||
|
|
@ -387,6 +398,12 @@ pub async fn import_with_options(
|
|||
State(state): State<AppState>,
|
||||
Json(req): Json<ImportWithOptionsRequest>,
|
||||
) -> Result<Json<ImportResponse>, ApiError> {
|
||||
if req.tag_ids.as_ref().is_some_and(|v| v.len() > 100) {
|
||||
return Err(ApiError::bad_request("tag_ids must not exceed 100 items"));
|
||||
}
|
||||
if req.new_tags.as_ref().is_some_and(|v| v.len() > 100) {
|
||||
return Err(ApiError::bad_request("new_tags must not exceed 100 items"));
|
||||
}
|
||||
let result = pinakes_core::import::import_file(
|
||||
&state.storage,
|
||||
&req.path,
|
||||
|
|
@ -415,6 +432,12 @@ pub async fn batch_import(
|
|||
State(state): State<AppState>,
|
||||
Json(req): Json<BatchImportRequest>,
|
||||
) -> Result<Json<BatchImportResponse>, ApiError> {
|
||||
if req.tag_ids.as_ref().is_some_and(|v| v.len() > 100) {
|
||||
return Err(ApiError::bad_request("tag_ids must not exceed 100 items"));
|
||||
}
|
||||
if req.new_tags.as_ref().is_some_and(|v| v.len() > 100) {
|
||||
return Err(ApiError::bad_request("new_tags must not exceed 100 items"));
|
||||
}
|
||||
if req.paths.len() > 10_000 {
|
||||
return Err(ApiError(
|
||||
pinakes_core::error::PinakesError::InvalidOperation(
|
||||
|
|
@ -776,20 +799,18 @@ pub async fn batch_delete(
|
|||
let media_ids: Vec<MediaId> =
|
||||
req.media_ids.iter().map(|id| MediaId(*id)).collect();
|
||||
|
||||
// Record audit entries BEFORE delete to avoid FK constraint violation.
|
||||
// Use None for media_id since they'll be deleted; include ID in details.
|
||||
for id in &media_ids {
|
||||
// Record a single audit entry before delete to avoid FK constraint
|
||||
// violations. One entry for the whole batch is sufficient.
|
||||
if let Err(e) = pinakes_core::audit::record_action(
|
||||
&state.storage,
|
||||
None,
|
||||
pinakes_core::model::AuditAction::Deleted,
|
||||
Some(format!("batch delete: media_id={}", id.0)),
|
||||
Some(format!("batch delete: {} items", media_ids.len())),
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::warn!(error = %e, "failed to record audit entry");
|
||||
}
|
||||
}
|
||||
|
||||
match state.storage.batch_delete_media(&media_ids).await {
|
||||
Ok(count) => {
|
||||
|
|
@ -924,6 +945,15 @@ pub async fn rename_media(
|
|||
Path(id): Path<Uuid>,
|
||||
Json(req): Json<RenameMediaRequest>,
|
||||
) -> Result<Json<MediaResponse>, ApiError> {
|
||||
let name_len = req.new_name.chars().count();
|
||||
if name_len == 0 || name_len > 255 {
|
||||
return Err(ApiError::bad_request("new_name must be 1-255 characters"));
|
||||
}
|
||||
if req.new_name.contains('\0') || req.new_name.contains('/') {
|
||||
return Err(ApiError::bad_request(
|
||||
"new_name must not contain null bytes or path separators",
|
||||
));
|
||||
}
|
||||
let media_id = MediaId(id);
|
||||
|
||||
// Perform the rename
|
||||
|
|
@ -967,6 +997,8 @@ pub async fn move_media_endpoint(
|
|||
Path(id): Path<Uuid>,
|
||||
Json(req): Json<MoveMediaRequest>,
|
||||
) -> Result<Json<MediaResponse>, ApiError> {
|
||||
let roots = state.config.read().await.directories.roots.clone();
|
||||
validate_destination_path(&req.destination, &roots)?;
|
||||
let media_id = MediaId(id);
|
||||
|
||||
// Perform the move
|
||||
|
|
@ -1020,6 +1052,8 @@ pub async fn batch_move_media(
|
|||
),
|
||||
));
|
||||
}
|
||||
let roots = state.config.read().await.directories.roots.clone();
|
||||
validate_destination_path(&req.destination, &roots)?;
|
||||
|
||||
let media_ids: Vec<MediaId> =
|
||||
req.media_ids.iter().map(|id| MediaId(*id)).collect();
|
||||
|
|
@ -1030,17 +1064,27 @@ pub async fn batch_move_media(
|
|||
.await
|
||||
{
|
||||
Ok(results) => {
|
||||
// Record sync changes for each moved item
|
||||
// Record sync changes for each moved item. Derive the new path from
|
||||
// the destination and old filename to avoid N extra get_media calls.
|
||||
for (media_id, old_path) in &results {
|
||||
if let Ok(item) = state.storage.get_media(*media_id).await {
|
||||
let Some(file_name) =
|
||||
std::path::Path::new(old_path.as_str()).file_name()
|
||||
else {
|
||||
tracing::warn!(
|
||||
old_path = %old_path,
|
||||
"skipping sync log entry: no filename in old_path"
|
||||
);
|
||||
continue;
|
||||
};
|
||||
let new_path = req.destination.join(file_name);
|
||||
let change = pinakes_core::sync::SyncLogEntry {
|
||||
id: uuid::Uuid::now_v7(),
|
||||
sequence: 0,
|
||||
change_type: pinakes_core::sync::SyncChangeType::Moved,
|
||||
media_id: Some(*media_id),
|
||||
path: item.path.to_string_lossy().to_string(),
|
||||
content_hash: Some(item.content_hash.clone()),
|
||||
file_size: Some(item.file_size),
|
||||
path: new_path.to_string_lossy().to_string(),
|
||||
content_hash: None,
|
||||
file_size: None,
|
||||
metadata_json: Some(
|
||||
serde_json::json!({ "old_path": old_path }).to_string(),
|
||||
),
|
||||
|
|
@ -1051,7 +1095,6 @@ pub async fn batch_move_media(
|
|||
tracing::warn!(error = %e, "failed to record sync change");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Json(BatchOperationResponse {
|
||||
processed: results.len(),
|
||||
|
|
@ -1164,11 +1207,7 @@ pub async fn list_trash(
|
|||
State(state): State<AppState>,
|
||||
Query(params): Query<PaginationParams>,
|
||||
) -> Result<Json<TrashResponse>, ApiError> {
|
||||
let pagination = Pagination::new(
|
||||
params.offset.unwrap_or(0),
|
||||
params.limit.unwrap_or(50).min(1000),
|
||||
params.sort,
|
||||
);
|
||||
let pagination = params.to_pagination();
|
||||
|
||||
let items = state.storage.list_trash(&pagination).await?;
|
||||
let count = state.storage.count_trash().await?;
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue