pinakes-server: add MAX_OFFSET/MAX_LIMIT constants; centralize pagination bounds

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Ib8227feb353cbbadc7f42fa5d29618e16a6a6964
This commit is contained in:
raf 2026-03-12 20:45:45 +03:00
commit c16fcb4a9b
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
6 changed files with 63 additions and 47 deletions

View file

@ -16,15 +16,16 @@ pub fn relativize_path(full_path: &Path, roots: &[PathBuf]) -> String {
let mut best: Option<&PathBuf> = None; let mut best: Option<&PathBuf> = None;
for root in roots { for root in roots {
if full_path.starts_with(root) { if full_path.starts_with(root) {
let is_longer = best let is_longer =
.is_none_or(|b| root.components().count() > b.components().count()); best.is_none_or(|b| root.components().count() > b.components().count());
if is_longer { if is_longer {
best = Some(root); best = Some(root);
} }
} }
} }
if let Some(root) = best if let Some(root) = best
&& let Ok(rel) = full_path.strip_prefix(root) { && let Ok(rel) = full_path.strip_prefix(root)
{
// Normalise to forward slashes on all platforms. // Normalise to forward slashes on all platforms.
return rel return rel
.components() .components()

View file

@ -1,7 +1,14 @@
use pinakes_core::model::Pagination;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use super::media::MediaResponse; use super::media::MediaResponse;
/// Maximum offset accepted from clients. Prevents pathologically large OFFSET
/// values that cause expensive sequential scans in the database.
pub const MAX_OFFSET: u64 = 10_000_000;
/// Maximum page size (limit) accepted from most listing endpoints; larger
/// client-supplied values are clamped down to this bound.
pub const MAX_LIMIT: u64 = 1000;
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
pub struct SearchParams { pub struct SearchParams {
pub q: String, pub q: String,
@ -10,6 +17,17 @@ pub struct SearchParams {
pub limit: Option<u64>, pub limit: Option<u64>,
} }
impl SearchParams {
    /// Build a bounded [`Pagination`] from the query parameters: missing
    /// offset defaults to 0, missing limit defaults to 50, and both are
    /// clamped to the server-wide `MAX_OFFSET` / `MAX_LIMIT` caps.
    #[must_use]
    pub fn to_pagination(&self) -> Pagination {
        let offset = self.offset.unwrap_or(0).min(MAX_OFFSET);
        let limit = self.limit.unwrap_or(50).min(MAX_LIMIT);
        Pagination::new(offset, limit, None)
    }
}
#[derive(Debug, Serialize)] #[derive(Debug, Serialize)]
pub struct SearchResponse { pub struct SearchResponse {
pub items: Vec<MediaResponse>, pub items: Vec<MediaResponse>,
@ -25,6 +43,17 @@ pub struct SearchRequestBody {
pub limit: Option<u64>, pub limit: Option<u64>,
} }
impl SearchRequestBody {
    /// Convert the request body's paging fields into a [`Pagination`],
    /// applying the default page size (50), a zero default offset, and the
    /// `MAX_OFFSET` / `MAX_LIMIT` clamps. No sort is carried (`None`).
    #[must_use]
    pub fn to_pagination(&self) -> Pagination {
        let bounded_offset = self.offset.unwrap_or(0).min(MAX_OFFSET);
        let bounded_limit = self.limit.unwrap_or(50).min(MAX_LIMIT);
        Pagination::new(bounded_offset, bounded_limit, None)
    }
}
// Pagination // Pagination
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
pub struct PaginationParams { pub struct PaginationParams {
@ -32,3 +61,14 @@ pub struct PaginationParams {
pub limit: Option<u64>, pub limit: Option<u64>,
pub sort: Option<String>, pub sort: Option<String>,
} }
impl PaginationParams {
    /// Translate the generic listing parameters into a [`Pagination`].
    /// Offset defaults to 0 and limit to 50 when absent; both are capped at
    /// `MAX_OFFSET` / `MAX_LIMIT`. The optional sort string is passed through
    /// (cloned, since `self` is borrowed).
    #[must_use]
    pub fn to_pagination(&self) -> Pagination {
        Pagination::new(
            self.offset.unwrap_or_default().min(MAX_OFFSET),
            self.limit.map_or(50, |l| l.min(MAX_LIMIT)).min(MAX_LIMIT),
            self.sort.clone(),
        )
    }
}

View file

@ -2,7 +2,6 @@ use axum::{
Json, Json,
extract::{Query, State}, extract::{Query, State},
}; };
use pinakes_core::model::Pagination;
use crate::{ use crate::{
dto::{AuditEntryResponse, PaginationParams}, dto::{AuditEntryResponse, PaginationParams},
@ -14,11 +13,7 @@ pub async fn list_audit(
State(state): State<AppState>, State(state): State<AppState>,
Query(params): Query<PaginationParams>, Query(params): Query<PaginationParams>,
) -> Result<Json<Vec<AuditEntryResponse>>, ApiError> { ) -> Result<Json<Vec<AuditEntryResponse>>, ApiError> {
let pagination = Pagination::new( let pagination = params.to_pagination();
params.offset.unwrap_or(0),
params.limit.unwrap_or(50).min(1000),
None,
);
let entries = state.storage.list_audit_entries(None, &pagination).await?; let entries = state.storage.list_audit_entries(None, &pagination).await?;
Ok(Json( Ok(Json(
entries.into_iter().map(AuditEntryResponse::from).collect(), entries.into_iter().map(AuditEntryResponse::from).collect(),

View file

@ -22,7 +22,7 @@ use uuid::Uuid;
use crate::{ use crate::{
auth::resolve_user_id, auth::resolve_user_id,
dto::MediaResponse, dto::{MAX_OFFSET, MediaResponse},
error::ApiError, error::ApiError,
state::AppState, state::AppState,
}; };
@ -177,7 +177,7 @@ pub async fn list_books(
Query(query): Query<SearchBooksQuery>, Query(query): Query<SearchBooksQuery>,
) -> Result<impl IntoResponse, ApiError> { ) -> Result<impl IntoResponse, ApiError> {
let pagination = Pagination { let pagination = Pagination {
offset: query.offset, offset: query.offset.min(MAX_OFFSET),
limit: query.limit.min(1000), limit: query.limit.min(1000),
sort: None, sort: None,
}; };

View file

@ -2,10 +2,7 @@ use axum::{
Json, Json,
extract::{Query, State}, extract::{Query, State},
}; };
use pinakes_core::{ use pinakes_core::search::{SearchRequest, SortOrder, parse_search_query};
model::Pagination,
search::{SearchRequest, SortOrder, parse_search_query},
};
use crate::{ use crate::{
dto::{MediaResponse, SearchParams, SearchRequestBody, SearchResponse}, dto::{MediaResponse, SearchParams, SearchRequestBody, SearchResponse},
@ -43,11 +40,7 @@ pub async fn search(
let request = SearchRequest { let request = SearchRequest {
query, query,
sort, sort,
pagination: Pagination::new( pagination: params.to_pagination(),
params.offset.unwrap_or(0),
params.limit.unwrap_or(50).min(1000),
None,
),
}; };
let results = state.storage.search(&request).await?; let results = state.storage.search(&request).await?;
@ -81,11 +74,7 @@ pub async fn search_post(
let request = SearchRequest { let request = SearchRequest {
query, query,
sort, sort,
pagination: Pagination::new( pagination: body.to_pagination(),
body.offset.unwrap_or(0),
body.limit.unwrap_or(50).min(1000),
None,
),
}; };
let results = state.storage.search(&request).await?; let results = state.storage.search(&request).await?;

View file

@ -207,11 +207,7 @@ pub async fn list_outgoing(
Query(params): Query<PaginationParams>, Query(params): Query<PaginationParams>,
) -> ApiResult<Json<Vec<ShareResponse>>> { ) -> ApiResult<Json<Vec<ShareResponse>>> {
let user_id = resolve_user_id(&state.storage, &username).await?; let user_id = resolve_user_id(&state.storage, &username).await?;
let pagination = Pagination { let pagination = params.to_pagination();
offset: params.offset.unwrap_or(0),
limit: params.limit.unwrap_or(50).min(1000),
sort: params.sort,
};
let shares = state let shares = state
.storage .storage
@ -230,11 +226,7 @@ pub async fn list_incoming(
Query(params): Query<PaginationParams>, Query(params): Query<PaginationParams>,
) -> ApiResult<Json<Vec<ShareResponse>>> { ) -> ApiResult<Json<Vec<ShareResponse>>> {
let user_id = resolve_user_id(&state.storage, &username).await?; let user_id = resolve_user_id(&state.storage, &username).await?;
let pagination = Pagination { let pagination = params.to_pagination();
offset: params.offset.unwrap_or(0),
limit: params.limit.unwrap_or(50).min(1000),
sort: params.sort,
};
let shares = state let shares = state
.storage .storage
@ -406,6 +398,9 @@ pub async fn batch_delete(
Extension(username): Extension<String>, Extension(username): Extension<String>,
Json(req): Json<BatchDeleteSharesRequest>, Json(req): Json<BatchDeleteSharesRequest>,
) -> ApiResult<Json<serde_json::Value>> { ) -> ApiResult<Json<serde_json::Value>> {
if req.share_ids.is_empty() || req.share_ids.len() > 100 {
return Err(ApiError::bad_request("share_ids must contain 1-100 items"));
}
let user_id = resolve_user_id(&state.storage, &username).await?; let user_id = resolve_user_id(&state.storage, &username).await?;
let share_ids: Vec<ShareId> = let share_ids: Vec<ShareId> =
req.share_ids.into_iter().map(ShareId).collect(); req.share_ids.into_iter().map(ShareId).collect();
@ -624,11 +619,7 @@ pub async fn get_activity(
)); ));
} }
let pagination = Pagination { let pagination = params.to_pagination();
offset: params.offset.unwrap_or(0),
limit: params.limit.unwrap_or(50).min(1000),
sort: params.sort,
};
let activity = state let activity = state
.storage .storage