pinakes-server: add MAX_OFFSET/MAX_LIMIT constants; centralize pagination bounds

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Ib8227feb353cbbadc7f42fa5d29618e16a6a6964
This commit is contained in:
raf 2026-03-12 20:45:45 +03:00
commit c16fcb4a9b
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
6 changed files with 63 additions and 47 deletions

View file

@ -16,15 +16,16 @@ pub fn relativize_path(full_path: &Path, roots: &[PathBuf]) -> String {
let mut best: Option<&PathBuf> = None;
for root in roots {
if full_path.starts_with(root) {
let is_longer = best
.is_none_or(|b| root.components().count() > b.components().count());
let is_longer =
best.is_none_or(|b| root.components().count() > b.components().count());
if is_longer {
best = Some(root);
}
}
}
if let Some(root) = best
&& let Ok(rel) = full_path.strip_prefix(root) {
&& let Ok(rel) = full_path.strip_prefix(root)
{
// Normalise to forward slashes on all platforms.
return rel
.components()

View file

@ -1,7 +1,14 @@
use pinakes_core::model::Pagination;
use serde::{Deserialize, Serialize};
use super::media::MediaResponse;
/// Maximum offset accepted from clients. Prevents pathologically large OFFSET
/// values that cause expensive sequential scans in the database.
pub const MAX_OFFSET: u64 = 10_000_000;
/// Maximum page size accepted from most listing endpoints. Requested limits
/// above this are silently clamped rather than rejected.
pub const MAX_LIMIT: u64 = 1000;
#[derive(Debug, Deserialize)]
pub struct SearchParams {
pub q: String,
@ -10,6 +17,17 @@ pub struct SearchParams {
pub limit: Option<u64>,
}
impl SearchParams {
    /// Convert the raw query-string parameters into a bounded [`Pagination`].
    ///
    /// Missing values default to offset 0 and a page size of 50; both are
    /// clamped to [`MAX_OFFSET`] / [`MAX_LIMIT`] to keep database queries cheap.
    #[must_use]
    pub fn to_pagination(&self) -> Pagination {
        let offset = self.offset.unwrap_or(0).min(MAX_OFFSET);
        let limit = self.limit.unwrap_or(50).min(MAX_LIMIT);
        Pagination::new(offset, limit, None)
    }
}
#[derive(Debug, Serialize)]
pub struct SearchResponse {
pub items: Vec<MediaResponse>,
@ -25,6 +43,17 @@ pub struct SearchRequestBody {
pub limit: Option<u64>,
}
impl SearchRequestBody {
    /// Build a [`Pagination`] from the JSON request body, applying the
    /// server-side caps ([`MAX_OFFSET`], [`MAX_LIMIT`]) and the usual
    /// defaults (offset 0, limit 50) when fields are absent.
    #[must_use]
    pub fn to_pagination(&self) -> Pagination {
        Pagination::new(
            self.offset.map_or(0, |o| o.min(MAX_OFFSET)),
            self.limit.map_or(50, |l| l.min(MAX_LIMIT)),
            None,
        )
    }
}
// Pagination
#[derive(Debug, Deserialize)]
pub struct PaginationParams {
@ -32,3 +61,14 @@ pub struct PaginationParams {
pub limit: Option<u64>,
pub sort: Option<String>,
}
impl PaginationParams {
    /// Translate the query-string parameters into a clamped [`Pagination`],
    /// preserving the caller-supplied sort key.
    ///
    /// Defaults: offset 0, limit 50. Both values are capped at
    /// [`MAX_OFFSET`] / [`MAX_LIMIT`] before reaching storage.
    #[must_use]
    pub fn to_pagination(&self) -> Pagination {
        let offset = self.offset.unwrap_or(0);
        let limit = self.limit.unwrap_or(50);
        Pagination::new(
            offset.min(MAX_OFFSET),
            limit.min(MAX_LIMIT),
            self.sort.clone(),
        )
    }
}

View file

@ -2,7 +2,6 @@ use axum::{
Json,
extract::{Query, State},
};
use pinakes_core::model::Pagination;
use crate::{
dto::{AuditEntryResponse, PaginationParams},
@ -14,11 +13,7 @@ pub async fn list_audit(
State(state): State<AppState>,
Query(params): Query<PaginationParams>,
) -> Result<Json<Vec<AuditEntryResponse>>, ApiError> {
let pagination = Pagination::new(
params.offset.unwrap_or(0),
params.limit.unwrap_or(50).min(1000),
None,
);
let pagination = params.to_pagination();
let entries = state.storage.list_audit_entries(None, &pagination).await?;
Ok(Json(
entries.into_iter().map(AuditEntryResponse::from).collect(),

View file

@ -22,7 +22,7 @@ use uuid::Uuid;
use crate::{
auth::resolve_user_id,
dto::MediaResponse,
dto::{MAX_OFFSET, MediaResponse},
error::ApiError,
state::AppState,
};
@ -177,7 +177,7 @@ pub async fn list_books(
Query(query): Query<SearchBooksQuery>,
) -> Result<impl IntoResponse, ApiError> {
let pagination = Pagination {
offset: query.offset,
offset: query.offset.min(MAX_OFFSET),
limit: query.limit.min(1000),
sort: None,
};

View file

@ -2,10 +2,7 @@ use axum::{
Json,
extract::{Query, State},
};
use pinakes_core::{
model::Pagination,
search::{SearchRequest, SortOrder, parse_search_query},
};
use pinakes_core::search::{SearchRequest, SortOrder, parse_search_query};
use crate::{
dto::{MediaResponse, SearchParams, SearchRequestBody, SearchResponse},
@ -43,11 +40,7 @@ pub async fn search(
let request = SearchRequest {
query,
sort,
pagination: Pagination::new(
params.offset.unwrap_or(0),
params.limit.unwrap_or(50).min(1000),
None,
),
pagination: params.to_pagination(),
};
let results = state.storage.search(&request).await?;
@ -81,11 +74,7 @@ pub async fn search_post(
let request = SearchRequest {
query,
sort,
pagination: Pagination::new(
body.offset.unwrap_or(0),
body.limit.unwrap_or(50).min(1000),
None,
),
pagination: body.to_pagination(),
};
let results = state.storage.search(&request).await?;

View file

@ -207,11 +207,7 @@ pub async fn list_outgoing(
Query(params): Query<PaginationParams>,
) -> ApiResult<Json<Vec<ShareResponse>>> {
let user_id = resolve_user_id(&state.storage, &username).await?;
let pagination = Pagination {
offset: params.offset.unwrap_or(0),
limit: params.limit.unwrap_or(50).min(1000),
sort: params.sort,
};
let pagination = params.to_pagination();
let shares = state
.storage
@ -230,11 +226,7 @@ pub async fn list_incoming(
Query(params): Query<PaginationParams>,
) -> ApiResult<Json<Vec<ShareResponse>>> {
let user_id = resolve_user_id(&state.storage, &username).await?;
let pagination = Pagination {
offset: params.offset.unwrap_or(0),
limit: params.limit.unwrap_or(50).min(1000),
sort: params.sort,
};
let pagination = params.to_pagination();
let shares = state
.storage
@ -406,6 +398,9 @@ pub async fn batch_delete(
Extension(username): Extension<String>,
Json(req): Json<BatchDeleteSharesRequest>,
) -> ApiResult<Json<serde_json::Value>> {
if req.share_ids.is_empty() || req.share_ids.len() > 100 {
return Err(ApiError::bad_request("share_ids must contain 1-100 items"));
}
let user_id = resolve_user_id(&state.storage, &username).await?;
let share_ids: Vec<ShareId> =
req.share_ids.into_iter().map(ShareId).collect();
@ -624,11 +619,7 @@ pub async fn get_activity(
));
}
let pagination = Pagination {
offset: params.offset.unwrap_or(0),
limit: params.limit.unwrap_or(50).min(1000),
sort: params.sort,
};
let pagination = params.to_pagination();
let activity = state
.storage