treewide: improve cross-device sync capabilities; add in-database managed storage

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Id99798df6f7e4470caae8a193c2654aa6a6a6964
This commit is contained in:
raf 2026-02-05 08:28:50 +03:00
commit f34c78b238
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
41 changed files with 8806 additions and 138 deletions

View file

@ -26,6 +26,7 @@ governor = { workspace = true }
tower_governor = { workspace = true }
tokio-util = { version = "0.7", features = ["io"] }
argon2 = { workspace = true }
blake3 = { workspace = true }
rand = "0.9"
percent-encoding = "2"
http = "1.0"

View file

@ -72,6 +72,8 @@ pub fn create_router_with_tls(
// Public routes (no auth required)
let public_routes = Router::new()
.route("/s/{token}", get(routes::social::access_shared_media))
// Enhanced sharing: public share access
.route("/shared/{token}", get(routes::shares::access_shared))
// Kubernetes-style health probes (no auth required for orchestration)
.route("/health/live", get(routes::health::liveness))
.route("/health/ready", get(routes::health::readiness));
@ -216,6 +218,25 @@ pub fn create_router_with_tls(
.route(
"/media/{id}/stream/dash/{profile}/{segment}",
get(routes::streaming::dash_segment),
)
// Managed storage (read)
.route("/media/{id}/download", get(routes::upload::download_file))
.route("/managed/stats", get(routes::upload::managed_stats))
// Sync (read)
.route("/sync/devices", get(routes::sync::list_devices))
.route("/sync/devices/{id}", get(routes::sync::get_device))
.route("/sync/changes", get(routes::sync::get_changes))
.route("/sync/conflicts", get(routes::sync::list_conflicts))
.route("/sync/upload/{id}", get(routes::sync::get_upload_status))
.route("/sync/download/{*path}", get(routes::sync::download_file))
// Enhanced sharing (read)
.route("/shares/outgoing", get(routes::shares::list_outgoing))
.route("/shares/incoming", get(routes::shares::list_incoming))
.route("/shares/{id}", get(routes::shares::get_share))
.route("/shares/{id}/activity", get(routes::shares::get_activity))
.route(
"/notifications/shares",
get(routes::shares::get_notifications),
);
// Write routes: Editor+ required
@ -371,6 +392,49 @@ pub fn create_router_with_tls(
post(routes::transcode::start_transcode),
)
.route("/transcode/{id}", delete(routes::transcode::cancel_session))
// Managed storage (write)
.route("/upload", post(routes::upload::upload_file))
.route(
"/media/{id}/move-to-managed",
post(routes::upload::move_to_managed),
)
// Sync (write)
.route("/sync/devices", post(routes::sync::register_device))
.route("/sync/devices/{id}", put(routes::sync::update_device))
.route("/sync/devices/{id}", delete(routes::sync::delete_device))
.route(
"/sync/devices/{id}/token",
post(routes::sync::regenerate_token),
)
.route("/sync/report", post(routes::sync::report_changes))
.route("/sync/ack", post(routes::sync::acknowledge_changes))
.route(
"/sync/conflicts/{id}/resolve",
post(routes::sync::resolve_conflict),
)
.route("/sync/upload", post(routes::sync::create_upload))
.route(
"/sync/upload/{id}/chunks/{index}",
put(routes::sync::upload_chunk),
)
.route(
"/sync/upload/{id}/complete",
post(routes::sync::complete_upload),
)
.route("/sync/upload/{id}", delete(routes::sync::cancel_upload))
// Enhanced sharing (write)
.route("/shares", post(routes::shares::create_share))
.route("/shares/{id}", patch(routes::shares::update_share))
.route("/shares/{id}", delete(routes::shares::delete_share))
.route("/shares/batch/delete", post(routes::shares::batch_delete))
.route(
"/notifications/shares/{id}/read",
post(routes::shares::mark_notification_read),
)
.route(
"/notifications/shares/read-all",
post(routes::shares::mark_all_read),
)
.layer(middleware::from_fn(auth::require_editor));
// Admin-only routes: destructive/config operations

View file

@ -997,3 +997,418 @@ impl From<pinakes_core::transcode::TranscodeSession> for TranscodeSessionRespons
pub struct CreateTranscodeRequest {
pub profile: String,
}
// ===== Managed Storage / Upload =====
/// Response body returned after a file upload into managed storage.
#[derive(Debug, Serialize)]
pub struct UploadResponse {
/// Media record the uploaded content was stored under.
pub media_id: String,
/// Content hash of the stored bytes, in string form.
pub content_hash: String,
/// True when the server already held identical content (deduplicated).
pub was_duplicate: bool,
/// Size of the uploaded file in bytes.
pub file_size: u64,
}
/// Flattens the core upload result (newtype ids/hashes) into plain strings.
impl From<pinakes_core::model::UploadResult> for UploadResponse {
fn from(result: pinakes_core::model::UploadResult) -> Self {
Self {
media_id: result.media_id.0.to_string(),
content_hash: result.content_hash.0,
was_duplicate: result.was_duplicate,
file_size: result.file_size,
}
}
}
/// Aggregate statistics about the managed blob store.
#[derive(Debug, Serialize)]
pub struct ManagedStorageStatsResponse {
/// Total number of stored blobs.
pub total_blobs: u64,
/// Combined size of all blobs, in bytes.
pub total_size_bytes: u64,
/// Count of orphaned blobs (presumably no longer referenced by any media — confirm in core).
pub orphaned_blobs: u64,
/// Space-saving ratio achieved by deduplication (semantics defined in core).
pub deduplication_ratio: f64,
}
/// Field-for-field copy of the core stats type into its API shape.
impl From<pinakes_core::model::ManagedStorageStats> for ManagedStorageStatsResponse {
fn from(stats: pinakes_core::model::ManagedStorageStats) -> Self {
Self {
total_blobs: stats.total_blobs,
total_size_bytes: stats.total_size_bytes,
orphaned_blobs: stats.orphaned_blobs,
deduplication_ratio: stats.deduplication_ratio,
}
}
}
// ===== Sync =====
/// Request body for registering a new sync device.
#[derive(Debug, Deserialize)]
pub struct RegisterDeviceRequest {
/// Human-readable device name.
pub name: String,
/// Device category as a string; interpreted by the core sync layer.
pub device_type: String,
pub client_version: String,
pub os_info: Option<String>,
}
/// API view of a registered sync device.
#[derive(Debug, Serialize)]
pub struct DeviceResponse {
pub id: String,
pub name: String,
pub device_type: String,
pub client_version: String,
pub os_info: Option<String>,
/// Last completed sync, if the device has ever synced.
pub last_sync_at: Option<DateTime<Utc>>,
pub last_seen_at: DateTime<Utc>,
/// Position in the server change log this device has consumed, if any.
pub sync_cursor: Option<i64>,
pub enabled: bool,
pub created_at: DateTime<Utc>,
}
/// Flattens the core device record (newtype id, typed device kind) to strings.
impl From<pinakes_core::sync::SyncDevice> for DeviceResponse {
fn from(d: pinakes_core::sync::SyncDevice) -> Self {
Self {
id: d.id.0.to_string(),
name: d.name,
device_type: d.device_type.to_string(),
client_version: d.client_version,
os_info: d.os_info,
last_sync_at: d.last_sync_at,
last_seen_at: d.last_seen_at,
sync_cursor: d.sync_cursor,
enabled: d.enabled,
created_at: d.created_at,
}
}
}
/// Registration result: the device plus its auth token.
/// NOTE(review): token is presumably only returned here, at registration — confirm.
#[derive(Debug, Serialize)]
pub struct DeviceRegistrationResponse {
pub device: DeviceResponse,
pub device_token: String,
}
/// Partial update of a device; absent fields are left unchanged.
#[derive(Debug, Deserialize)]
pub struct UpdateDeviceRequest {
pub name: Option<String>,
pub enabled: Option<bool>,
}
/// Query parameters for paging through the sync change log.
#[derive(Debug, Deserialize)]
pub struct GetChangesParams {
/// Resume point from a previous page; omit to start from the beginning.
pub cursor: Option<i64>,
pub limit: Option<u64>,
}
/// One entry from the server-side sync change log.
#[derive(Debug, Serialize)]
pub struct SyncChangeResponse {
pub id: String,
/// Monotonic sequence number (also usable as a paging cursor).
pub sequence: i64,
pub change_type: String,
pub media_id: Option<String>,
pub path: String,
pub content_hash: Option<String>,
pub file_size: Option<u64>,
pub timestamp: DateTime<Utc>,
}
/// Flattens the core log entry (newtype ids/hashes, typed change kind) to strings.
impl From<pinakes_core::sync::SyncLogEntry> for SyncChangeResponse {
fn from(e: pinakes_core::sync::SyncLogEntry) -> Self {
Self {
id: e.id.to_string(),
sequence: e.sequence,
change_type: e.change_type.to_string(),
media_id: e.media_id.map(|id| id.0.to_string()),
path: e.path,
content_hash: e.content_hash.map(|h| h.0),
file_size: e.file_size,
timestamp: e.timestamp,
}
}
}
/// A page of change-log entries plus the cursor to resume from.
#[derive(Debug, Serialize)]
pub struct ChangesResponse {
pub changes: Vec<SyncChangeResponse>,
pub cursor: i64,
pub has_more: bool,
}
/// A single change observed on the client side.
#[derive(Debug, Deserialize)]
pub struct ClientChangeReport {
pub path: String,
pub change_type: String,
pub content_hash: Option<String>,
pub file_size: Option<u64>,
/// Client-local modification time (epoch-based; exact unit not shown here — confirm).
pub local_mtime: Option<i64>,
}
/// Batch of client-observed changes reported in one request.
#[derive(Debug, Deserialize)]
pub struct ReportChangesRequest {
pub changes: Vec<ClientChangeReport>,
}
/// Server verdict on a reported batch of changes.
#[derive(Debug, Serialize)]
pub struct ReportChangesResponse {
/// Paths whose changes were accepted as-is.
pub accepted: Vec<String>,
/// Changes that collide with server state and need resolution.
pub conflicts: Vec<ConflictResponse>,
/// Paths whose content the server still needs uploaded.
pub upload_required: Vec<String>,
}
/// A detected divergence between client and server content for one path.
#[derive(Debug, Serialize)]
pub struct ConflictResponse {
pub id: String,
pub path: String,
pub local_hash: String,
pub server_hash: String,
pub detected_at: DateTime<Utc>,
}
impl From<pinakes_core::sync::SyncConflict> for ConflictResponse {
fn from(c: pinakes_core::sync::SyncConflict) -> Self {
Self {
id: c.id.to_string(),
path: c.path,
local_hash: c.local_hash,
server_hash: c.server_hash,
detected_at: c.detected_at,
}
}
}
/// How to resolve a conflict; string value interpreted by the core layer.
#[derive(Debug, Deserialize)]
pub struct ResolveConflictRequest {
pub resolution: String,
}
/// Request to open a chunked upload session.
#[derive(Debug, Deserialize)]
pub struct CreateUploadSessionRequest {
pub target_path: String,
/// Expected hash of the complete file (presumably verified at completion — confirm in core).
pub expected_hash: String,
pub expected_size: u64,
/// Optional chunk size override; server picks a default when omitted.
pub chunk_size: Option<u64>,
}
/// State of a chunked upload session.
#[derive(Debug, Serialize)]
pub struct UploadSessionResponse {
pub id: String,
pub target_path: String,
pub expected_hash: String,
pub expected_size: u64,
pub chunk_size: u64,
pub chunk_count: u64,
pub status: String,
pub created_at: DateTime<Utc>,
/// Instant after which the session is no longer valid.
pub expires_at: DateTime<Utc>,
}
/// Flattens the core session (newtype id/hash, typed status) to strings.
impl From<pinakes_core::sync::UploadSession> for UploadSessionResponse {
fn from(s: pinakes_core::sync::UploadSession) -> Self {
Self {
id: s.id.to_string(),
target_path: s.target_path,
expected_hash: s.expected_hash.0,
expected_size: s.expected_size,
chunk_size: s.chunk_size,
chunk_count: s.chunk_count,
status: s.status.to_string(),
created_at: s.created_at,
expires_at: s.expires_at,
}
}
}
/// Acknowledgement that a single chunk was received.
#[derive(Debug, Serialize)]
pub struct ChunkUploadedResponse {
pub chunk_index: u64,
pub received: bool,
}
/// Client confirmation that it has applied the change log up to `cursor`.
#[derive(Debug, Deserialize)]
pub struct AcknowledgeChangesRequest {
pub cursor: i64,
}
// ===== Enhanced Sharing =====
/// Request body for creating a share.
/// `target_type` and `recipient_type` are discriminator strings; which of the
/// optional id fields must accompany each value is validated in the handler.
#[derive(Debug, Deserialize)]
pub struct CreateShareRequest {
/// One of: "media", "collection", "tag", "saved_search".
pub target_type: String,
/// UUID of the target, as a string.
pub target_id: String,
/// One of: "public_link", "user", "group".
pub recipient_type: String,
/// Required when `recipient_type == "user"`.
pub recipient_user_id: Option<Uuid>,
/// Required when `recipient_type == "group"`.
pub recipient_group_id: Option<Uuid>,
/// Optional password protecting a public link (hashed before storage).
pub password: Option<String>,
/// Defaults to view-only when omitted.
pub permissions: Option<SharePermissionsRequest>,
pub note: Option<String>,
/// Relative expiry; omitted means the share never expires.
pub expires_in_hours: Option<u64>,
/// Handler default is true.
pub inherit_to_children: Option<bool>,
}
/// Per-capability permission flags; unset flags fall back to handler defaults.
#[derive(Debug, Deserialize)]
pub struct SharePermissionsRequest {
pub can_view: Option<bool>,
pub can_download: Option<bool>,
pub can_edit: Option<bool>,
pub can_delete: Option<bool>,
pub can_reshare: Option<bool>,
pub can_add: Option<bool>,
}
/// API view of a share. The core `ShareTarget`/`ShareRecipient` enums are
/// flattened into discriminator strings plus optional ids/token (see the
/// `From<Share>` impl below for the exact mapping).
#[derive(Debug, Serialize)]
pub struct ShareResponse {
pub id: String,
pub target_type: String,
pub target_id: String,
pub owner_id: String,
pub recipient_type: String,
pub recipient_user_id: Option<String>,
pub recipient_group_id: Option<String>,
/// Set only for public-link shares.
pub public_token: Option<String>,
pub permissions: SharePermissionsResponse,
pub note: Option<String>,
pub expires_at: Option<DateTime<Utc>>,
pub access_count: u64,
pub last_accessed: Option<DateTime<Utc>>,
pub inherit_to_children: bool,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
}
/// Resolved (non-optional) permission flags as stored on the share.
#[derive(Debug, Serialize)]
pub struct SharePermissionsResponse {
pub can_view: bool,
pub can_download: bool,
pub can_edit: bool,
pub can_delete: bool,
pub can_reshare: bool,
pub can_add: bool,
}
impl From<pinakes_core::sharing::SharePermissions> for SharePermissionsResponse {
fn from(p: pinakes_core::sharing::SharePermissions) -> Self {
Self {
can_view: p.can_view,
can_download: p.can_download,
can_edit: p.can_edit,
can_delete: p.can_delete,
can_reshare: p.can_reshare,
can_add: p.can_add,
}
}
}
/// Flattens the target/recipient enums into the wire shape: a discriminator
/// string plus whichever id (or public token) applies to that variant.
impl From<pinakes_core::sharing::Share> for ShareResponse {
fn from(s: pinakes_core::sharing::Share) -> Self {
// Target: string tag + stringified id.
let (target_type, target_id) = match &s.target {
pinakes_core::sharing::ShareTarget::Media { media_id } => {
("media".to_string(), media_id.0.to_string())
}
pinakes_core::sharing::ShareTarget::Collection { collection_id } => {
("collection".to_string(), collection_id.to_string())
}
pinakes_core::sharing::ShareTarget::Tag { tag_id } => {
("tag".to_string(), tag_id.to_string())
}
pinakes_core::sharing::ShareTarget::SavedSearch { search_id } => {
("saved_search".to_string(), search_id.to_string())
}
};
// Recipient: exactly one of user id / group id / public token is set,
// depending on the variant. Federated recipients expose no id or token here.
let (recipient_type, recipient_user_id, recipient_group_id, public_token) =
match &s.recipient {
pinakes_core::sharing::ShareRecipient::PublicLink { token, .. } => {
("public_link".to_string(), None, None, Some(token.clone()))
}
pinakes_core::sharing::ShareRecipient::User { user_id } => {
("user".to_string(), Some(user_id.0.to_string()), None, None)
}
pinakes_core::sharing::ShareRecipient::Group { group_id } => {
("group".to_string(), None, Some(group_id.to_string()), None)
}
pinakes_core::sharing::ShareRecipient::Federated { .. } => {
("federated".to_string(), None, None, None)
}
};
Self {
id: s.id.0.to_string(),
target_type,
target_id,
owner_id: s.owner_id.0.to_string(),
recipient_type,
recipient_user_id,
recipient_group_id,
public_token,
permissions: s.permissions.into(),
note: s.note,
expires_at: s.expires_at,
access_count: s.access_count,
last_accessed: s.last_accessed,
inherit_to_children: s.inherit_to_children,
created_at: s.created_at,
updated_at: s.updated_at,
}
}
}
/// Partial update of a share; absent fields are left unchanged.
/// NOTE(review): because every field is Option, values can be set but never
/// cleared through this request — confirm that is intended.
#[derive(Debug, Deserialize)]
pub struct UpdateShareRequest {
pub permissions: Option<SharePermissionsRequest>,
pub note: Option<String>,
pub expires_at: Option<DateTime<Utc>>,
pub inherit_to_children: Option<bool>,
}
/// One entry in a share's activity/audit log.
#[derive(Debug, Serialize)]
pub struct ShareActivityResponse {
pub id: String,
pub share_id: String,
/// Acting user, if authenticated; None for anonymous public-link access.
pub actor_id: Option<String>,
pub actor_ip: Option<String>,
pub action: String,
pub details: Option<String>,
pub timestamp: DateTime<Utc>,
}
impl From<pinakes_core::sharing::ShareActivity> for ShareActivityResponse {
fn from(a: pinakes_core::sharing::ShareActivity) -> Self {
Self {
id: a.id.to_string(),
share_id: a.share_id.0.to_string(),
actor_id: a.actor_id.map(|id| id.0.to_string()),
actor_ip: a.actor_ip,
action: a.action.to_string(),
details: a.details,
timestamp: a.timestamp,
}
}
}
/// A share-related notification delivered to a user.
#[derive(Debug, Serialize)]
pub struct ShareNotificationResponse {
pub id: String,
pub share_id: String,
pub notification_type: String,
pub is_read: bool,
pub created_at: DateTime<Utc>,
}
impl From<pinakes_core::sharing::ShareNotification> for ShareNotificationResponse {
fn from(n: pinakes_core::sharing::ShareNotification) -> Self {
Self {
id: n.id.to_string(),
share_id: n.share_id.0.to_string(),
notification_type: n.notification_type.to_string(),
is_read: n.is_read,
created_at: n.created_at,
}
}
}
/// Request to delete several shares in one call.
#[derive(Debug, Deserialize)]
pub struct BatchDeleteSharesRequest {
pub share_ids: Vec<Uuid>,
}
/// Query parameters for accessing a public share (optional password).
#[derive(Debug, Deserialize)]
pub struct AccessSharedRequest {
pub password: Option<String>,
}

View file

@ -69,3 +69,31 @@ impl From<pinakes_core::error::PinakesError> for ApiError {
Self(e)
}
}
impl ApiError {
pub fn bad_request(msg: impl Into<String>) -> Self {
Self(pinakes_core::error::PinakesError::InvalidOperation(
msg.into(),
))
}
pub fn not_found(msg: impl Into<String>) -> Self {
Self(pinakes_core::error::PinakesError::NotFound(msg.into()))
}
pub fn internal(msg: impl Into<String>) -> Self {
Self(pinakes_core::error::PinakesError::Database(msg.into()))
}
pub fn forbidden(msg: impl Into<String>) -> Self {
Self(pinakes_core::error::PinakesError::Authorization(msg.into()))
}
pub fn unauthorized(msg: impl Into<String>) -> Self {
Self(pinakes_core::error::PinakesError::Authentication(
msg.into(),
))
}
}
pub type ApiResult<T> = Result<T, ApiError>;

View file

@ -45,17 +45,20 @@ struct Cli {
migrate_only: bool,
}
fn resolve_config_path(explicit: Option<&std::path::Path>) -> PathBuf {
/// Resolve the configuration file path.
/// Returns (path, was_explicit) where was_explicit indicates if the path was
/// explicitly provided by the user (vs discovered).
fn resolve_config_path(explicit: Option<&std::path::Path>) -> (PathBuf, bool) {
if let Some(path) = explicit {
return path.to_path_buf();
return (path.to_path_buf(), true);
}
// Check current directory
let local = PathBuf::from("pinakes.toml");
if local.exists() {
return local;
return (local, false);
}
// XDG default
Config::default_config_path()
(Config::default_config_path(), false)
}
#[tokio::main]
@ -89,11 +92,17 @@ async fn main() -> Result<()> {
}
}
let config_path = resolve_config_path(cli.config.as_deref());
let (config_path, was_explicit) = resolve_config_path(cli.config.as_deref());
let mut config = if config_path.exists() {
info!(path = %config_path.display(), "loading configuration from file");
Config::from_file(&config_path)?
} else if was_explicit {
// User explicitly provided a config path that doesn't exist - this is an error
return Err(anyhow::anyhow!(
"configuration file not found: {}",
config_path.display()
));
} else {
info!(
"using default configuration (no config file found at {})",
@ -486,6 +495,34 @@ async fn main() -> Result<()> {
});
}
// Initialize managed storage service if enabled
let managed_storage = {
let config_read = config_arc.read().await;
if config_read.managed_storage.enabled {
let service = pinakes_core::managed_storage::ManagedStorageService::new(
config_read.managed_storage.storage_dir.clone(),
config_read.managed_storage.max_upload_size,
config_read.managed_storage.verify_on_read,
);
match service.init().await {
Ok(()) => {
info!(
path = %config_read.managed_storage.storage_dir.display(),
"managed storage initialized"
);
Some(Arc::new(service))
}
Err(e) => {
tracing::error!(error = %e, "failed to initialize managed storage");
None
}
}
} else {
tracing::info!("managed storage disabled in configuration");
None
}
};
let state = AppState {
storage: storage.clone(),
config: config_arc.clone(),
@ -496,6 +533,7 @@ async fn main() -> Result<()> {
scheduler,
plugin_manager,
transcode_service,
managed_storage,
};
// Periodic session cleanup (every 15 minutes)

View file

@ -1,6 +1,6 @@
use axum::{
Json, Router,
extract::{Path, Query, State},
extract::{Extension, Path, Query, State},
http::StatusCode,
response::IntoResponse,
routing::{get, put},
@ -13,7 +13,7 @@ use pinakes_core::{
model::{AuthorInfo, BookMetadata, MediaId, Pagination, ReadingProgress, ReadingStatus},
};
use crate::{dto::MediaResponse, error::ApiError, state::AppState};
use crate::{auth::resolve_user_id, dto::MediaResponse, error::ApiError, state::AppState};
/// Book metadata response DTO
#[derive(Debug, Serialize, Deserialize)]
@ -240,15 +240,15 @@ pub async fn get_author_books(
/// Get reading progress for a book
pub async fn get_reading_progress(
State(state): State<AppState>,
Extension(username): Extension<String>,
Path(media_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiError> {
// TODO: Get user_id from auth context
let user_id = Uuid::new_v4(); // Placeholder
let user_id = resolve_user_id(&state.storage, &username).await?;
let media_id = MediaId(media_id);
let progress = state
.storage
.get_reading_progress(user_id, media_id)
.get_reading_progress(user_id.0, media_id)
.await?
.ok_or(ApiError(PinakesError::NotFound(
"Reading progress not found".to_string(),
@ -260,16 +260,16 @@ pub async fn get_reading_progress(
/// Update reading progress for a book
pub async fn update_reading_progress(
State(state): State<AppState>,
Extension(username): Extension<String>,
Path(media_id): Path<Uuid>,
Json(req): Json<UpdateProgressRequest>,
) -> Result<impl IntoResponse, ApiError> {
// TODO: Get user_id from auth context
let user_id = Uuid::new_v4(); // Placeholder
let user_id = resolve_user_id(&state.storage, &username).await?;
let media_id = MediaId(media_id);
state
.storage
.update_reading_progress(user_id, media_id, req.current_page)
.update_reading_progress(user_id.0, media_id, req.current_page)
.await?;
Ok(StatusCode::NO_CONTENT)
@ -278,14 +278,14 @@ pub async fn update_reading_progress(
/// Get user's reading list
pub async fn get_reading_list(
State(state): State<AppState>,
Extension(username): Extension<String>,
Query(params): Query<ReadingListQuery>,
) -> Result<impl IntoResponse, ApiError> {
// TODO: Get user_id from auth context
let user_id = Uuid::new_v4(); // Placeholder
let user_id = resolve_user_id(&state.storage, &username).await?;
let items = state
.storage
.get_reading_list(user_id, params.status)
.get_reading_list(user_id.0, params.status)
.await?;
let response: Vec<MediaResponse> = items.into_iter().map(MediaResponse::from).collect();

View file

@ -19,11 +19,14 @@ pub mod saved_searches;
pub mod scan;
pub mod scheduled_tasks;
pub mod search;
pub mod shares;
pub mod social;
pub mod statistics;
pub mod streaming;
pub mod subtitles;
pub mod sync;
pub mod tags;
pub mod transcode;
pub mod upload;
pub mod users;
pub mod webhooks;

View file

@ -27,6 +27,12 @@ pub struct TimelineQuery {
pub group_by: GroupBy,
pub year: Option<i32>,
pub month: Option<u32>,
#[serde(default = "default_timeline_limit")]
pub limit: u64,
}
/// Serde default for `TimelineQuery::limit` when the query string omits it.
fn default_timeline_limit() -> u64 {
    10_000
}
/// Timeline group response
@ -62,12 +68,12 @@ pub async fn get_timeline(
State(state): State<AppState>,
Query(query): Query<TimelineQuery>,
) -> Result<impl IntoResponse, ApiError> {
// Query photos with date_taken
// Query photos with date_taken (limit is configurable, defaults to 10000)
let all_media = state
.storage
.list_media(&pinakes_core::model::Pagination {
offset: 0,
limit: 10000, // TODO: Make this more efficient with streaming
limit: query.limit.min(50000), // Cap at 50000 for safety
sort: Some("date_taken DESC".to_string()),
})
.await?;

View file

@ -0,0 +1,543 @@
use axum::{
Json,
extract::{ConnectInfo, Extension, Path, Query, State},
http::StatusCode,
};
use chrono::Utc;
use std::net::SocketAddr;
use uuid::Uuid;
use crate::auth::resolve_user_id;
use crate::dto::{
AccessSharedRequest, BatchDeleteSharesRequest, CreateShareRequest, MediaResponse,
PaginationParams, ShareActivityResponse, ShareNotificationResponse, ShareResponse,
UpdateShareRequest,
};
use crate::error::{ApiError, ApiResult};
use crate::state::AppState;
use pinakes_core::model::MediaId;
use pinakes_core::model::Pagination;
use pinakes_core::sharing::{
Share, ShareActivity, ShareActivityAction, ShareId, ShareNotification, ShareNotificationType,
SharePermissions, ShareRecipient, ShareTarget, generate_share_token, hash_share_password,
verify_share_password,
};
use pinakes_core::users::UserId;
/// Create a new share.
/// POST /api/shares
///
/// Checks the sharing feature gates, resolves target and recipient from the
/// request's discriminator strings, persists the share, and (best effort)
/// notifies the recipient when the share is directed at a specific user.
pub async fn create_share(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Json(req): Json<CreateShareRequest>,
) -> ApiResult<Json<ShareResponse>> {
    // Feature gates come from live config; release the read guard before any
    // storage work so we don't hold it across awaits longer than needed.
    let config = state.config.read().await;
    if !config.sharing.enabled {
        return Err(ApiError::bad_request("Sharing is not enabled"));
    }
    if req.recipient_type == "public_link" && !config.sharing.allow_public_links {
        return Err(ApiError::bad_request("Public links are not allowed"));
    }
    drop(config);
    let owner_id = resolve_user_id(&state.storage, &username).await?;
    // Parse target: discriminator string + UUID.
    let target_id: Uuid = req
        .target_id
        .parse()
        .map_err(|_| ApiError::bad_request("Invalid target_id"))?;
    let target = match req.target_type.as_str() {
        "media" => ShareTarget::Media {
            media_id: MediaId(target_id),
        },
        "collection" => ShareTarget::Collection {
            collection_id: target_id,
        },
        "tag" => ShareTarget::Tag { tag_id: target_id },
        "saved_search" => ShareTarget::SavedSearch {
            search_id: target_id,
        },
        _ => return Err(ApiError::bad_request("Invalid target_type")),
    };
    // Parse recipient: each variant requires its own id (or mints a token).
    let recipient = match req.recipient_type.as_str() {
        "public_link" => {
            let token = generate_share_token();
            // Never store the raw password; only its hash.
            let password_hash = req.password.as_ref().map(|p| hash_share_password(p));
            ShareRecipient::PublicLink {
                token,
                password_hash,
            }
        }
        "user" => {
            let recipient_user_id = req.recipient_user_id.ok_or_else(|| {
                ApiError::bad_request("recipient_user_id required for user share")
            })?;
            ShareRecipient::User {
                user_id: UserId(recipient_user_id),
            }
        }
        "group" => {
            let group_id = req.recipient_group_id.ok_or_else(|| {
                ApiError::bad_request("recipient_group_id required for group share")
            })?;
            ShareRecipient::Group { group_id }
        }
        _ => return Err(ApiError::bad_request("Invalid recipient_type")),
    };
    // Permissions: explicit flags with view-on/everything-else-off defaults,
    // or plain view-only when no permissions object was supplied.
    let permissions = if let Some(perms) = req.permissions {
        SharePermissions {
            can_view: perms.can_view.unwrap_or(true),
            can_download: perms.can_download.unwrap_or(false),
            can_edit: perms.can_edit.unwrap_or(false),
            can_delete: perms.can_delete.unwrap_or(false),
            can_reshare: perms.can_reshare.unwrap_or(false),
            can_add: perms.can_add.unwrap_or(false),
        }
    } else {
        SharePermissions::view_only()
    };
    // Expiration: validate instead of `hours as i64`, which silently wraps a
    // huge u64 into a negative hour count (a share born expired) or hands
    // chrono::Duration::hours an out-of-range value (which panics).
    // ~1000 years is a generous upper bound well inside chrono's range.
    const MAX_EXPIRY_HOURS: i64 = 24 * 365 * 1000;
    let expires_at = match req.expires_in_hours {
        Some(hours) => {
            let hours = i64::try_from(hours)
                .ok()
                .filter(|h| *h <= MAX_EXPIRY_HOURS)
                .ok_or_else(|| ApiError::bad_request("expires_in_hours is too large"))?;
            Some(Utc::now() + chrono::Duration::hours(hours))
        }
        None => None,
    };
    let share = Share {
        id: ShareId(Uuid::now_v7()),
        target,
        owner_id,
        recipient,
        permissions,
        note: req.note,
        expires_at,
        access_count: 0,
        last_accessed: None,
        inherit_to_children: req.inherit_to_children.unwrap_or(true),
        parent_share_id: None,
        created_at: Utc::now(),
        updated_at: Utc::now(),
    };
    let created = state
        .storage
        .create_share(&share)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to create share: {}", e)))?;
    // Best-effort notification for direct user shares: a notification failure
    // must not fail the share creation, so the error is deliberately ignored.
    if let ShareRecipient::User { user_id } = &created.recipient {
        let notification = ShareNotification {
            id: Uuid::now_v7(),
            user_id: *user_id,
            share_id: created.id,
            notification_type: ShareNotificationType::NewShare,
            is_read: false,
            created_at: Utc::now(),
        };
        let _ = state.storage.create_share_notification(&notification).await;
    }
    Ok(Json(created.into()))
}
/// List outgoing shares (shares I created)
/// GET /api/shares/outgoing
pub async fn list_outgoing(
State(state): State<AppState>,
Extension(username): Extension<String>,
Query(params): Query<PaginationParams>,
) -> ApiResult<Json<Vec<ShareResponse>>> {
let user_id = resolve_user_id(&state.storage, &username).await?;
let pagination = Pagination {
offset: params.offset.unwrap_or(0),
limit: params.limit.unwrap_or(50),
sort: params.sort,
};
let shares = state
.storage
.list_shares_by_owner(user_id, &pagination)
.await
.map_err(|e| ApiError::internal(format!("Failed to list shares: {}", e)))?;
Ok(Json(shares.into_iter().map(Into::into).collect()))
}
/// List incoming shares (shares shared with me)
/// GET /api/shares/incoming
pub async fn list_incoming(
State(state): State<AppState>,
Extension(username): Extension<String>,
Query(params): Query<PaginationParams>,
) -> ApiResult<Json<Vec<ShareResponse>>> {
let user_id = resolve_user_id(&state.storage, &username).await?;
let pagination = Pagination {
offset: params.offset.unwrap_or(0),
limit: params.limit.unwrap_or(50),
sort: params.sort,
};
let shares = state
.storage
.list_shares_for_user(user_id, &pagination)
.await
.map_err(|e| ApiError::internal(format!("Failed to list shares: {}", e)))?;
Ok(Json(shares.into_iter().map(Into::into).collect()))
}
/// Get share details.
/// GET /api/shares/{id}
///
/// Visible to the share's owner and, for direct user shares, the recipient.
/// Other recipient kinds (group/public/federated) are not granted access
/// through this endpoint.
pub async fn get_share(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Path(id): Path<Uuid>,
) -> ApiResult<Json<ShareResponse>> {
    let caller = resolve_user_id(&state.storage, &username).await?;
    let share = state
        .storage
        .get_share(ShareId(id))
        .await
        .map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?;
    let allowed = share.owner_id == caller
        || matches!(
            &share.recipient,
            ShareRecipient::User { user_id } if *user_id == caller
        );
    if !allowed {
        return Err(ApiError::forbidden("Not authorized to view this share"));
    }
    Ok(Json(share.into()))
}
/// Update a share
/// PATCH /api/shares/{id}
///
/// Owner-only. Merge semantics: absent request fields leave the stored value
/// untouched; present fields overwrite it. NOTE(review): because every field
/// is Option, `note`/`expires_at` can be set but never cleared here — confirm
/// that is intended. The recipient is notified of the update, best effort.
pub async fn update_share(
State(state): State<AppState>,
Extension(username): Extension<String>,
Path(id): Path<Uuid>,
Json(req): Json<UpdateShareRequest>,
) -> ApiResult<Json<ShareResponse>> {
let user_id = resolve_user_id(&state.storage, &username).await?;
let mut share = state
.storage
.get_share(ShareId(id))
.await
.map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?;
// Only owner can update
if share.owner_id != user_id {
return Err(ApiError::forbidden("Only the owner can update this share"));
}
// Update fields: per-flag merge — unset flags keep their current values.
if let Some(perms) = req.permissions {
share.permissions = SharePermissions {
can_view: perms.can_view.unwrap_or(share.permissions.can_view),
can_download: perms.can_download.unwrap_or(share.permissions.can_download),
can_edit: perms.can_edit.unwrap_or(share.permissions.can_edit),
can_delete: perms.can_delete.unwrap_or(share.permissions.can_delete),
can_reshare: perms.can_reshare.unwrap_or(share.permissions.can_reshare),
can_add: perms.can_add.unwrap_or(share.permissions.can_add),
};
}
if let Some(note) = req.note {
share.note = Some(note);
}
if let Some(expires_at) = req.expires_at {
share.expires_at = Some(expires_at);
}
if let Some(inherit) = req.inherit_to_children {
share.inherit_to_children = inherit;
}
share.updated_at = Utc::now();
let updated = state
.storage
.update_share(&share)
.await
.map_err(|e| ApiError::internal(format!("Failed to update share: {}", e)))?;
// Notify recipient of update (best effort — failure is deliberately ignored)
if let ShareRecipient::User { user_id } = &updated.recipient {
let notification = ShareNotification {
id: Uuid::now_v7(),
user_id: *user_id,
share_id: updated.id,
notification_type: ShareNotificationType::ShareUpdated,
is_read: false,
created_at: Utc::now(),
};
let _ = state.storage.create_share_notification(&notification).await;
}
Ok(Json(updated.into()))
}
/// Delete (revoke) a share. Owner-only.
/// DELETE /api/shares/{id}
pub async fn delete_share(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Path(id): Path<Uuid>,
) -> ApiResult<StatusCode> {
    let caller = resolve_user_id(&state.storage, &username).await?;
    let share_id = ShareId(id);
    let share = state
        .storage
        .get_share(share_id)
        .await
        .map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?;
    if share.owner_id != caller {
        return Err(ApiError::forbidden("Only the owner can revoke this share"));
    }
    // Notify the recipient before the row disappears; best effort, so the
    // notification error is deliberately ignored.
    if let ShareRecipient::User { user_id } = &share.recipient {
        let revoked = ShareNotification {
            id: Uuid::now_v7(),
            user_id: *user_id,
            share_id: share.id,
            notification_type: ShareNotificationType::ShareRevoked,
            is_read: false,
            created_at: Utc::now(),
        };
        let _ = state.storage.create_share_notification(&revoked).await;
    }
    state
        .storage
        .delete_share(share_id)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to delete share: {}", e)))?;
    Ok(StatusCode::NO_CONTENT)
}
/// Batch delete shares. Ownership of every id is verified up front, so the
/// request is all-or-nothing: one foreign or missing share fails the batch.
/// POST /api/shares/batch/delete
pub async fn batch_delete(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Json(req): Json<BatchDeleteSharesRequest>,
) -> ApiResult<Json<serde_json::Value>> {
    let caller = resolve_user_id(&state.storage, &username).await?;
    let ids: Vec<ShareId> = req.share_ids.into_iter().map(ShareId).collect();
    for id in &ids {
        let share = state
            .storage
            .get_share(*id)
            .await
            .map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?;
        if share.owner_id != caller {
            return Err(ApiError::forbidden(format!(
                "Not authorized to delete share {}",
                id.0
            )));
        }
    }
    let deleted = state
        .storage
        .batch_delete_shares(&ids)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to batch delete: {}", e)))?;
    Ok(Json(serde_json::json!({ "deleted": deleted })))
}
/// Access a public shared resource
/// GET /api/shared/{token}
///
/// Unauthenticated endpoint: the URL token is the capability. Ordering is
/// deliberate — expiry is checked before the password (expired links never
/// prompt), a wrong password is recorded as a PasswordFailed activity, and a
/// successful access bumps the counter and logs an Accessed activity.
pub async fn access_shared(
State(state): State<AppState>,
Path(token): Path<String>,
Query(params): Query<AccessSharedRequest>,
ConnectInfo(addr): ConnectInfo<SocketAddr>,
) -> ApiResult<Json<MediaResponse>> {
let share = state
.storage
.get_share_by_token(&token)
.await
.map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?;
// Check expiration — expired shares answer 404-style, with nothing logged.
if let Some(expires_at) = share.expires_at {
if Utc::now() > expires_at {
return Err(ApiError::not_found("Share has expired"));
}
}
// Check password if required (only public links carry a password hash).
if let ShareRecipient::PublicLink { password_hash, .. } = &share.recipient {
if let Some(hash) = password_hash {
let provided_password = params
.password
.as_ref()
.ok_or_else(|| ApiError::unauthorized("Password required"))?;
if !verify_share_password(provided_password, hash) {
// Log failed attempt (anonymous: actor_id is None, IP recorded);
// logging is best effort and never masks the auth failure.
let activity = ShareActivity {
id: Uuid::now_v7(),
share_id: share.id,
actor_id: None,
actor_ip: Some(addr.ip().to_string()),
action: ShareActivityAction::PasswordFailed,
details: None,
timestamp: Utc::now(),
};
let _ = state.storage.record_share_activity(&activity).await;
return Err(ApiError::unauthorized("Invalid password"));
}
}
}
// Record access — presumably bumps access_count/last_accessed (confirm in
// storage). Unlike the activity log below, a failure here aborts the request.
state
.storage
.record_share_access(share.id)
.await
.map_err(|e| ApiError::internal(format!("Failed to record access: {}", e)))?;
// Log the access (best effort; error deliberately ignored).
let activity = ShareActivity {
id: Uuid::now_v7(),
share_id: share.id,
actor_id: None,
actor_ip: Some(addr.ip().to_string()),
action: ShareActivityAction::Accessed,
details: None,
timestamp: Utc::now(),
};
let _ = state.storage.record_share_activity(&activity).await;
// Return the shared content — only media targets are served today.
match &share.target {
ShareTarget::Media { media_id } => {
let item = state
.storage
.get_media(*media_id)
.await
.map_err(|e| ApiError::not_found(format!("Media not found: {}", e)))?;
Ok(Json(item.into()))
}
_ => {
// For collections/tags, return a placeholder
// Full implementation would return the collection contents
Err(ApiError::bad_request(
"Collection/tag sharing not yet fully implemented",
))
}
}
}
/// Get share activity log
/// GET /api/shares/{id}/activity
///
/// Returns a paginated slice of the activity entries recorded for a share.
/// Visible to the share's owner only.
pub async fn get_activity(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Path(id): Path<Uuid>,
    Query(params): Query<PaginationParams>,
) -> ApiResult<Json<Vec<ShareActivityResponse>>> {
    let requester = resolve_user_id(&state.storage, &username).await?;
    let share = state
        .storage
        .get_share(ShareId(id))
        .await
        .map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?;
    // Only owner can view activity
    if share.owner_id != requester {
        return Err(ApiError::forbidden(
            "Only the owner can view share activity",
        ));
    }
    // Fill in the paging defaults (first page, 50 entries).
    let page = Pagination {
        offset: params.offset.unwrap_or(0),
        limit: params.limit.unwrap_or(50),
        sort: params.sort,
    };
    let entries = state
        .storage
        .get_share_activity(ShareId(id), &page)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to get activity: {}", e)))?;
    Ok(Json(entries.into_iter().map(Into::into).collect()))
}
/// Get unread share notifications
/// GET /api/notifications/shares
///
/// Lists every unread share notification for the authenticated user.
pub async fn get_notifications(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
) -> ApiResult<Json<Vec<ShareNotificationResponse>>> {
    let user_id = resolve_user_id(&state.storage, &username).await?;
    match state.storage.get_unread_notifications(user_id).await {
        Ok(items) => Ok(Json(items.into_iter().map(Into::into).collect())),
        Err(e) => Err(ApiError::internal(format!(
            "Failed to get notifications: {}",
            e
        ))),
    }
}
/// Mark a notification as read
/// POST /api/notifications/shares/{id}/read
///
/// NOTE(review): the authenticated username is unused here, so ownership of
/// the notification is not verified at this layer — confirm the storage
/// layer scopes the update, or add an explicit ownership check.
pub async fn mark_notification_read(
    State(state): State<AppState>,
    Extension(_username): Extension<String>,
    Path(id): Path<Uuid>,
) -> ApiResult<StatusCode> {
    if let Err(e) = state.storage.mark_notification_read(id).await {
        return Err(ApiError::internal(format!("Failed to mark as read: {}", e)));
    }
    Ok(StatusCode::OK)
}
/// Mark all notifications as read
/// POST /api/notifications/shares/read-all
///
/// Clears the unread flag on every share notification belonging to the
/// authenticated user.
pub async fn mark_all_read(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
) -> ApiResult<StatusCode> {
    let user_id = resolve_user_id(&state.storage, &username).await?;
    state
        .storage
        .mark_all_notifications_read(user_id)
        .await
        .map(|_| StatusCode::OK)
        .map_err(|e| ApiError::internal(format!("Failed to mark all as read: {}", e)))
}

View file

@ -0,0 +1,743 @@
use std::io::SeekFrom;
use std::path::Path as FilePath;

use axum::{
    Json,
    body::Body,
    extract::{Extension, Path, Query, State},
    http::{HeaderMap, StatusCode, header},
    response::IntoResponse,
};
use chrono::Utc;
use tokio::io::{AsyncReadExt, AsyncSeekExt};
use tokio_util::io::ReaderStream;
use uuid::Uuid;

use pinakes_core::config::ConflictResolution;
use pinakes_core::model::ContentHash;
use pinakes_core::sync::{
    ChunkInfo, DeviceId, DeviceType, SyncChangeType, SyncConflict, SyncDevice, SyncLogEntry,
    UploadSession, UploadStatus, generate_device_token, hash_device_token, update_device_cursor,
};

use crate::auth::resolve_user_id;
use crate::dto::{
    AcknowledgeChangesRequest, ChangesResponse, ChunkUploadedResponse, ConflictResponse,
    CreateUploadSessionRequest, DeviceRegistrationResponse, DeviceResponse, GetChangesParams,
    RegisterDeviceRequest, ReportChangesRequest, ReportChangesResponse, ResolveConflictRequest,
    SyncChangeResponse, UpdateDeviceRequest, UploadSessionResponse,
};
use crate::error::{ApiError, ApiResult};
use crate::state::AppState;
/// Default size of a single upload chunk when the client does not specify one.
const DEFAULT_CHUNK_SIZE: u64 = 4 * 1024 * 1024; // 4MB
/// Default page size for `GET /api/sync/changes` when no `limit` is supplied.
const DEFAULT_CHANGES_LIMIT: u64 = 100;
/// Register a new sync device
/// POST /api/sync/devices
///
/// Creates a device record for the authenticated user and returns it along
/// with the plaintext device token. Only the token's hash is persisted, so
/// this response is the client's single chance to capture the token.
pub async fn register_device(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Json(req): Json<RegisterDeviceRequest>,
) -> ApiResult<Json<DeviceRegistrationResponse>> {
    // Sync must be switched on in the server configuration.
    {
        let config = state.config.read().await;
        if !config.sync.enabled {
            return Err(ApiError::bad_request("Sync is not enabled"));
        }
    }
    let user_id = resolve_user_id(&state.storage, &username).await?;
    let device_type: DeviceType = req
        .device_type
        .parse()
        .map_err(|_| ApiError::bad_request("Invalid device type"))?;
    // Mint the plaintext token once; store only its hash.
    let plain_token = generate_device_token();
    let token_hash = hash_device_token(&plain_token);
    let now = Utc::now();
    let device = SyncDevice {
        id: DeviceId(Uuid::now_v7()),
        user_id,
        name: req.name,
        device_type,
        client_version: req.client_version,
        os_info: req.os_info,
        last_sync_at: None,
        last_seen_at: now,
        sync_cursor: Some(0),
        enabled: true,
        created_at: now,
        updated_at: now,
    };
    let registered = state
        .storage
        .register_device(&device, &token_hash)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to register device: {}", e)))?;
    Ok(Json(DeviceRegistrationResponse {
        device: registered.into(),
        device_token: plain_token,
    }))
}
/// List user's sync devices
/// GET /api/sync/devices
///
/// Returns every sync device registered to the authenticated user.
pub async fn list_devices(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
) -> ApiResult<Json<Vec<DeviceResponse>>> {
    let owner = resolve_user_id(&state.storage, &username).await?;
    let devices = match state.storage.list_user_devices(owner).await {
        Ok(list) => list,
        Err(e) => return Err(ApiError::internal(format!("Failed to list devices: {}", e))),
    };
    Ok(Json(devices.into_iter().map(Into::into).collect()))
}
/// Get device details
/// GET /api/sync/devices/{id}
///
/// Fetches a single device record; devices are visible to their owner only.
pub async fn get_device(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Path(id): Path<Uuid>,
) -> ApiResult<Json<DeviceResponse>> {
    let requester = resolve_user_id(&state.storage, &username).await?;
    let device = state
        .storage
        .get_device(DeviceId(id))
        .await
        .map_err(|e| ApiError::not_found(format!("Device not found: {}", e)))?;
    // Verify ownership before revealing anything about the device.
    if device.user_id == requester {
        Ok(Json(device.into()))
    } else {
        Err(ApiError::forbidden("Not authorized to access this device"))
    }
}
/// Update a device
/// PUT /api/sync/devices/{id}
///
/// Partially updates a device record (name and/or enabled flag). Only fields
/// present in the request body are changed; only the owner may update.
pub async fn update_device(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Path(id): Path<Uuid>,
    Json(req): Json<UpdateDeviceRequest>,
) -> ApiResult<Json<DeviceResponse>> {
    let requester = resolve_user_id(&state.storage, &username).await?;
    let mut device = state
        .storage
        .get_device(DeviceId(id))
        .await
        .map_err(|e| ApiError::not_found(format!("Device not found: {}", e)))?;
    // Only the owning user may modify the device record.
    if device.user_id != requester {
        return Err(ApiError::forbidden("Not authorized to update this device"));
    }
    // Apply only the fields the client actually sent.
    if let Some(new_name) = req.name {
        device.name = new_name;
    }
    if let Some(flag) = req.enabled {
        device.enabled = flag;
    }
    state
        .storage
        .update_device(&device)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to update device: {}", e)))?;
    Ok(Json(device.into()))
}
/// Delete a device
/// DELETE /api/sync/devices/{id}
///
/// Removes a device record after verifying the caller owns it.
pub async fn delete_device(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Path(id): Path<Uuid>,
) -> ApiResult<StatusCode> {
    let requester = resolve_user_id(&state.storage, &username).await?;
    let device = state
        .storage
        .get_device(DeviceId(id))
        .await
        .map_err(|e| ApiError::not_found(format!("Device not found: {}", e)))?;
    // Deleting someone else's device is forbidden.
    if device.user_id != requester {
        return Err(ApiError::forbidden("Not authorized to delete this device"));
    }
    state
        .storage
        .delete_device(DeviceId(id))
        .await
        .map_err(|e| ApiError::internal(format!("Failed to delete device: {}", e)))?;
    Ok(StatusCode::NO_CONTENT)
}
/// Regenerate device token
/// POST /api/sync/devices/{id}/token
///
/// Mints a fresh device token (invalidating the old one via the stored hash)
/// and returns it in plaintext alongside the updated device record.
pub async fn regenerate_token(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Path(id): Path<Uuid>,
) -> ApiResult<Json<DeviceRegistrationResponse>> {
    let requester = resolve_user_id(&state.storage, &username).await?;
    let device = state
        .storage
        .get_device(DeviceId(id))
        .await
        .map_err(|e| ApiError::not_found(format!("Device not found: {}", e)))?;
    // Verify ownership
    if device.user_id != requester {
        return Err(ApiError::forbidden(
            "Not authorized to regenerate token for this device",
        ));
    }
    // Mint a fresh token; re-registering the device persists the new hash.
    let fresh_token = generate_device_token();
    let fresh_hash = hash_device_token(&fresh_token);
    let updated = state
        .storage
        .register_device(&device, &fresh_hash)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to regenerate token: {}", e)))?;
    Ok(Json(DeviceRegistrationResponse {
        device: updated.into(),
        device_token: fresh_token,
    }))
}
/// Get changes since cursor
/// GET /api/sync/changes
///
/// Returns up to `limit` sync-log entries strictly after `cursor`, the new
/// cursor (sequence of the last delivered entry, or the caller's cursor when
/// nothing was delivered), and a `has_more` flag computed by over-fetching
/// one extra row.
pub async fn get_changes(
    State(state): State<AppState>,
    Query(params): Query<GetChangesParams>,
) -> ApiResult<Json<ChangesResponse>> {
    let config = state.config.read().await;
    if !config.sync.enabled {
        return Err(ApiError::bad_request("Sync is not enabled"));
    }
    drop(config);
    let cursor = params.cursor.unwrap_or(0);
    let limit = params.limit.unwrap_or(DEFAULT_CHANGES_LIMIT);
    // Fetch one extra row to detect whether more changes remain.
    // `saturating_add` guards against a client-supplied limit of u64::MAX,
    // which previously overflowed (panicking in debug builds).
    let changes = state
        .storage
        .get_changes_since(cursor, limit.saturating_add(1))
        .await
        .map_err(|e| ApiError::internal(format!("Failed to get changes: {}", e)))?;
    let has_more = changes.len() as u64 > limit;
    // Convert without truncating `limit` on 32-bit targets.
    let page_len = usize::try_from(limit).unwrap_or(usize::MAX);
    let changes: Vec<SyncChangeResponse> =
        changes.into_iter().take(page_len).map(Into::into).collect();
    let new_cursor = changes.last().map(|c| c.sequence).unwrap_or(cursor);
    Ok(Json(ChangesResponse {
        changes,
        cursor: new_cursor,
        has_more,
    }))
}
/// Report local changes from client
/// POST /api/sync/report
///
/// For each reported change, the client's content hash is compared against
/// the server's current item at the same path. Matching or unknown paths are
/// accepted (or queued for upload); a hash mismatch is a conflict, which is
/// auto-resolved per the configured `default_conflict_resolution` or handed
/// back to the client for manual handling.
///
/// NOTE(review): conflicts resolved in the `ServerWins`/`ClientWins` arms are
/// constructed but never persisted, and `device_id` is a nil placeholder —
/// this endpoint does not read `X-Device-Token`. Confirm whether conflicts
/// should be recorded here and attributed to the calling device.
pub async fn report_changes(
    State(state): State<AppState>,
    Extension(_username): Extension<String>,
    Json(req): Json<ReportChangesRequest>,
) -> ApiResult<Json<ReportChangesResponse>> {
    let config = state.config.read().await;
    if !config.sync.enabled {
        return Err(ApiError::bad_request("Sync is not enabled"));
    }
    let conflict_resolution = config.sync.default_conflict_resolution.clone();
    drop(config);
    // Outcome buckets returned to the client.
    let mut accepted = Vec::new();
    let mut conflicts = Vec::new();
    let mut upload_required = Vec::new();
    for change in req.changes {
        // Check for conflicts
        if let Some(content_hash) = &change.content_hash {
            // Lookup is best-effort: a storage error is treated the same as
            // "path unknown to the server" (no conflict possible).
            let server_state = state
                .storage
                .get_media_by_path(FilePath::new(&change.path))
                .await
                .ok()
                .flatten();
            if let Some(server_item) = server_state {
                let client_hash = ContentHash(content_hash.clone());
                if server_item.content_hash != client_hash {
                    // Conflict detected
                    let conflict = SyncConflict {
                        id: Uuid::now_v7(),
                        device_id: DeviceId(Uuid::nil()), // Will be set by device context
                        path: change.path.clone(),
                        local_hash: content_hash.clone(),
                        local_mtime: change.local_mtime.unwrap_or(0),
                        server_hash: server_item.content_hash.to_string(),
                        server_mtime: server_item.updated_at.timestamp(),
                        detected_at: Utc::now(),
                        resolved_at: None,
                        resolution: None,
                    };
                    // Auto-resolve if configured
                    match conflict_resolution {
                        ConflictResolution::ServerWins => {
                            // Client should download server version
                            accepted.push(change.path);
                        }
                        ConflictResolution::ClientWins => {
                            // Client should upload
                            upload_required.push(change.path);
                        }
                        ConflictResolution::KeepBoth | ConflictResolution::Manual => {
                            conflicts.push(conflict.into());
                        }
                    }
                    continue;
                }
            }
        }
        // No conflict, check if upload is needed
        match change.change_type.as_str() {
            "created" | "modified" => {
                // Changes carrying a content hash need their bytes uploaded;
                // hash-less ones are simply accepted (presumably
                // metadata-only — confirm against the client protocol).
                if change.content_hash.is_some() {
                    upload_required.push(change.path);
                } else {
                    accepted.push(change.path);
                }
            }
            "deleted" => {
                // Record deletion
                let entry = SyncLogEntry {
                    id: Uuid::now_v7(),
                    sequence: 0, // Will be assigned by storage
                    change_type: SyncChangeType::Deleted,
                    media_id: None,
                    path: change.path.clone(),
                    content_hash: None,
                    file_size: None,
                    metadata_json: None,
                    changed_by_device: None,
                    timestamp: Utc::now(),
                };
                // Acknowledged only if the log write succeeds; a failed write
                // silently drops the path from `accepted`.
                if state.storage.record_sync_change(&entry).await.is_ok() {
                    accepted.push(change.path);
                }
            }
            _ => {
                // Unknown change types are accepted as a no-op.
                accepted.push(change.path);
            }
        }
    }
    Ok(Json(ReportChangesResponse {
        accepted,
        conflicts,
        upload_required,
    }))
}
/// Acknowledge processed changes
/// POST /api/sync/ack
///
/// Advances the calling device's sync cursor after the client has applied a
/// batch of changes. The device authenticates via the `X-Device-Token`
/// header; only the token's hash is compared against storage.
pub async fn acknowledge_changes(
    State(state): State<AppState>,
    Extension(_username): Extension<String>,
    headers: HeaderMap,
    Json(req): Json<AcknowledgeChangesRequest>,
) -> ApiResult<StatusCode> {
    let token = headers
        .get("X-Device-Token")
        .and_then(|value| value.to_str().ok())
        .ok_or_else(|| ApiError::bad_request("Missing X-Device-Token header"))?;
    let device = state
        .storage
        .get_device_by_token(&hash_device_token(token))
        .await
        .map_err(|e| ApiError::internal(format!("Failed to get device: {}", e)))?
        .ok_or_else(|| ApiError::unauthorized("Invalid device token"))?;
    // Persist the client's new sync position.
    update_device_cursor(&state.storage, device.id, req.cursor)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to update cursor: {}", e)))?;
    Ok(StatusCode::OK)
}
/// List unresolved conflicts
/// GET /api/sync/conflicts
///
/// Returns the unresolved sync conflicts for the device identified by the
/// `X-Device-Token` header.
pub async fn list_conflicts(
    State(state): State<AppState>,
    Extension(_username): Extension<String>,
    headers: HeaderMap,
) -> ApiResult<Json<Vec<ConflictResponse>>> {
    // Authenticate the device from its token header.
    let token = headers
        .get("X-Device-Token")
        .and_then(|value| value.to_str().ok())
        .ok_or_else(|| ApiError::bad_request("Missing X-Device-Token header"))?;
    let device = state
        .storage
        .get_device_by_token(&hash_device_token(token))
        .await
        .map_err(|e| ApiError::internal(format!("Failed to get device: {}", e)))?
        .ok_or_else(|| ApiError::unauthorized("Invalid device token"))?;
    let open_conflicts = state
        .storage
        .get_unresolved_conflicts(device.id)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to get conflicts: {}", e)))?;
    Ok(Json(open_conflicts.into_iter().map(Into::into).collect()))
}
/// Resolve a sync conflict
/// POST /api/sync/conflicts/{id}/resolve
///
/// Applies the client-chosen resolution strategy to a conflict. "manual" is
/// not an accepted value here — this endpoint *is* the manual resolution.
pub async fn resolve_conflict(
    State(state): State<AppState>,
    Extension(_username): Extension<String>,
    Path(id): Path<Uuid>,
    Json(req): Json<ResolveConflictRequest>,
) -> ApiResult<StatusCode> {
    // Translate the wire string into a resolution strategy.
    let resolution = match req.resolution.as_str() {
        "server_wins" => Some(ConflictResolution::ServerWins),
        "client_wins" => Some(ConflictResolution::ClientWins),
        "keep_both" => Some(ConflictResolution::KeepBoth),
        _ => None,
    }
    .ok_or_else(|| ApiError::bad_request("Invalid resolution type"))?;
    state
        .storage
        .resolve_conflict(id, resolution)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to resolve conflict: {}", e)))?;
    Ok(StatusCode::OK)
}
/// Create an upload session for chunked upload
/// POST /api/sync/upload
///
/// Authenticates the calling device via the `X-Device-Token` header and
/// registers a pending session describing the target path, the expected
/// content hash/size, and the chunk layout the client will upload against.
pub async fn create_upload(
    State(state): State<AppState>,
    Extension(_username): Extension<String>,
    headers: HeaderMap,
    Json(req): Json<CreateUploadSessionRequest>,
) -> ApiResult<Json<UploadSessionResponse>> {
    let config = state.config.read().await;
    if !config.sync.enabled {
        return Err(ApiError::bad_request("Sync is not enabled"));
    }
    let upload_timeout_hours = config.sync.upload_timeout_hours;
    drop(config);
    let device_token = headers
        .get("X-Device-Token")
        .and_then(|v| v.to_str().ok())
        .ok_or_else(|| ApiError::bad_request("Missing X-Device-Token header"))?;
    let token_hash = hash_device_token(device_token);
    let device = state
        .storage
        .get_device_by_token(&token_hash)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to get device: {}", e)))?
        .ok_or_else(|| ApiError::unauthorized("Invalid device token"))?;
    let chunk_size = req.chunk_size.unwrap_or(DEFAULT_CHUNK_SIZE);
    // A zero chunk size is meaningless and previously caused a
    // divide-by-zero panic in the chunk-count computation; reject it early.
    if chunk_size == 0 {
        return Err(ApiError::bad_request("chunk_size must be greater than 0"));
    }
    // Ceiling division (the final chunk may be shorter than `chunk_size`);
    // `div_ceil` also avoids the overflow of `size + chunk_size - 1`.
    let chunk_count = req.expected_size.div_ceil(chunk_size);
    let now = Utc::now();
    let session = UploadSession {
        id: Uuid::now_v7(),
        device_id: device.id,
        target_path: req.target_path,
        expected_hash: ContentHash(req.expected_hash),
        expected_size: req.expected_size,
        chunk_size,
        chunk_count,
        status: UploadStatus::Pending,
        created_at: now,
        expires_at: now + chrono::Duration::hours(upload_timeout_hours as i64),
        last_activity: now,
    };
    state
        .storage
        .create_upload_session(&session)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to create upload session: {}", e)))?;
    Ok(Json(session.into()))
}
/// Upload a chunk
/// PUT /api/sync/upload/{id}/chunks/{index}
///
/// Records a single chunk of an in-flight upload session. The chunk body is
/// hashed with BLAKE3 so the content can be cross-checked later; persisting
/// the chunk bytes themselves is still a placeholder.
pub async fn upload_chunk(
    State(state): State<AppState>,
    Path((session_id, chunk_index)): Path<(Uuid, u64)>,
    _headers: HeaderMap,
    body: axum::body::Bytes,
) -> ApiResult<Json<ChunkUploadedResponse>> {
    let session = state
        .storage
        .get_upload_session(session_id)
        .await
        .map_err(|e| ApiError::not_found(format!("Upload session not found: {}", e)))?;
    // Reject sessions that can no longer accept data. Previously only the
    // `Expired` status was checked, so chunks could be written into
    // completed or cancelled sessions.
    match session.status {
        UploadStatus::Expired => {
            return Err(ApiError::bad_request("Upload session has expired"));
        }
        UploadStatus::Completed => {
            return Err(ApiError::bad_request("Upload session is already completed"));
        }
        UploadStatus::Cancelled => {
            return Err(ApiError::bad_request("Upload session was cancelled"));
        }
        _ => {}
    }
    // The stored status may lag wall-clock expiry; enforce `expires_at` too.
    if Utc::now() > session.expires_at {
        return Err(ApiError::bad_request("Upload session has expired"));
    }
    if chunk_index >= session.chunk_count {
        return Err(ApiError::bad_request("Invalid chunk index"));
    }
    // Content-address the chunk so completion can verify integrity.
    let chunk_hash = blake3::hash(&body).to_hex().to_string();
    let chunk = ChunkInfo {
        upload_id: session_id,
        chunk_index,
        offset: chunk_index * session.chunk_size,
        size: body.len() as u64,
        hash: chunk_hash,
        received_at: Utc::now(),
    };
    state
        .storage
        .record_chunk(session_id, &chunk)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to record chunk: {}", e)))?;
    // TODO: persist the chunk bytes to temp/managed storage — only chunk
    // metadata is recorded today.
    Ok(Json(ChunkUploadedResponse {
        chunk_index,
        received: true,
    }))
}
/// Get upload session status
/// GET /api/sync/upload/{id}
///
/// Returns the current state of an upload session (status, chunk layout,
/// expiry), or 404 when the session is unknown.
pub async fn get_upload_status(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> ApiResult<Json<UploadSessionResponse>> {
    match state.storage.get_upload_session(id).await {
        Ok(session) => Ok(Json(session.into())),
        Err(e) => Err(ApiError::not_found(format!(
            "Upload session not found: {}",
            e
        ))),
    }
}
/// Complete an upload session
/// POST /api/sync/upload/{id}/complete
///
/// Verifies that every expected chunk was recorded, marks the session
/// completed, and appends a `Created` entry to the sync log so other devices
/// pick the file up.
///
/// NOTE(review): only the chunk *count* is checked — the assembled content is
/// not re-hashed against `expected_hash`, chunk sizes are not summed against
/// `expected_size`, and an expired or cancelled session can still be
/// completed. Confirm whether integrity verification happens in the storage
/// layer.
pub async fn complete_upload(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> ApiResult<StatusCode> {
    let mut session = state
        .storage
        .get_upload_session(id)
        .await
        .map_err(|e| ApiError::not_found(format!("Upload session not found: {}", e)))?;
    // Verify all chunks received
    let chunks = state
        .storage
        .get_upload_chunks(id)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to get chunks: {}", e)))?;
    if chunks.len() != session.chunk_count as usize {
        return Err(ApiError::bad_request(format!(
            "Missing chunks: expected {}, got {}",
            session.chunk_count,
            chunks.len()
        )));
    }
    // Mark session as completed
    session.status = UploadStatus::Completed;
    state
        .storage
        .update_upload_session(&session)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to update session: {}", e)))?;
    // Record the sync change. `sequence: 0` is a placeholder; the storage
    // layer assigns the real monotonically increasing sequence number.
    let entry = SyncLogEntry {
        id: Uuid::now_v7(),
        sequence: 0,
        change_type: SyncChangeType::Created,
        media_id: None,
        path: session.target_path,
        content_hash: Some(session.expected_hash),
        file_size: Some(session.expected_size),
        metadata_json: None,
        changed_by_device: Some(session.device_id),
        timestamp: Utc::now(),
    };
    state
        .storage
        .record_sync_change(&entry)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to record change: {}", e)))?;
    Ok(StatusCode::OK)
}
/// Cancel an upload session
/// DELETE /api/sync/upload/{id}
///
/// Marks the session cancelled; already-recorded chunk metadata is left
/// untouched by this handler.
pub async fn cancel_upload(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> ApiResult<StatusCode> {
    // Load, flip the status, and write the session back.
    let mut session = state
        .storage
        .get_upload_session(id)
        .await
        .map_err(|e| ApiError::not_found(format!("Upload session not found: {}", e)))?;
    session.status = UploadStatus::Cancelled;
    state
        .storage
        .update_upload_session(&session)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to cancel session: {}", e)))?;
    Ok(StatusCode::NO_CONTENT)
}
/// Download a file for sync (supports Range header)
/// GET /api/sync/download/{*path}
pub async fn download_file(
State(state): State<AppState>,
Path(path): Path<String>,
headers: HeaderMap,
) -> ApiResult<impl IntoResponse> {
let item = state
.storage
.get_media_by_path(FilePath::new(&path))
.await
.map_err(|e| ApiError::internal(format!("Failed to get media: {}", e)))?
.ok_or_else(|| ApiError::not_found("File not found"))?;
let file = tokio::fs::File::open(&item.path)
.await
.map_err(|e| ApiError::not_found(format!("File not found: {}", e)))?;
let metadata = file
.metadata()
.await
.map_err(|e| ApiError::internal(format!("Failed to get metadata: {}", e)))?;
let file_size = metadata.len();
// Check for Range header
if let Some(range_header) = headers.get(header::RANGE) {
if let Ok(range_str) = range_header.to_str() {
if let Some(range) = parse_range_header(range_str, file_size) {
// Partial content response
let (start, end) = range;
let length = end - start + 1;
let file = tokio::fs::File::open(&item.path)
.await
.map_err(|e| ApiError::internal(format!("Failed to reopen file: {}", e)))?;
let stream = ReaderStream::new(file);
let body = Body::from_stream(stream);
return Ok((
StatusCode::PARTIAL_CONTENT,
[
(header::CONTENT_TYPE, item.media_type.mime_type()),
(header::CONTENT_LENGTH, length.to_string()),
(
header::CONTENT_RANGE,
format!("bytes {}-{}/{}", start, end, file_size),
),
(header::ACCEPT_RANGES, "bytes".to_string()),
],
body,
)
.into_response());
}
}
}
// Full content response
let stream = ReaderStream::new(file);
let body = Body::from_stream(stream);
Ok((
StatusCode::OK,
[
(header::CONTENT_TYPE, item.media_type.mime_type()),
(header::CONTENT_LENGTH, file_size.to_string()),
(header::ACCEPT_RANGES, "bytes".to_string()),
],
body,
)
.into_response())
}
/// Parse a single HTTP `Range` header value against a file of `file_size` bytes.
///
/// Supports `bytes=start-end`, `bytes=start-` (to EOF), and the RFC 7233
/// suffix form `bytes=-N` (last `N` bytes). Returns the inclusive
/// `(start, end)` byte pair, or `None` when the header is malformed, the
/// range is unsatisfiable, or the file is empty (no byte is addressable —
/// this also avoids the `file_size - 1` underflow the previous version hit
/// on zero-length files). Multi-range requests are not supported.
fn parse_range_header(range: &str, file_size: u64) -> Option<(u64, u64)> {
    let spec = range.strip_prefix("bytes=")?;
    // Multi-range requests are not supported; serve the full file instead.
    if spec.contains(',') {
        return None;
    }
    let (start_s, end_s) = spec.split_once('-')?;
    if file_size == 0 {
        return None;
    }
    let (start, end) = if start_s.is_empty() {
        // Suffix form: `bytes=-N` means the final N bytes of the file.
        let suffix: u64 = end_s.parse().ok()?;
        if suffix == 0 {
            return None;
        }
        (file_size.saturating_sub(suffix), file_size - 1)
    } else {
        let start: u64 = start_s.parse().ok()?;
        let end: u64 = if end_s.is_empty() {
            // Open-ended form: `bytes=start-` runs to EOF.
            file_size - 1
        } else {
            end_s.parse().ok()?
        };
        (start, end)
    };
    if start > end || end >= file_size {
        return None;
    }
    Some((start, end))
}

View file

@ -0,0 +1,169 @@
use axum::{
Json,
extract::{Multipart, Path, State},
http::{StatusCode, header},
response::IntoResponse,
};
use tokio_util::io::ReaderStream;
use uuid::Uuid;
use crate::dto::{ManagedStorageStatsResponse, UploadResponse};
use crate::error::{ApiError, ApiResult};
use crate::state::AppState;
use pinakes_core::model::MediaId;
use pinakes_core::upload;
/// Upload a file to managed storage
/// POST /api/upload
///
/// Accepts a multipart request, reads the first field as the file payload,
/// and hands it to the core upload pipeline. The feature must be both wired
/// (service present in state) and enabled in configuration.
pub async fn upload_file(
    State(state): State<AppState>,
    mut multipart: Multipart,
) -> ApiResult<Json<UploadResponse>> {
    let managed_storage = state
        .managed_storage
        .as_ref()
        .ok_or_else(|| ApiError::bad_request("Managed storage is not enabled"))?;
    {
        let config = state.config.read().await;
        if !config.managed_storage.enabled {
            return Err(ApiError::bad_request("Managed storage is not enabled"));
        }
    }
    // Only the first multipart field is consumed; it must carry the file.
    let field = multipart
        .next_field()
        .await
        .map_err(|e| ApiError::bad_request(format!("Failed to read multipart field: {}", e)))?
        .ok_or_else(|| ApiError::bad_request("No file provided"))?;
    let original_filename = field.file_name().unwrap_or("unknown").to_string();
    let content_type = field
        .content_type()
        .unwrap_or("application/octet-stream")
        .to_string();
    let data = field
        .bytes()
        .await
        .map_err(|e| ApiError::bad_request(format!("Failed to read file data: {}", e)))?;
    // Hand off to the core upload pipeline.
    let result = upload::process_upload_bytes(
        &state.storage,
        managed_storage.as_ref(),
        &data,
        &original_filename,
        Some(&content_type),
    )
    .await
    .map_err(|e| ApiError::internal(format!("Upload failed: {}", e)))?;
    Ok(Json(result.into()))
}
/// Download a managed file
/// GET /api/media/{id}/download
///
/// External files are streamed from their on-disk path; managed files are
/// streamed from content-addressable blob storage. Either way the response
/// carries the media MIME type and an attachment disposition whose filename
/// is sanitized so stored names cannot break or inject into the header.
pub async fn download_file(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> ApiResult<impl IntoResponse> {
    let media_id = MediaId(id);
    let item = state
        .storage
        .get_media(media_id)
        .await
        .map_err(|e| ApiError::not_found(format!("Media not found: {}", e)))?;
    let managed_storage = state
        .managed_storage
        .as_ref()
        .ok_or_else(|| ApiError::bad_request("Managed storage is not enabled"))?;
    let content_type = item.media_type.mime_type();
    // Previously the raw stored filename was interpolated into the quoted
    // Content-Disposition parameter, so a `"`, `\` or control character in
    // it produced an invalid (or attacker-shaped) header.
    let filename =
        sanitize_disposition_filename(&item.original_filename.unwrap_or(item.file_name));
    // Pick the byte source: managed files live in content-addressable
    // storage keyed by content hash, external files at their original path.
    let body = if item.storage_mode == pinakes_core::model::StorageMode::Managed {
        let file = managed_storage
            .open(&item.content_hash)
            .await
            .map_err(|e| ApiError::not_found(format!("Blob not found: {}", e)))?;
        axum::body::Body::from_stream(ReaderStream::new(file))
    } else {
        let file = tokio::fs::File::open(&item.path)
            .await
            .map_err(|e| ApiError::not_found(format!("File not found: {}", e)))?;
        axum::body::Body::from_stream(ReaderStream::new(file))
    };
    Ok((
        [
            (header::CONTENT_TYPE, content_type),
            (
                header::CONTENT_DISPOSITION,
                format!("attachment; filename=\"{}\"", filename),
            ),
        ],
        body,
    ))
}

/// Make a stored filename safe to embed in a quoted `Content-Disposition`
/// parameter: drop control characters and replace `"` and `\` (which would
/// terminate or escape the quoted string). Falls back to "download" when
/// nothing survives.
fn sanitize_disposition_filename(name: &str) -> String {
    let cleaned: String = name
        .chars()
        .filter(|c| !c.is_control())
        .map(|c| if c == '"' || c == '\\' { '_' } else { c })
        .collect();
    if cleaned.is_empty() {
        "download".to_string()
    } else {
        cleaned
    }
}
/// Migrate an external file to managed storage
/// POST /api/media/{id}/move-to-managed
///
/// Delegates to the core migration routine; requires the managed-storage
/// service to be wired into the application state.
pub async fn move_to_managed(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> ApiResult<StatusCode> {
    let service = state
        .managed_storage
        .as_ref()
        .ok_or_else(|| ApiError::bad_request("Managed storage is not enabled"))?;
    upload::migrate_to_managed(&state.storage, service.as_ref(), MediaId(id))
        .await
        .map_err(|e| ApiError::internal(format!("Migration failed: {}", e)))?;
    Ok(StatusCode::NO_CONTENT)
}
/// Get managed storage statistics
/// GET /api/managed/stats
///
/// Reports aggregate managed-storage statistics from the storage layer.
pub async fn managed_stats(
    State(state): State<AppState>,
) -> ApiResult<Json<ManagedStorageStatsResponse>> {
    match state.storage.managed_storage_stats().await {
        Ok(stats) => Ok(Json(stats.into())),
        Err(e) => Err(ApiError::internal(format!("Failed to get stats: {}", e))),
    }
}

View file

@ -6,6 +6,7 @@ use tokio::sync::RwLock;
use pinakes_core::cache::CacheLayer;
use pinakes_core::config::Config;
use pinakes_core::jobs::JobQueue;
use pinakes_core::managed_storage::ManagedStorageService;
use pinakes_core::plugin::PluginManager;
use pinakes_core::scan::ScanProgress;
use pinakes_core::scheduler::TaskScheduler;
@ -26,4 +27,5 @@ pub struct AppState {
pub scheduler: Arc<TaskScheduler>,
pub plugin_manager: Option<Arc<PluginManager>>,
pub transcode_service: Option<Arc<TranscodeService>>,
pub managed_storage: Option<Arc<ManagedStorageService>>,
}

View file

@ -11,9 +11,9 @@ use tower::ServiceExt;
use pinakes_core::cache::CacheLayer;
use pinakes_core::config::{
AccountsConfig, AnalyticsConfig, CloudConfig, Config, DirectoryConfig, EnrichmentConfig,
JobsConfig, PhotoConfig, PluginsConfig, ScanningConfig, ServerConfig, SqliteConfig,
StorageBackendType, StorageConfig, ThumbnailConfig, TlsConfig, TranscodingConfig, UiConfig,
UserAccount, UserRole, WebhookConfig,
JobsConfig, ManagedStorageConfig, PhotoConfig, PluginsConfig, ScanningConfig, ServerConfig,
SharingConfig, SqliteConfig, StorageBackendType, StorageConfig, SyncConfig, ThumbnailConfig,
TlsConfig, TranscodingConfig, UiConfig, UserAccount, UserRole, WebhookConfig,
};
use pinakes_core::jobs::JobQueue;
use pinakes_core::storage::StorageBackend;
@ -127,6 +127,9 @@ fn default_config() -> Config {
cloud: CloudConfig::default(),
analytics: AnalyticsConfig::default(),
photos: PhotoConfig::default(),
managed_storage: ManagedStorageConfig::default(),
sync: SyncConfig::default(),
sharing: SharingConfig::default(),
}
}
@ -156,6 +159,7 @@ async fn setup_app() -> axum::Router {
scheduler: Arc::new(scheduler),
plugin_manager: None,
transcode_service: None,
managed_storage: None,
};
pinakes_server::app::create_router(state)
@ -227,6 +231,7 @@ async fn setup_app_with_auth() -> (axum::Router, String, String, String) {
scheduler: Arc::new(scheduler),
plugin_manager: None,
transcode_service: None,
managed_storage: None,
};
let app = pinakes_server::app::create_router(state);

View file

@ -11,9 +11,9 @@ use tower::ServiceExt;
use pinakes_core::cache::CacheLayer;
use pinakes_core::config::{
AccountsConfig, AnalyticsConfig, CloudConfig, Config, DirectoryConfig, EnrichmentConfig,
JobsConfig, PhotoConfig, PluginsConfig, ScanningConfig, ServerConfig, SqliteConfig,
StorageBackendType, StorageConfig, ThumbnailConfig, TlsConfig, TranscodingConfig, UiConfig,
WebhookConfig,
JobsConfig, ManagedStorageConfig, PhotoConfig, PluginsConfig, ScanningConfig, ServerConfig,
SharingConfig, SqliteConfig, StorageBackendType, StorageConfig, SyncConfig, ThumbnailConfig,
TlsConfig, TranscodingConfig, UiConfig, WebhookConfig,
};
use pinakes_core::jobs::JobQueue;
use pinakes_core::plugin::PluginManager;
@ -93,6 +93,9 @@ async fn setup_app_with_plugins() -> (axum::Router, Arc<PluginManager>, tempfile
cloud: CloudConfig::default(),
analytics: AnalyticsConfig::default(),
photos: PhotoConfig::default(),
managed_storage: ManagedStorageConfig::default(),
sync: SyncConfig::default(),
sharing: SharingConfig::default(),
};
let job_queue = JobQueue::new(1, |_id, _kind, _cancel, _jobs| tokio::spawn(async {}));
@ -114,6 +117,7 @@ async fn setup_app_with_plugins() -> (axum::Router, Arc<PluginManager>, tempfile
scheduler: Arc::new(scheduler),
plugin_manager: Some(plugin_manager.clone()),
transcode_service: None,
managed_storage: None,
};
let router = pinakes_server::app::create_router(state);