treewide: better cross-device sync capabilities; in-database storage

Signed-off-by: NotAShelf <raf@notashelf.dev>
Change-Id: Id99798df6f7e4470caae8a193c2654aa6a6a6964
This commit is contained in:
raf 2026-02-05 08:28:50 +03:00
commit f34c78b238
Signed by: NotAShelf
GPG key ID: 29D95B64378DB4BF
41 changed files with 8806 additions and 138 deletions

25
Cargo.lock generated
View file

@ -273,6 +273,7 @@ dependencies = [
"matchit",
"memchr",
"mime",
"multer",
"percent-encoding",
"pin-project-lite",
"serde_core",
@ -4057,6 +4058,23 @@ dependencies = [
"windows-sys 0.60.2",
]
[[package]]
name = "multer"
version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83e87776546dc87511aa5ee218730c92b666d7264ab6ed41f9d215af9cd5224b"
dependencies = [
"bytes",
"encoding_rs",
"futures-util",
"http",
"httparse",
"memchr",
"mime",
"spin",
"version_check",
]
[[package]]
name = "mutate_once"
version = "0.1.2"
@ -4899,6 +4917,7 @@ dependencies = [
"argon2",
"axum",
"axum-server",
"blake3",
"chrono",
"clap",
"governor",
@ -6401,6 +6420,12 @@ dependencies = [
"smallvec",
]
[[package]]
name = "spin"
version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
[[package]]
name = "spinning_top"
version = "0.3.0"

View file

@ -76,7 +76,7 @@ notify = { version = "8.2.0", features = ["macos_fsevent"] }
winnow = "0.7.14"
# HTTP server
axum = { version = "0.8.8", features = ["macros"] }
axum = { version = "0.8.8", features = ["macros", "multipart"] }
tower = "0.5.3"
tower-http = { version = "0.6.8", features = ["cors", "trace", "set-header"] }
governor = "0.8.1"

View file

@ -104,6 +104,12 @@ pub struct Config {
pub analytics: AnalyticsConfig,
#[serde(default)]
pub photos: PhotoConfig,
#[serde(default)]
pub managed_storage: ManagedStorageConfig,
#[serde(default)]
pub sync: SyncConfig,
#[serde(default)]
pub sharing: SharingConfig,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -560,6 +566,180 @@ impl Default for PhotoConfig {
}
}
// ===== Managed Storage Configuration =====
/// Settings for server-side, content-addressable file storage.
///
/// All fields have serde defaults so the whole section may be omitted
/// from the config file (see `#[serde(default)]` on `Config.managed_storage`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ManagedStorageConfig {
    /// Enable managed storage for file uploads
    #[serde(default)]
    pub enabled: bool,
    /// Directory where managed files are stored
    #[serde(default = "default_managed_storage_dir")]
    pub storage_dir: PathBuf,
    /// Maximum upload size in bytes (default: 10GB)
    #[serde(default = "default_max_upload_size")]
    pub max_upload_size: u64,
    /// Allowed MIME types for uploads (empty = allow all)
    #[serde(default)]
    pub allowed_mime_types: Vec<String>,
    /// Automatically clean up orphaned blobs
    #[serde(default = "default_true")]
    pub auto_cleanup: bool,
    /// Verify file integrity on read
    // NOTE(review): full-content re-hash per read (see ManagedStorageService);
    // expensive for large blobs, hence off by default.
    #[serde(default)]
    pub verify_on_read: bool,
}
/// Default location for managed blobs: `<data_dir>/managed`.
fn default_managed_storage_dir() -> PathBuf {
    let mut dir = Config::default_data_dir();
    dir.push("managed");
    dir
}
/// Default cap on a single upload: 10 GiB, expressed in bytes.
fn default_max_upload_size() -> u64 {
    // 10 * 2^30 bytes.
    10 << 30
}
impl Default for ManagedStorageConfig {
    /// Disabled by default; every other field mirrors the per-field
    /// `serde(default = ...)` helpers so a missing config section and a
    /// `Default::default()` value behave identically.
    fn default() -> Self {
        Self {
            enabled: false,
            storage_dir: default_managed_storage_dir(),
            max_upload_size: default_max_upload_size(),
            allowed_mime_types: vec![],
            auto_cleanup: true,
            verify_on_read: false,
        }
    }
}
// ===== Sync Configuration =====
/// Strategy used to resolve conflicting edits of the same item from
/// multiple devices.
///
/// Uses `#[derive(Default)]` with a `#[default]` variant (stable since
/// Rust 1.62) instead of a hand-written `impl Default`, keeping the
/// default visible next to the variant it marks.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ConflictResolution {
    /// The server's copy always wins.
    ServerWins,
    /// The uploading client's copy always wins.
    ClientWins,
    /// Keep both copies side by side (the default).
    #[default]
    KeepBoth,
    /// Surface the conflict for manual resolution.
    Manual,
}
/// Settings for the cross-device sync subsystem.
///
/// Note the mixed units: file size is in MiB, chunk size in KiB,
/// timeout in hours, retention in days.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SyncConfig {
    /// Enable cross-device sync functionality
    #[serde(default)]
    pub enabled: bool,
    /// Default conflict resolution strategy
    #[serde(default)]
    pub default_conflict_resolution: ConflictResolution,
    /// Maximum file size for sync in MB
    #[serde(default = "default_max_sync_file_size")]
    pub max_file_size_mb: u64,
    /// Chunk size for chunked uploads in KB
    #[serde(default = "default_chunk_size")]
    pub chunk_size_kb: u64,
    /// Upload session timeout in hours
    #[serde(default = "default_upload_timeout")]
    pub upload_timeout_hours: u64,
    /// Maximum concurrent uploads per device
    #[serde(default = "default_max_concurrent_uploads")]
    pub max_concurrent_uploads: usize,
    /// Sync log retention in days
    #[serde(default = "default_sync_log_retention")]
    pub sync_log_retention_days: u64,
}
/// Default maximum syncable file size: 4 GiB, expressed in MiB.
fn default_max_sync_file_size() -> u64 {
    4 * 1024 // 4 GiB in MiB
}
/// Default chunk size for chunked uploads: 4 MiB, expressed in KiB.
fn default_chunk_size() -> u64 {
    4 * 1024 // 4 MiB in KiB
}
/// Default upload-session timeout, in hours (one full day).
fn default_upload_timeout() -> u64 {
    24 // 24 hours
}
/// Default number of simultaneous upload sessions allowed per device.
fn default_max_concurrent_uploads() -> usize {
    3
}
/// Default sync-log retention window, in days.
fn default_sync_log_retention() -> u64 {
    90 // 90 days
}
impl Default for SyncConfig {
    /// Disabled by default; all other values delegate to the same
    /// helpers used by serde so config-file omission and `default()`
    /// agree.
    fn default() -> Self {
        Self {
            enabled: false,
            default_conflict_resolution: ConflictResolution::default(),
            max_file_size_mb: default_max_sync_file_size(),
            chunk_size_kb: default_chunk_size(),
            upload_timeout_hours: default_upload_timeout(),
            max_concurrent_uploads: default_max_concurrent_uploads(),
            sync_log_retention_days: default_sync_log_retention(),
        }
    }
}
// ===== Sharing Configuration =====
/// Settings for the sharing subsystem (public links, user-to-user
/// shares, notifications and activity logging).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SharingConfig {
    /// Enable sharing functionality
    #[serde(default = "default_true")]
    pub enabled: bool,
    /// Allow creating public share links
    #[serde(default = "default_true")]
    pub allow_public_links: bool,
    /// Require password for public share links
    #[serde(default)]
    pub require_public_link_password: bool,
    /// Maximum expiry time for public links in hours (0 = unlimited)
    #[serde(default)]
    pub max_public_link_expiry_hours: u64,
    /// Allow users to reshare content shared with them
    #[serde(default = "default_true")]
    pub allow_reshare: bool,
    /// Enable share notifications
    #[serde(default = "default_true")]
    pub notifications_enabled: bool,
    /// Notification retention in days
    #[serde(default = "default_notification_retention")]
    pub notification_retention_days: u64,
    /// Share activity log retention in days
    #[serde(default = "default_activity_retention")]
    pub activity_retention_days: u64,
}
/// Default share-notification retention window, in days.
fn default_notification_retention() -> u64 {
    30
}
/// Default share-activity-log retention window, in days.
fn default_activity_retention() -> u64 {
    90
}
impl Default for SharingConfig {
    /// Sharing is opt-out (enabled by default), unlike managed storage
    /// and sync which are opt-in; mirrors the per-field serde defaults.
    fn default() -> Self {
        Self {
            enabled: true,
            allow_public_links: true,
            require_public_link_password: false,
            max_public_link_expiry_hours: 0,
            allow_reshare: true,
            notifications_enabled: true,
            notification_retention_days: default_notification_retention(),
            activity_retention_days: default_activity_retention(),
        }
    }
}
// ===== Storage Configuration =====
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -929,6 +1109,9 @@ impl Default for Config {
cloud: CloudConfig::default(),
analytics: AnalyticsConfig::default(),
photos: PhotoConfig::default(),
managed_storage: ManagedStorageConfig::default(),
sync: SyncConfig::default(),
sharing: SharingConfig::default(),
}
}
}

View file

@ -57,6 +57,54 @@ pub enum PinakesError {
#[error("external API error: {0}")]
External(String),
// Managed Storage errors
#[error("managed storage not enabled")]
ManagedStorageDisabled,
#[error("upload too large: {0} bytes exceeds limit")]
UploadTooLarge(u64),
#[error("blob not found: {0}")]
BlobNotFound(String),
#[error("storage integrity error: {0}")]
StorageIntegrity(String),
// Sync errors
#[error("sync not enabled")]
SyncDisabled,
#[error("device not found: {0}")]
DeviceNotFound(String),
#[error("sync conflict: {0}")]
SyncConflict(String),
#[error("upload session expired: {0}")]
UploadSessionExpired(String),
#[error("upload session not found: {0}")]
UploadSessionNotFound(String),
#[error("chunk out of order: expected {expected}, got {actual}")]
ChunkOutOfOrder { expected: u64, actual: u64 },
// Sharing errors
#[error("share not found: {0}")]
ShareNotFound(String),
#[error("share expired: {0}")]
ShareExpired(String),
#[error("share password required")]
SharePasswordRequired,
#[error("share password invalid")]
SharePasswordInvalid,
#[error("insufficient share permissions")]
InsufficientSharePermissions,
}
impl From<rusqlite::Error> for PinakesError {

View file

@ -195,6 +195,12 @@ pub async fn import_file_with_options(
rating: extracted.rating,
perceptual_hash,
// Managed storage fields - external files use defaults
storage_mode: StorageMode::External,
original_filename: None,
uploaded_at: None,
storage_key: None,
created_at: now,
updated_at: now,
};

View file

@ -12,6 +12,7 @@ pub mod hash;
pub mod import;
pub mod integrity;
pub mod jobs;
pub mod managed_storage;
pub mod media_type;
pub mod metadata;
pub mod model;
@ -22,10 +23,13 @@ pub mod plugin;
pub mod scan;
pub mod scheduler;
pub mod search;
pub mod sharing;
pub mod social;
pub mod storage;
pub mod subtitles;
pub mod sync;
pub mod tags;
pub mod thumbnail;
pub mod transcode;
pub mod upload;
pub mod users;

View file

@ -0,0 +1,396 @@
//! Content-addressable managed storage service.
//!
//! Provides server-side file storage with:
//! - BLAKE3 content hashing for deduplication
//! - Hierarchical storage layout: `<root>/<hash[0:2]>/<hash[2:4]>/<full_hash>`
//! - Integrity verification on read (optional)
use std::path::{Path, PathBuf};
use tokio::fs;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWriteExt, BufReader};
use tracing::{debug, info, warn};
use crate::error::{PinakesError, Result};
use crate::model::ContentHash;
/// Content-addressable storage service for managed files.
#[derive(Debug, Clone)]
pub struct ManagedStorageService {
    // Root directory holding the two-level fan-out tree plus a `temp/` area.
    root_dir: PathBuf,
    // Hard cap, in bytes, enforced by `store_stream`.
    max_upload_size: u64,
    // When true, `open`/`read` re-hash content before returning it.
    verify_on_read: bool,
}
impl ManagedStorageService {
    /// Create a new managed storage service rooted at `root_dir`.
    ///
    /// `max_upload_size` caps `store_stream` input in bytes;
    /// `verify_on_read` enables full-content hash checks in `open`/`read`.
    pub fn new(root_dir: PathBuf, max_upload_size: u64, verify_on_read: bool) -> Self {
        Self {
            root_dir,
            max_upload_size,
            verify_on_read,
        }
    }

    /// Initialize the storage directory structure.
    pub async fn init(&self) -> Result<()> {
        fs::create_dir_all(&self.root_dir).await?;
        info!(path = %self.root_dir.display(), "initialized managed storage");
        Ok(())
    }

    /// Get the storage path for a content hash.
    ///
    /// Layout: `<root>/<hash[0:2]>/<hash[2:4]>/<full_hash>`
    pub fn path(&self, hash: &ContentHash) -> PathBuf {
        let h = &hash.0;
        // NOTE(review): byte-range slicing assumes the hash string is ASCII
        // (hex); a multi-byte UTF-8 value would panic here. Holds for BLAKE3
        // hex digests produced by store_stream — confirm no other producers.
        if h.len() >= 4 {
            self.root_dir.join(&h[0..2]).join(&h[2..4]).join(h)
        } else {
            // Fallback for short hashes (shouldn't happen with BLAKE3)
            self.root_dir.join(h)
        }
    }

    /// Check if a blob exists in storage.
    // NOTE(review): `Path::exists` is a blocking syscall inside an async fn
    // (also used below); consider `tokio::fs::try_exists` if this runs on
    // runtime worker threads under load.
    pub async fn exists(&self, hash: &ContentHash) -> bool {
        self.path(hash).exists()
    }

    /// Store a file from an async reader, computing the hash as we go.
    ///
    /// Returns the content hash and file size.
    /// If the file already exists with the same hash, returns early (deduplication).
    ///
    /// Data is first streamed to `<root>/temp/<uuidv7>` while hashing, then
    /// renamed into place — so a crash leaves at worst an orphaned temp file
    /// (reaped by `cleanup_temp`), never a partial blob at its final path.
    pub async fn store_stream<R: AsyncRead + Unpin>(
        &self,
        mut reader: R,
    ) -> Result<(ContentHash, u64)> {
        // First, stream to a temp file while computing the hash
        let temp_dir = self.root_dir.join("temp");
        fs::create_dir_all(&temp_dir).await?;
        let temp_id = uuid::Uuid::now_v7();
        let temp_path = temp_dir.join(temp_id.to_string());
        let mut hasher = blake3::Hasher::new();
        let mut temp_file = fs::File::create(&temp_path).await?;
        let mut total_size = 0u64;
        let mut buf = vec![0u8; 64 * 1024]; // 64KB buffer
        loop {
            let n = reader.read(&mut buf).await?;
            if n == 0 {
                break;
            }
            total_size += n as u64;
            // Enforce the size cap before writing this chunk out.
            if total_size > self.max_upload_size {
                // Clean up temp file
                drop(temp_file);
                let _ = fs::remove_file(&temp_path).await;
                return Err(PinakesError::UploadTooLarge(total_size));
            }
            hasher.update(&buf[..n]);
            temp_file.write_all(&buf[..n]).await?;
        }
        // flush + sync_all: make sure bytes are durable before the rename
        // publishes the blob at its content-addressed path.
        temp_file.flush().await?;
        temp_file.sync_all().await?;
        drop(temp_file);
        let hash = ContentHash::new(hasher.finalize().to_hex().to_string());
        let final_path = self.path(&hash);
        // Check if file already exists (deduplication)
        if final_path.exists() {
            // Verify size matches
            let existing_meta = fs::metadata(&final_path).await?;
            if existing_meta.len() == total_size {
                debug!(hash = %hash, "blob already exists, deduplicating");
                let _ = fs::remove_file(&temp_path).await;
                return Ok((hash, total_size));
            } else {
                // Same hash but different size implies a corrupt/truncated
                // blob on disk; fall through and overwrite it with fresh data.
                warn!(
                    hash = %hash,
                    expected = total_size,
                    actual = existing_meta.len(),
                    "size mismatch for existing blob, replacing"
                );
            }
        }
        // Move temp file to final location
        if let Some(parent) = final_path.parent() {
            fs::create_dir_all(parent).await?;
        }
        fs::rename(&temp_path, &final_path).await?;
        info!(hash = %hash, size = total_size, "stored new blob");
        Ok((hash, total_size))
    }

    /// Store a file from a path.
    pub async fn store_file(&self, path: &Path) -> Result<(ContentHash, u64)> {
        let file = fs::File::open(path).await?;
        let reader = BufReader::new(file);
        self.store_stream(reader).await
    }

    /// Store bytes directly.
    pub async fn store_bytes(&self, data: &[u8]) -> Result<(ContentHash, u64)> {
        use std::io::Cursor;
        let cursor = Cursor::new(data);
        self.store_stream(cursor).await
    }

    /// Open a blob for reading.
    ///
    /// With `verify_on_read` set, the whole blob is re-hashed *before* the
    /// handle is returned (two passes over the file).
    pub async fn open(&self, hash: &ContentHash) -> Result<fs::File> {
        let path = self.path(hash);
        if !path.exists() {
            return Err(PinakesError::BlobNotFound(hash.0.clone()));
        }
        if self.verify_on_read {
            self.verify(hash).await?;
        }
        fs::File::open(&path).await.map_err(|e| PinakesError::Io(e))
    }

    /// Read a blob entirely into memory.
    ///
    /// With `verify_on_read` set, the in-memory copy is hashed and compared
    /// against the requested hash before being returned.
    pub async fn read(&self, hash: &ContentHash) -> Result<Vec<u8>> {
        let path = self.path(hash);
        if !path.exists() {
            return Err(PinakesError::BlobNotFound(hash.0.clone()));
        }
        let data = fs::read(&path).await?;
        if self.verify_on_read {
            let computed = blake3::hash(&data);
            if computed.to_hex().to_string() != hash.0 {
                return Err(PinakesError::StorageIntegrity(format!(
                    "hash mismatch for blob {}",
                    hash
                )));
            }
        }
        Ok(data)
    }

    /// Verify the integrity of a stored blob.
    ///
    /// Returns `Ok(false)` if the blob is missing, `Ok(true)` on a match,
    /// and `Err(StorageIntegrity)` on a hash mismatch.
    pub async fn verify(&self, hash: &ContentHash) -> Result<bool> {
        let path = self.path(hash);
        if !path.exists() {
            return Ok(false);
        }
        let file = fs::File::open(&path).await?;
        let mut reader = BufReader::new(file);
        let mut hasher = blake3::Hasher::new();
        let mut buf = vec![0u8; 64 * 1024];
        loop {
            let n = reader.read(&mut buf).await?;
            if n == 0 {
                break;
            }
            hasher.update(&buf[..n]);
        }
        let computed = hasher.finalize().to_hex().to_string();
        if computed != hash.0 {
            warn!(
                expected = %hash,
                computed = %computed,
                "blob integrity check failed"
            );
            return Err(PinakesError::StorageIntegrity(format!(
                "hash mismatch: expected {}, computed {}",
                hash, computed
            )));
        }
        debug!(hash = %hash, "blob integrity verified");
        Ok(true)
    }

    /// Delete a blob from storage.
    ///
    /// Best-effort removes the two fan-out parent directories afterwards;
    /// `remove_dir` only succeeds on empty directories, so errors are ignored.
    pub async fn delete(&self, hash: &ContentHash) -> Result<()> {
        let path = self.path(hash);
        if path.exists() {
            fs::remove_file(&path).await?;
            info!(hash = %hash, "deleted blob");
            // Try to remove empty parent directories
            if let Some(parent) = path.parent() {
                let _ = fs::remove_dir(parent).await;
                if let Some(grandparent) = parent.parent() {
                    let _ = fs::remove_dir(grandparent).await;
                }
            }
        }
        Ok(())
    }

    /// Get the size of a stored blob.
    pub async fn size(&self, hash: &ContentHash) -> Result<u64> {
        let path = self.path(hash);
        if !path.exists() {
            return Err(PinakesError::BlobNotFound(hash.0.clone()));
        }
        let meta = fs::metadata(&path).await?;
        Ok(meta.len())
    }

    /// List all blob hashes in storage.
    ///
    /// Walks the fixed three-level layout (`xx/yy/<hash>`); only descends
    /// into directories whose names are exactly 2 bytes long, which skips
    /// `temp/` and anything foreign.
    pub async fn list_all(&self) -> Result<Vec<ContentHash>> {
        let mut hashes = Vec::new();
        let mut entries = fs::read_dir(&self.root_dir).await?;
        while let Some(entry) = entries.next_entry().await? {
            let path = entry.path();
            if path.is_dir() && path.file_name().map(|n| n.len()) == Some(2) {
                let mut sub_entries = fs::read_dir(&path).await?;
                while let Some(sub_entry) = sub_entries.next_entry().await? {
                    let sub_path = sub_entry.path();
                    if sub_path.is_dir() && sub_path.file_name().map(|n| n.len()) == Some(2) {
                        let mut file_entries = fs::read_dir(&sub_path).await?;
                        while let Some(file_entry) = file_entries.next_entry().await? {
                            let file_path = file_entry.path();
                            if file_path.is_file() {
                                if let Some(name) = file_path.file_name() {
                                    hashes
                                        .push(ContentHash::new(name.to_string_lossy().to_string()));
                                }
                            }
                        }
                    }
                }
            }
        }
        Ok(hashes)
    }

    /// Calculate total storage used by all blobs.
    ///
    /// Blobs that vanish between `list_all` and the `size` call are
    /// silently skipped rather than failing the whole sum.
    pub async fn total_size(&self) -> Result<u64> {
        let hashes = self.list_all().await?;
        let mut total = 0u64;
        for hash in hashes {
            if let Ok(size) = self.size(&hash).await {
                total += size;
            }
        }
        Ok(total)
    }

    /// Clean up any orphaned temp files.
    ///
    /// Only removes temp files older than one hour, so an in-flight
    /// `store_stream` upload is never reaped. Returns the count removed.
    pub async fn cleanup_temp(&self) -> Result<u64> {
        let temp_dir = self.root_dir.join("temp");
        if !temp_dir.exists() {
            return Ok(0);
        }
        let mut count = 0u64;
        let mut entries = fs::read_dir(&temp_dir).await?;
        while let Some(entry) = entries.next_entry().await? {
            let path = entry.path();
            if path.is_file() {
                // Check if temp file is old (> 1 hour)
                if let Ok(meta) = fs::metadata(&path).await {
                    if let Ok(modified) = meta.modified() {
                        let age = std::time::SystemTime::now()
                            .duration_since(modified)
                            .unwrap_or_default();
                        if age.as_secs() > 3600 {
                            let _ = fs::remove_file(&path).await;
                            count += 1;
                        }
                    }
                }
            }
        }
        if count > 0 {
            info!(count, "cleaned up orphaned temp files");
        }
        Ok(count)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    #[tokio::test]
    async fn test_store_and_retrieve() {
        let tmp = tempdir().unwrap();
        let svc = ManagedStorageService::new(tmp.path().to_path_buf(), 1024 * 1024, false);
        svc.init().await.unwrap();

        let payload = b"hello, world!";
        let (digest, written) = svc.store_bytes(payload).await.unwrap();

        assert_eq!(written, payload.len() as u64);
        assert!(svc.exists(&digest).await);
        assert_eq!(svc.read(&digest).await.unwrap(), payload);
    }

    #[tokio::test]
    async fn test_deduplication() {
        let tmp = tempdir().unwrap();
        let svc = ManagedStorageService::new(tmp.path().to_path_buf(), 1024 * 1024, false);
        svc.init().await.unwrap();

        // Storing identical bytes twice must yield the same hash and a
        // single physical blob on disk.
        let payload = b"duplicate content";
        let (first, _) = svc.store_bytes(payload).await.unwrap();
        let (second, _) = svc.store_bytes(payload).await.unwrap();

        assert_eq!(first.0, second.0);
        assert_eq!(svc.list_all().await.unwrap().len(), 1);
    }

    #[tokio::test]
    async fn test_verify_integrity() {
        let tmp = tempdir().unwrap();
        let svc = ManagedStorageService::new(tmp.path().to_path_buf(), 1024 * 1024, true);
        svc.init().await.unwrap();

        let (digest, _) = svc.store_bytes(b"verify me").await.unwrap();
        assert!(svc.verify(&digest).await.unwrap());
    }

    #[tokio::test]
    async fn test_upload_too_large() {
        let tmp = tempdir().unwrap();
        // Cap of 100 bytes; a 200-byte payload must be rejected.
        let svc = ManagedStorageService::new(tmp.path().to_path_buf(), 100, false);
        svc.init().await.unwrap();

        let oversized = vec![0u8; 200];
        assert!(matches!(
            svc.store_bytes(&oversized).await,
            Err(PinakesError::UploadTooLarge(_))
        ));
    }

    #[tokio::test]
    async fn test_delete() {
        let tmp = tempdir().unwrap();
        let svc = ManagedStorageService::new(tmp.path().to_path_buf(), 1024 * 1024, false);
        svc.init().await.unwrap();

        let (digest, _) = svc.store_bytes(b"delete me").await.unwrap();
        assert!(svc.exists(&digest).await);

        svc.delete(&digest).await.unwrap();
        assert!(!svc.exists(&digest).await);
    }
}

View file

@ -44,6 +44,71 @@ impl fmt::Display for ContentHash {
}
}
// ===== Managed Storage Types =====
/// Storage mode for media items
///
/// Serialized lowercase (`"external"` / `"managed"`), matching the
/// `Display`/`FromStr` forms used for database round-trips.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum StorageMode {
    /// File exists on disk, referenced by path
    #[default]
    External,
    /// File is stored in managed content-addressable storage
    Managed,
}
impl fmt::Display for StorageMode {
    /// Render the lowercase form, identical to the serde representation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            Self::External => "external",
            Self::Managed => "managed",
        })
    }
}
impl std::str::FromStr for StorageMode {
    type Err = String;

    /// Parse case-insensitively; the inverse of `Display`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s.to_lowercase().as_str() {
            "external" => Self::External,
            "managed" => Self::Managed,
            _ => return Err(format!("unknown storage mode: {}", s)),
        })
    }
}
/// A blob stored in managed storage (content-addressable)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ManagedBlob {
    /// Hash identifying the content (BLAKE3 hex, per ManagedStorageService).
    pub content_hash: ContentHash,
    /// Size of the blob in bytes.
    pub file_size: u64,
    // presumably the MIME type recorded at upload — TODO confirm against
    // the storage backend's get_or_create_blob callers.
    pub mime_type: String,
    /// Number of media items referencing this blob; 0 marks it orphaned.
    pub reference_count: u32,
    /// When the blob was first stored.
    pub stored_at: DateTime<Utc>,
    /// Timestamp of the last successful integrity verification, if any.
    pub last_verified: Option<DateTime<Utc>>,
}
/// Result of uploading a file to managed storage
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UploadResult {
    /// Media item the upload was attached to.
    pub media_id: MediaId,
    /// Content hash of the stored (possibly pre-existing) blob.
    pub content_hash: ContentHash,
    /// True when the bytes already existed and were deduplicated.
    pub was_duplicate: bool,
    /// Upload size in bytes.
    pub file_size: u64,
}
/// Statistics about managed storage
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ManagedStorageStats {
    /// Number of distinct blobs on disk.
    pub total_blobs: u64,
    // NOTE(review): exact semantics of total vs unique size (logical bytes
    // referenced vs physical bytes stored?) are defined by the backend's
    // managed_storage_stats implementation — confirm there.
    pub total_size_bytes: u64,
    pub unique_size_bytes: u64,
    /// Savings ratio achieved through content-addressed deduplication.
    pub deduplication_ratio: f64,
    /// Number of media items using managed (vs external) storage.
    pub managed_media_count: u64,
    /// Blobs with a reference count of zero, eligible for cleanup.
    pub orphaned_blobs: u64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MediaItem {
pub id: MediaId,
@ -73,6 +138,17 @@ pub struct MediaItem {
pub rating: Option<i32>,
pub perceptual_hash: Option<String>,
// Managed storage fields
/// How the file is stored (external on disk or managed in content-addressable storage)
#[serde(default)]
pub storage_mode: StorageMode,
/// Original filename for uploaded files (preserved separately from file_name)
pub original_filename: Option<String>,
/// When the file was uploaded to managed storage
pub uploaded_at: Option<DateTime<Utc>>,
/// Storage key for looking up the blob (usually same as content_hash)
pub storage_key: Option<String>,
pub created_at: DateTime<Utc>,
pub updated_at: DateTime<Utc>,
}

View file

@ -0,0 +1,434 @@
//! Enhanced sharing system.
//!
//! Provides comprehensive sharing capabilities:
//! - Public link sharing with optional password/expiry
//! - User-to-user sharing with granular permissions
//! - Collection/tag sharing with inheritance
//! - Activity logging and notifications
use std::fmt;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::model::MediaId;
use crate::users::UserId;
/// Unique identifier for a share.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct ShareId(pub Uuid);

impl ShareId {
    /// Mint a fresh, time-ordered (UUIDv7) identifier.
    pub fn new() -> Self {
        Self(Uuid::now_v7())
    }
}

impl Default for ShareId {
    fn default() -> Self {
        Self::new()
    }
}

impl fmt::Display for ShareId {
    /// Delegate straight to the inner UUID's canonical hyphenated form.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&self.0, f)
    }
}
/// What is being shared.
///
/// Serialized internally-tagged (`{"type": "media", "media_id": ...}`)
/// with snake_case tags matching `target_type()`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ShareTarget {
    /// A single media item.
    Media { media_id: MediaId },
    /// A collection of media items.
    Collection { collection_id: Uuid },
    /// Everything carrying a given tag.
    Tag { tag_id: Uuid },
    /// A saved search query.
    SavedSearch { search_id: Uuid },
}
impl ShareTarget {
pub fn target_type(&self) -> &'static str {
match self {
Self::Media { .. } => "media",
Self::Collection { .. } => "collection",
Self::Tag { .. } => "tag",
Self::SavedSearch { .. } => "saved_search",
}
}
pub fn target_id(&self) -> Uuid {
match self {
Self::Media { media_id } => media_id.0,
Self::Collection { collection_id } => *collection_id,
Self::Tag { tag_id } => *tag_id,
Self::SavedSearch { search_id } => *search_id,
}
}
}
/// Who the share is with.
///
/// Serialized internally-tagged with snake_case tags matching
/// `recipient_type()`.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ShareRecipient {
    /// Public link accessible to anyone with the token
    PublicLink {
        token: String,
        // presumably produced by hash_share_password — TODO confirm at the
        // call sites that set it.
        password_hash: Option<String>,
    },
    /// Shared with a specific user
    User { user_id: UserId },
    /// Shared with a group
    Group { group_id: Uuid },
    /// Shared with a federated user on another server
    Federated {
        user_handle: String,
        server_url: String,
    },
}
impl ShareRecipient {
pub fn recipient_type(&self) -> &'static str {
match self {
Self::PublicLink { .. } => "public_link",
Self::User { .. } => "user",
Self::Group { .. } => "group",
Self::Federated { .. } => "federated",
}
}
}
/// Permissions granted by a share.
///
/// `Default` yields all-false (no access); use the constructors
/// (`view_only`, `download`, `edit`, `full`) for common presets.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct SharePermissions {
    /// Can view the content
    pub can_view: bool,
    /// Can download the content
    pub can_download: bool,
    /// Can edit the content/metadata
    pub can_edit: bool,
    /// Can delete the content
    pub can_delete: bool,
    /// Can reshare with others
    pub can_reshare: bool,
    /// Can add new items (for collections)
    pub can_add: bool,
}
impl SharePermissions {
    /// View-only permissions
    pub fn view_only() -> Self {
        Self {
            can_view: true,
            ..Self::default()
        }
    }

    /// Download permissions (includes view)
    pub fn download() -> Self {
        // Each preset builds on the previous one via struct-update syntax.
        Self {
            can_download: true,
            ..Self::view_only()
        }
    }

    /// Edit permissions (includes view and download)
    pub fn edit() -> Self {
        Self {
            can_edit: true,
            can_add: true,
            ..Self::download()
        }
    }

    /// Full permissions
    pub fn full() -> Self {
        Self {
            can_delete: true,
            can_reshare: true,
            ..Self::edit()
        }
    }

    /// Merge permissions (takes the most permissive of each)
    pub fn merge(&self, other: &Self) -> Self {
        Self {
            can_view: self.can_view || other.can_view,
            can_download: self.can_download || other.can_download,
            can_edit: self.can_edit || other.can_edit,
            can_delete: self.can_delete || other.can_delete,
            can_reshare: self.can_reshare || other.can_reshare,
            can_add: self.can_add || other.can_add,
        }
    }
}
/// A share record.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Share {
    /// Unique identifier (UUIDv7).
    pub id: ShareId,
    /// What is being shared.
    pub target: ShareTarget,
    /// User who created the share.
    pub owner_id: UserId,
    /// Who it is shared with (public link, user, group, federated).
    pub recipient: ShareRecipient,
    /// What the recipient may do.
    pub permissions: SharePermissions,
    /// Optional free-form note from the owner.
    pub note: Option<String>,
    /// Expiry time; `None` means the share never expires.
    pub expires_at: Option<DateTime<Utc>>,
    /// How many times the share has been accessed.
    pub access_count: u64,
    /// Last access time, if ever accessed.
    pub last_accessed: Option<DateTime<Utc>>,
    /// Whether children (media in collection, etc.) inherit this share
    pub inherit_to_children: bool,
    /// Parent share if this was created via reshare
    pub parent_share_id: Option<ShareId>,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}
impl Share {
/// Create a new public link share.
pub fn new_public_link(
owner_id: UserId,
target: ShareTarget,
token: String,
permissions: SharePermissions,
) -> Self {
let now = Utc::now();
Self {
id: ShareId::new(),
target,
owner_id,
recipient: ShareRecipient::PublicLink {
token,
password_hash: None,
},
permissions,
note: None,
expires_at: None,
access_count: 0,
last_accessed: None,
inherit_to_children: true,
parent_share_id: None,
created_at: now,
updated_at: now,
}
}
/// Create a new user share.
pub fn new_user_share(
owner_id: UserId,
target: ShareTarget,
recipient_user_id: UserId,
permissions: SharePermissions,
) -> Self {
let now = Utc::now();
Self {
id: ShareId::new(),
target,
owner_id,
recipient: ShareRecipient::User {
user_id: recipient_user_id,
},
permissions,
note: None,
expires_at: None,
access_count: 0,
last_accessed: None,
inherit_to_children: true,
parent_share_id: None,
created_at: now,
updated_at: now,
}
}
/// Check if the share has expired.
pub fn is_expired(&self) -> bool {
self.expires_at.map(|exp| exp < Utc::now()).unwrap_or(false)
}
/// Check if this is a public link share.
pub fn is_public(&self) -> bool {
matches!(self.recipient, ShareRecipient::PublicLink { .. })
}
/// Get the public token if this is a public link share.
pub fn public_token(&self) -> Option<&str> {
match &self.recipient {
ShareRecipient::PublicLink { token, .. } => Some(token),
_ => None,
}
}
}
/// Types of share activity actions.
///
/// Serialized snake_case, matching the `Display`/`FromStr` forms.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ShareActivityAction {
    Created,
    Updated,
    Accessed,
    Downloaded,
    Revoked,
    Expired,
    /// A wrong password was supplied for a protected public link.
    PasswordFailed,
}
impl fmt::Display for ShareActivityAction {
    /// Render the snake_case form, identical to the serde representation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            Self::Created => "created",
            Self::Updated => "updated",
            Self::Accessed => "accessed",
            Self::Downloaded => "downloaded",
            Self::Revoked => "revoked",
            Self::Expired => "expired",
            Self::PasswordFailed => "password_failed",
        })
    }
}
impl std::str::FromStr for ShareActivityAction {
    type Err = String;

    /// Parse case-insensitively; the inverse of `Display`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s.to_lowercase().as_str() {
            "created" => Self::Created,
            "updated" => Self::Updated,
            "accessed" => Self::Accessed,
            "downloaded" => Self::Downloaded,
            "revoked" => Self::Revoked,
            "expired" => Self::Expired,
            "password_failed" => Self::PasswordFailed,
            _ => return Err(format!("unknown share activity action: {}", s)),
        })
    }
}
/// Activity log entry for a share.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ShareActivity {
    /// Unique identifier (UUIDv7).
    pub id: Uuid,
    /// Share the activity belongs to.
    pub share_id: ShareId,
    /// Acting user, if authenticated (anonymous public-link hits are None).
    pub actor_id: Option<UserId>,
    // presumably the client IP as text — TODO confirm v4/v6 formatting at
    // the call sites that populate it.
    pub actor_ip: Option<String>,
    /// What happened.
    pub action: ShareActivityAction,
    /// Optional free-form details.
    pub details: Option<String>,
    /// When it happened.
    pub timestamp: DateTime<Utc>,
}
impl ShareActivity {
    /// Start a new activity record for `share_id`; actor, IP and details
    /// all default to `None` and can be attached builder-style below.
    pub fn new(share_id: ShareId, action: ShareActivityAction) -> Self {
        Self {
            id: Uuid::now_v7(),
            share_id,
            actor_id: None,
            actor_ip: None,
            action,
            details: None,
            timestamp: Utc::now(),
        }
    }

    /// Attach the acting user (builder-style).
    pub fn with_actor(mut self, actor_id: UserId) -> Self {
        self.actor_id = Some(actor_id);
        self
    }

    /// Attach the actor's IP address (builder-style).
    pub fn with_ip(mut self, ip: &str) -> Self {
        self.actor_ip = Some(ip.to_owned());
        self
    }

    /// Attach free-form details (builder-style).
    pub fn with_details(mut self, details: &str) -> Self {
        self.details = Some(details.to_owned());
        self
    }
}
/// Types of share notifications.
///
/// Serialized snake_case, matching the `Display`/`FromStr` forms.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ShareNotificationType {
    NewShare,
    ShareUpdated,
    ShareRevoked,
    ShareExpiring,
    ShareAccessed,
}
impl fmt::Display for ShareNotificationType {
    /// Render the snake_case form, identical to the serde representation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            Self::NewShare => "new_share",
            Self::ShareUpdated => "share_updated",
            Self::ShareRevoked => "share_revoked",
            Self::ShareExpiring => "share_expiring",
            Self::ShareAccessed => "share_accessed",
        })
    }
}
impl std::str::FromStr for ShareNotificationType {
    type Err = String;

    /// Parse case-insensitively; the inverse of `Display`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s.to_lowercase().as_str() {
            "new_share" => Self::NewShare,
            "share_updated" => Self::ShareUpdated,
            "share_revoked" => Self::ShareRevoked,
            "share_expiring" => Self::ShareExpiring,
            "share_accessed" => Self::ShareAccessed,
            _ => return Err(format!("unknown share notification type: {}", s)),
        })
    }
}
/// A notification about a share.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ShareNotification {
    /// Unique identifier (UUIDv7).
    pub id: Uuid,
    /// User being notified.
    pub user_id: UserId,
    /// Share the notification concerns.
    pub share_id: ShareId,
    /// What kind of event occurred.
    pub notification_type: ShareNotificationType,
    /// Whether the user has read it yet.
    pub is_read: bool,
    pub created_at: DateTime<Utc>,
}
impl ShareNotification {
    /// Create a fresh, unread notification for `user_id` about `share_id`,
    /// timestamped now with a new UUIDv7 id.
    pub fn new(
        user_id: UserId,
        share_id: ShareId,
        notification_type: ShareNotificationType,
    ) -> Self {
        Self {
            id: Uuid::now_v7(),
            user_id,
            share_id,
            notification_type,
            is_read: false,
            created_at: Utc::now(),
        }
    }
}
/// Generate a random share token using UUID.
pub fn generate_share_token() -> String {
    // 32 hex chars, no hyphens: UUIDv4 rendered via its "simple" adapter.
    Uuid::new_v4().as_simple().to_string()
}
/// Hash a share password.
///
/// SECURITY(review): BLAKE3 is a fast, unsalted hash — unsuitable for
/// passwords at rest (no salt, no work factor, trivially brute-forceable
/// offline). The workspace already depends on `argon2` (see Cargo.lock);
/// migrating these hashes to Argon2id, with a versioned prefix so
/// existing BLAKE3 hashes keep verifying during transition, is strongly
/// advised before this ships.
pub fn hash_share_password(password: &str) -> String {
    // Use BLAKE3 for password hashing (in production, use Argon2)
    blake3::hash(password.as_bytes()).to_hex().to_string()
}
/// Verify a share password.
///
/// Compares the computed and stored hashes in constant time (for equal
/// lengths), so the comparison does not leak how many leading bytes match —
/// the classic timing side channel of `==` on strings, which short-circuits
/// at the first differing byte.
pub fn verify_share_password(password: &str, hash: &str) -> bool {
    let computed = hash_share_password(password);
    let a = computed.as_bytes();
    let b = hash.as_bytes();
    // Length differs => definitely not equal; length itself is not secret
    // here (all valid hashes are fixed-width hex).
    if a.len() != b.len() {
        return false;
    }
    // OR together every byte difference; zero iff all bytes matched.
    a.iter().zip(b).fold(0u8, |diff, (x, y)| diff | (x ^ y)) == 0
}

View file

@ -511,6 +511,236 @@ pub trait StorageBackend: Send + Sync + 'static {
language: Option<&str>,
pagination: &Pagination,
) -> Result<Vec<MediaItem>>;
    // ===== Managed Storage =====
    /// Insert a media item that uses managed storage
    async fn insert_managed_media(&self, item: &MediaItem) -> Result<()>;
    /// Get or create a managed blob record (for deduplication tracking)
    async fn get_or_create_blob(
        &self,
        hash: &ContentHash,
        size: u64,
        mime_type: &str,
    ) -> Result<ManagedBlob>;
    /// Get a managed blob by its content hash
    async fn get_blob(&self, hash: &ContentHash) -> Result<Option<ManagedBlob>>;
    /// Increment the reference count for a blob
    async fn increment_blob_ref(&self, hash: &ContentHash) -> Result<()>;
    /// Decrement the reference count for a blob. Returns true if blob should be deleted.
    async fn decrement_blob_ref(&self, hash: &ContentHash) -> Result<bool>;
    /// Update the last_verified timestamp for a blob
    async fn update_blob_verified(&self, hash: &ContentHash) -> Result<()>;
    /// List orphaned blobs (reference_count = 0)
    async fn list_orphaned_blobs(&self) -> Result<Vec<ManagedBlob>>;
    /// Delete a blob record
    ///
    /// NOTE(review): removes only the database record; the on-disk blob is
    /// presumably cleaned up by the managed-storage layer — confirm.
    async fn delete_blob(&self, hash: &ContentHash) -> Result<()>;
    /// Get managed storage statistics
    async fn managed_storage_stats(&self) -> Result<ManagedStorageStats>;
    // ===== Sync Devices =====
    /// Register a new sync device
    ///
    /// Only the `token_hash` is persisted; the raw token stays with the client.
    async fn register_device(
        &self,
        device: &crate::sync::SyncDevice,
        token_hash: &str,
    ) -> Result<crate::sync::SyncDevice>;
    /// Get a sync device by ID
    async fn get_device(&self, id: crate::sync::DeviceId) -> Result<crate::sync::SyncDevice>;
    /// Get a sync device by its token hash
    async fn get_device_by_token(
        &self,
        token_hash: &str,
    ) -> Result<Option<crate::sync::SyncDevice>>;
    /// List all devices for a user
    async fn list_user_devices(&self, user_id: UserId) -> Result<Vec<crate::sync::SyncDevice>>;
    /// Update a sync device
    async fn update_device(&self, device: &crate::sync::SyncDevice) -> Result<()>;
    /// Delete a sync device
    async fn delete_device(&self, id: crate::sync::DeviceId) -> Result<()>;
    /// Update the last_seen_at timestamp for a device
    async fn touch_device(&self, id: crate::sync::DeviceId) -> Result<()>;
    // ===== Sync Log =====
    /// Record a change in the sync log
    async fn record_sync_change(&self, change: &crate::sync::SyncLogEntry) -> Result<()>;
    /// Get changes since a cursor position
    ///
    /// `cursor` is a sequence number; entries with a higher sequence are
    /// returned, at most `limit` of them.
    async fn get_changes_since(
        &self,
        cursor: i64,
        limit: u64,
    ) -> Result<Vec<crate::sync::SyncLogEntry>>;
    /// Get the current sync cursor (highest sequence number)
    async fn get_current_sync_cursor(&self) -> Result<i64>;
    /// Clean up old sync log entries
    ///
    /// Deletes entries older than `before`; returns the number removed.
    async fn cleanup_old_sync_log(&self, before: DateTime<Utc>) -> Result<u64>;
    // ===== Device Sync State =====
    /// Get sync state for a device and path
    async fn get_device_sync_state(
        &self,
        device_id: crate::sync::DeviceId,
        path: &str,
    ) -> Result<Option<crate::sync::DeviceSyncState>>;
    /// Insert or update device sync state
    async fn upsert_device_sync_state(&self, state: &crate::sync::DeviceSyncState) -> Result<()>;
    /// List all pending sync items for a device
    async fn list_pending_sync(
        &self,
        device_id: crate::sync::DeviceId,
    ) -> Result<Vec<crate::sync::DeviceSyncState>>;
    // ===== Upload Sessions (Chunked Uploads) =====
    /// Create a new upload session
    async fn create_upload_session(&self, session: &crate::sync::UploadSession) -> Result<()>;
    /// Get an upload session by ID
    async fn get_upload_session(&self, id: Uuid) -> Result<crate::sync::UploadSession>;
    /// Update an upload session
    async fn update_upload_session(&self, session: &crate::sync::UploadSession) -> Result<()>;
    /// Record a received chunk
    async fn record_chunk(&self, upload_id: Uuid, chunk: &crate::sync::ChunkInfo) -> Result<()>;
    /// Get all chunks for an upload
    async fn get_upload_chunks(&self, upload_id: Uuid) -> Result<Vec<crate::sync::ChunkInfo>>;
    /// Clean up expired upload sessions
    ///
    /// Returns the number of sessions removed.
    async fn cleanup_expired_uploads(&self) -> Result<u64>;
    // ===== Sync Conflicts =====
    /// Record a sync conflict
    async fn record_conflict(&self, conflict: &crate::sync::SyncConflict) -> Result<()>;
    /// Get unresolved conflicts for a device
    async fn get_unresolved_conflicts(
        &self,
        device_id: crate::sync::DeviceId,
    ) -> Result<Vec<crate::sync::SyncConflict>>;
    /// Resolve a conflict
    async fn resolve_conflict(
        &self,
        id: Uuid,
        resolution: crate::config::ConflictResolution,
    ) -> Result<()>;
    // ===== Enhanced Sharing =====
    /// Create a new share
    async fn create_share(&self, share: &crate::sharing::Share) -> Result<crate::sharing::Share>;
    /// Get a share by ID
    async fn get_share(&self, id: crate::sharing::ShareId) -> Result<crate::sharing::Share>;
    /// Get a share by its public token
    async fn get_share_by_token(&self, token: &str) -> Result<crate::sharing::Share>;
    /// List shares created by a user
    async fn list_shares_by_owner(
        &self,
        owner_id: UserId,
        pagination: &Pagination,
    ) -> Result<Vec<crate::sharing::Share>>;
    /// List shares received by a user
    async fn list_shares_for_user(
        &self,
        user_id: UserId,
        pagination: &Pagination,
    ) -> Result<Vec<crate::sharing::Share>>;
    /// List all shares for a specific target
    async fn list_shares_for_target(
        &self,
        target: &crate::sharing::ShareTarget,
    ) -> Result<Vec<crate::sharing::Share>>;
    /// Update a share
    async fn update_share(&self, share: &crate::sharing::Share) -> Result<crate::sharing::Share>;
    /// Delete a share
    async fn delete_share(&self, id: crate::sharing::ShareId) -> Result<()>;
    /// Record that a share was accessed
    async fn record_share_access(&self, id: crate::sharing::ShareId) -> Result<()>;
    /// Check share access for a user and target
    ///
    /// `user_id` is `None` for anonymous (token-based) access.
    async fn check_share_access(
        &self,
        user_id: Option<UserId>,
        target: &crate::sharing::ShareTarget,
    ) -> Result<Option<crate::sharing::SharePermissions>>;
    /// Get effective permissions for a media item (considering inheritance)
    async fn get_effective_share_permissions(
        &self,
        user_id: Option<UserId>,
        media_id: MediaId,
    ) -> Result<Option<crate::sharing::SharePermissions>>;
    /// Batch delete shares
    ///
    /// Returns the number of shares actually deleted.
    async fn batch_delete_shares(&self, ids: &[crate::sharing::ShareId]) -> Result<u64>;
    /// Clean up expired shares
    async fn cleanup_expired_shares(&self) -> Result<u64>;
    // ===== Share Activity =====
    /// Record share activity
    async fn record_share_activity(&self, activity: &crate::sharing::ShareActivity) -> Result<()>;
    /// Get activity for a share
    async fn get_share_activity(
        &self,
        share_id: crate::sharing::ShareId,
        pagination: &Pagination,
    ) -> Result<Vec<crate::sharing::ShareActivity>>;
    // ===== Share Notifications =====
    /// Create a share notification
    async fn create_share_notification(
        &self,
        notification: &crate::sharing::ShareNotification,
    ) -> Result<()>;
    /// Get unread notifications for a user
    async fn get_unread_notifications(
        &self,
        user_id: UserId,
    ) -> Result<Vec<crate::sharing::ShareNotification>>;
    /// Mark a notification as read
    async fn mark_notification_read(&self, id: Uuid) -> Result<()>;
    /// Mark all notifications as read for a user
    async fn mark_all_notifications_read(&self, user_id: UserId) -> Result<()>;
}
/// Comprehensive library statistics.

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,297 @@
//! Chunked upload handling for large file sync.
use std::path::{Path, PathBuf};
use chrono::Utc;
use tokio::fs;
use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt};
use tracing::{debug, info};
use uuid::Uuid;
use crate::error::{PinakesError, Result};
use super::{ChunkInfo, UploadSession};
/// Manager for chunked uploads.
///
/// Each in-flight upload session owns one `<session-id>.upload` temp file
/// inside `temp_dir`; chunks are written into it at their byte offsets.
#[derive(Debug, Clone)]
pub struct ChunkedUploadManager {
    // Directory holding per-session temp files.
    temp_dir: PathBuf,
}
impl ChunkedUploadManager {
    /// Create a new chunked upload manager rooted at `temp_dir`.
    pub fn new(temp_dir: PathBuf) -> Self {
        Self { temp_dir }
    }
    /// Initialize the temp directory (creates it and any missing parents).
    pub async fn init(&self) -> Result<()> {
        fs::create_dir_all(&self.temp_dir).await?;
        Ok(())
    }
    /// Get the temp file path for an upload session.
    pub fn temp_path(&self, session_id: Uuid) -> PathBuf {
        self.temp_dir.join(format!("{}.upload", session_id))
    }
    /// Create the temp file for a new upload session.
    ///
    /// The file is pre-sized to `expected_size` so chunks can land at
    /// arbitrary offsets in any order (sparse where the FS supports it).
    pub async fn create_temp_file(&self, session: &UploadSession) -> Result<()> {
        let path = self.temp_path(session.id);
        // Create a sparse file of the expected size
        let file = fs::File::create(&path).await?;
        file.set_len(session.expected_size).await?;
        debug!(
            session_id = %session.id,
            size = session.expected_size,
            "created temp file for upload"
        );
        Ok(())
    }
    /// Write a chunk to the temp file.
    ///
    /// # Errors
    ///
    /// Fails if the session's temp file is missing, `chunk_index` is out
    /// of range, or the chunk length does not match the session geometry.
    pub async fn write_chunk(
        &self,
        session: &UploadSession,
        chunk_index: u64,
        data: &[u8],
    ) -> Result<ChunkInfo> {
        let path = self.temp_path(session.id);
        if !path.exists() {
            return Err(PinakesError::UploadSessionNotFound(session.id.to_string()));
        }
        // Range-check the index BEFORE computing the offset: in release
        // builds a hostile chunk_index would make
        // `chunk_index * chunk_size` wrap and could land inside the file.
        // (`saturating_sub` keeps a zero-chunk session from underflowing.)
        if chunk_index >= session.chunk_count {
            return Err(PinakesError::ChunkOutOfOrder {
                expected: session.chunk_count.saturating_sub(1),
                actual: chunk_index,
            });
        }
        // Safe: chunk_index <= chunk_count - 1, so the product stays below
        // expected_size and cannot overflow.
        let offset = chunk_index * session.chunk_size;
        // Calculate expected chunk size; the last chunk may be smaller.
        let expected_size = if chunk_index == session.chunk_count - 1 {
            session.expected_size - offset
        } else {
            session.chunk_size
        };
        if data.len() as u64 != expected_size {
            return Err(PinakesError::InvalidData(format!(
                "chunk {} has wrong size: expected {}, got {}",
                chunk_index,
                expected_size,
                data.len()
            )));
        }
        // Write chunk to file at its offset.
        let mut file = fs::OpenOptions::new().write(true).open(&path).await?;
        file.seek(std::io::SeekFrom::Start(offset)).await?;
        file.write_all(data).await?;
        file.flush().await?;
        // Compute chunk hash so the caller can persist per-chunk integrity info.
        let hash = blake3::hash(data).to_hex().to_string();
        debug!(
            session_id = %session.id,
            chunk_index,
            offset,
            size = data.len(),
            "wrote chunk"
        );
        Ok(ChunkInfo {
            upload_id: session.id,
            chunk_index,
            offset,
            size: data.len() as u64,
            hash,
            received_at: Utc::now(),
        })
    }
    /// Verify and finalize the upload.
    ///
    /// Checks that:
    /// 1. All chunks are received
    /// 2. File size matches expected
    /// 3. Content hash matches expected
    pub async fn finalize(
        &self,
        session: &UploadSession,
        received_chunks: &[ChunkInfo],
    ) -> Result<PathBuf> {
        let path = self.temp_path(session.id);
        // Check all chunks received
        if received_chunks.len() as u64 != session.chunk_count {
            return Err(PinakesError::InvalidData(format!(
                "missing chunks: expected {}, got {}",
                session.chunk_count,
                received_chunks.len()
            )));
        }
        // Verify chunk indices form the exact sequence 0..chunk_count.
        let mut indices: Vec<u64> = received_chunks.iter().map(|c| c.chunk_index).collect();
        indices.sort_unstable();
        for (i, idx) in indices.iter().enumerate() {
            if *idx != i as u64 {
                return Err(PinakesError::InvalidData(format!(
                    "chunk {} missing or out of order",
                    i
                )));
            }
        }
        // Verify file size
        let metadata = fs::metadata(&path).await?;
        if metadata.len() != session.expected_size {
            return Err(PinakesError::InvalidData(format!(
                "file size mismatch: expected {}, got {}",
                session.expected_size,
                metadata.len()
            )));
        }
        // Verify content hash of the assembled file.
        let computed_hash = compute_file_hash(&path).await?;
        if computed_hash != session.expected_hash.0 {
            return Err(PinakesError::StorageIntegrity(format!(
                "hash mismatch: expected {}, computed {}",
                session.expected_hash, computed_hash
            )));
        }
        info!(
            session_id = %session.id,
            hash = %session.expected_hash,
            size = session.expected_size,
            "finalized chunked upload"
        );
        Ok(path)
    }
    /// Cancel an upload and clean up temp file.
    pub async fn cancel(&self, session_id: Uuid) -> Result<()> {
        let path = self.temp_path(session_id);
        if path.exists() {
            fs::remove_file(&path).await?;
            debug!(session_id = %session_id, "cancelled upload, removed temp file");
        }
        Ok(())
    }
    /// Clean up expired temp files.
    ///
    /// Removes `.upload` files whose mtime is older than `max_age_hours`;
    /// returns the number removed. Individual remove failures are ignored
    /// (best-effort cleanup).
    pub async fn cleanup_expired(&self, max_age_hours: u64) -> Result<u64> {
        let mut count = 0u64;
        let max_age = std::time::Duration::from_secs(max_age_hours * 3600);
        let mut entries = fs::read_dir(&self.temp_dir).await?;
        while let Some(entry) = entries.next_entry().await? {
            let path = entry.path();
            if path.extension().map(|e| e == "upload").unwrap_or(false) {
                if let Ok(metadata) = fs::metadata(&path).await {
                    if let Ok(modified) = metadata.modified() {
                        // Clock skew can make `modified` be in the future;
                        // treat that as age zero instead of erroring.
                        let age = std::time::SystemTime::now()
                            .duration_since(modified)
                            .unwrap_or_default();
                        if age > max_age {
                            let _ = fs::remove_file(&path).await;
                            count += 1;
                        }
                    }
                }
            }
        }
        if count > 0 {
            info!(count, "cleaned up expired upload temp files");
        }
        Ok(count)
    }
}
/// Compute the BLAKE3 hash of a file, returned as a lowercase hex string.
///
/// Streams the file through the hasher in fixed-size reads so memory use
/// stays constant regardless of file size.
async fn compute_file_hash(path: &Path) -> Result<String> {
    const BUF_LEN: usize = 64 * 1024;
    let mut file = fs::File::open(path).await?;
    let mut hasher = blake3::Hasher::new();
    let mut chunk = vec![0u8; BUF_LEN];
    loop {
        let read = file.read(&mut chunk).await?;
        if read == 0 {
            break;
        }
        hasher.update(&chunk[..read]);
    }
    Ok(hasher.finalize().to_hex().to_string())
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::model::ContentHash;
    use crate::sync::UploadStatus;
    use tempfile::tempdir;
    // End-to-end happy path: split a small payload into chunks, write them
    // in order, finalize, and verify the reassembled bytes.
    #[tokio::test]
    async fn test_chunked_upload() {
        let dir = tempdir().unwrap();
        let manager = ChunkedUploadManager::new(dir.path().to_path_buf());
        manager.init().await.unwrap();
        // Create test data (52 bytes -> 3 chunks of 20/20/12 with chunk_size 20).
        let data = b"Hello, World! This is test data for chunked upload.";
        let hash = blake3::hash(data).to_hex().to_string();
        let chunk_size = 20u64;
        let session = UploadSession {
            id: Uuid::now_v7(),
            device_id: super::super::DeviceId::new(),
            target_path: "/test/file.txt".to_string(),
            expected_hash: ContentHash::new(hash.clone()),
            expected_size: data.len() as u64,
            chunk_size,
            // Ceiling division to cover the short final chunk.
            chunk_count: (data.len() as u64 + chunk_size - 1) / chunk_size,
            status: UploadStatus::InProgress,
            created_at: Utc::now(),
            expires_at: Utc::now() + chrono::Duration::hours(24),
            last_activity: Utc::now(),
        };
        manager.create_temp_file(&session).await.unwrap();
        // Write chunks
        let mut chunks = Vec::new();
        for i in 0..session.chunk_count {
            let start = (i * chunk_size) as usize;
            let end = ((i + 1) * chunk_size).min(data.len() as u64) as usize;
            let chunk_data = &data[start..end];
            let chunk = manager.write_chunk(&session, i, chunk_data).await.unwrap();
            chunks.push(chunk);
        }
        // Finalize: verifies chunk set, size, and content hash.
        let final_path = manager.finalize(&session, &chunks).await.unwrap();
        assert!(final_path.exists());
        // Verify content round-trips byte-for-byte.
        let content = fs::read(&final_path).await.unwrap();
        assert_eq!(&content[..], data);
    }
}

View file

@ -0,0 +1,144 @@
//! Conflict detection and resolution for sync.
use crate::config::ConflictResolution;
use super::DeviceSyncState;
/// Detect if there's a conflict between local and server state.
pub fn detect_conflict(state: &DeviceSyncState) -> Option<ConflictInfo> {
// If either side has no hash, no conflict possible
let local_hash = state.local_hash.as_ref()?;
let server_hash = state.server_hash.as_ref()?;
// Same hash = no conflict
if local_hash == server_hash {
return None;
}
// Both have different hashes = conflict
Some(ConflictInfo {
path: state.path.clone(),
local_hash: local_hash.clone(),
server_hash: server_hash.clone(),
local_mtime: state.local_mtime,
server_mtime: state.server_mtime,
})
}
/// Information about a detected conflict.
#[derive(Debug, Clone)]
pub struct ConflictInfo {
    // Path of the conflicting file (same path on both sides).
    pub path: String,
    // Content hash of the local copy.
    pub local_hash: String,
    // Content hash of the server copy.
    pub server_hash: String,
    // Modification times (epoch seconds, presumably — confirm with callers);
    // either side may be unknown.
    pub local_mtime: Option<i64>,
    pub server_mtime: Option<i64>,
}
/// Result of resolving a conflict.
#[derive(Debug, Clone)]
pub enum ConflictOutcome {
    /// Use the server version
    UseServer,
    /// Use the local version (upload it)
    UseLocal,
    /// Keep both versions (rename one)
    KeepBoth { new_local_path: String },
    /// Requires manual intervention
    Manual,
}
/// Resolve a conflict based on the configured strategy.
///
/// Maps each `ConflictResolution` policy onto the concrete action to take;
/// only `KeepBoth` needs extra work (inventing a rename for the local copy).
pub fn resolve_conflict(
    conflict: &ConflictInfo,
    resolution: ConflictResolution,
) -> ConflictOutcome {
    if let ConflictResolution::KeepBoth = resolution {
        return ConflictOutcome::KeepBoth {
            new_local_path: generate_conflict_path(&conflict.path, &conflict.local_hash),
        };
    }
    match resolution {
        ConflictResolution::ServerWins => ConflictOutcome::UseServer,
        ConflictResolution::ClientWins => ConflictOutcome::UseLocal,
        // Handled above; unreachable here but kept for exhaustiveness.
        ConflictResolution::KeepBoth => ConflictOutcome::Manual,
        ConflictResolution::Manual => ConflictOutcome::Manual,
    }
}
/// Generate a new path for the conflicting local file.
/// Format: filename.conflict-<short_hash>.ext
///
/// The hash prefix is truncated by *characters*, not bytes: the original
/// `&local_hash[..8]` byte slice panics if index 8 falls inside a
/// multi-byte UTF-8 character. Hex hashes are ASCII in practice, but this
/// helper should not be able to panic on arbitrary input.
fn generate_conflict_path(original_path: &str, local_hash: &str) -> String {
    // At most 8 characters of the hash; shorter hashes are used whole.
    let short_hash: String = local_hash.chars().take(8).collect();
    match original_path.rsplit_once('.') {
        // Insert the marker before the final extension.
        Some((base, ext)) => format!("{}.conflict-{}.{}", base, short_hash, ext),
        // No extension: append the marker.
        None => format!("{}.conflict-{}", original_path, short_hash),
    }
}
/// Automatic conflict resolution based on modification times.
/// Useful when ConflictResolution is set to a time-based strategy.
///
/// Newer mtime wins; a side with a known mtime beats one without; with no
/// information at all the server copy wins.
pub fn resolve_by_mtime(conflict: &ConflictInfo) -> ConflictOutcome {
    let (local, server) = (conflict.local_mtime, conflict.server_mtime);
    if let (Some(l), Some(s)) = (local, server) {
        return if l > s {
            ConflictOutcome::UseLocal
        } else {
            ConflictOutcome::UseServer
        };
    }
    if local.is_some() {
        ConflictOutcome::UseLocal
    } else {
        // Covers both "server-only mtime" and "neither known":
        // default to the server copy.
        ConflictOutcome::UseServer
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::sync::FileSyncStatus;
    // Rename format: marker goes before the final extension, or is
    // appended when there is no extension.
    #[test]
    fn test_generate_conflict_path() {
        assert_eq!(
            generate_conflict_path("/path/to/file.txt", "abc12345"),
            "/path/to/file.conflict-abc12345.txt"
        );
        assert_eq!(
            generate_conflict_path("/path/to/file", "abc12345"),
            "/path/to/file.conflict-abc12345"
        );
    }
    // Equal hashes -> no conflict; differing hashes -> conflict detected.
    #[test]
    fn test_detect_conflict() {
        let state_no_conflict = DeviceSyncState {
            device_id: super::super::DeviceId::new(),
            path: "/test".to_string(),
            local_hash: Some("abc".to_string()),
            server_hash: Some("abc".to_string()),
            local_mtime: None,
            server_mtime: None,
            sync_status: FileSyncStatus::Synced,
            last_synced_at: None,
            conflict_info_json: None,
        };
        assert!(detect_conflict(&state_no_conflict).is_none());
        let state_conflict = DeviceSyncState {
            device_id: super::super::DeviceId::new(),
            path: "/test".to_string(),
            local_hash: Some("abc".to_string()),
            server_hash: Some("def".to_string()),
            local_mtime: None,
            server_mtime: None,
            sync_status: FileSyncStatus::Conflict,
            last_synced_at: None,
            conflict_info_json: None,
        };
        assert!(detect_conflict(&state_conflict).is_some());
    }
}

View file

@ -0,0 +1,14 @@
//! Cross-device synchronization module.
//!
//! Provides device registration, change tracking, and conflict resolution
//! for syncing media libraries across multiple devices.
mod chunked;
mod conflict;
mod models;
mod protocol;
pub use chunked::*;
pub use conflict::*;
pub use models::*;
pub use protocol::*;

View file

@ -0,0 +1,380 @@
//! Sync domain models.
use std::fmt;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::config::ConflictResolution;
use crate::model::{ContentHash, MediaId};
use crate::users::UserId;
/// Unique identifier for a sync device.
///
/// Newtype over `Uuid` so device ids cannot be confused with other
/// UUID-backed identifiers in the codebase.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct DeviceId(pub Uuid);
impl DeviceId {
    /// Generate a fresh id (UUIDv7, so ids sort roughly by creation time).
    pub fn new() -> Self {
        Self(Uuid::now_v7())
    }
}
impl Default for DeviceId {
    fn default() -> Self {
        Self::new()
    }
}
impl fmt::Display for DeviceId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}
/// Type of sync device.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum DeviceType {
    Desktop,
    Mobile,
    Tablet,
    Server,
    Other,
}
impl DeviceType {
    /// Canonical lowercase label shared by `Display` and `FromStr`.
    fn as_str(self) -> &'static str {
        match self {
            Self::Desktop => "desktop",
            Self::Mobile => "mobile",
            Self::Tablet => "tablet",
            Self::Server => "server",
            Self::Other => "other",
        }
    }
}
impl Default for DeviceType {
    fn default() -> Self {
        Self::Other
    }
}
impl fmt::Display for DeviceType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}
impl std::str::FromStr for DeviceType {
    type Err = String;
    /// Case-insensitive parse of the canonical labels.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "desktop" => Ok(Self::Desktop),
            "mobile" => Ok(Self::Mobile),
            "tablet" => Ok(Self::Tablet),
            "server" => Ok(Self::Server),
            "other" => Ok(Self::Other),
            _ => Err(format!("unknown device type: {}", s)),
        }
    }
}
/// A registered sync device.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SyncDevice {
    pub id: DeviceId,
    // Owning user.
    pub user_id: UserId,
    // Human-readable device name.
    pub name: String,
    pub device_type: DeviceType,
    // Version string reported by the sync client.
    pub client_version: String,
    // Free-form OS description, if the client provided one.
    pub os_info: Option<String>,
    // Set when a sync completes (see protocol::update_device_cursor).
    pub last_sync_at: Option<DateTime<Utc>>,
    // Last time the device contacted the server.
    pub last_seen_at: DateTime<Utc>,
    // Highest sync-log sequence this device has processed, if any.
    pub sync_cursor: Option<i64>,
    // Whether sync is enabled for this device.
    pub enabled: bool,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}
impl SyncDevice {
    /// Create a freshly registered, enabled device with no sync history.
    pub fn new(
        user_id: UserId,
        name: String,
        device_type: DeviceType,
        client_version: String,
    ) -> Self {
        let now = Utc::now();
        Self {
            id: DeviceId::new(),
            user_id,
            name,
            device_type,
            client_version,
            os_info: None,
            last_sync_at: None,
            last_seen_at: now,
            sync_cursor: None,
            enabled: true,
            created_at: now,
            updated_at: now,
        }
    }
}
/// Type of change recorded in the sync log.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum SyncChangeType {
    Created,
    Modified,
    Deleted,
    Moved,
    MetadataUpdated,
}
impl SyncChangeType {
    /// Canonical snake_case label shared by `Display` and `FromStr`.
    fn as_str(self) -> &'static str {
        match self {
            Self::Created => "created",
            Self::Modified => "modified",
            Self::Deleted => "deleted",
            Self::Moved => "moved",
            Self::MetadataUpdated => "metadata_updated",
        }
    }
}
impl fmt::Display for SyncChangeType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}
impl std::str::FromStr for SyncChangeType {
    type Err = String;
    /// Case-insensitive parse of the canonical labels.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "created" => Ok(Self::Created),
            "modified" => Ok(Self::Modified),
            "deleted" => Ok(Self::Deleted),
            "moved" => Ok(Self::Moved),
            "metadata_updated" => Ok(Self::MetadataUpdated),
            _ => Err(format!("unknown sync change type: {}", s)),
        }
    }
}
/// An entry in the sync log tracking a change.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SyncLogEntry {
    pub id: Uuid,
    // Monotonic log position; 0 until the database assigns it (see `new`).
    pub sequence: i64,
    pub change_type: SyncChangeType,
    // The affected media item, when the change maps to one.
    pub media_id: Option<MediaId>,
    // Library path the change applies to.
    pub path: String,
    // Content hash after the change, when known (absent e.g. for deletes).
    pub content_hash: Option<ContentHash>,
    pub file_size: Option<u64>,
    // Optional serialized metadata payload accompanying the change.
    pub metadata_json: Option<String>,
    // Originating device, if the change came from a sync client.
    pub changed_by_device: Option<DeviceId>,
    pub timestamp: DateTime<Utc>,
}
impl SyncLogEntry {
    /// Build a log entry stamped "now"; `sequence` stays 0 until persisted.
    pub fn new(
        change_type: SyncChangeType,
        path: String,
        media_id: Option<MediaId>,
        content_hash: Option<ContentHash>,
    ) -> Self {
        Self {
            id: Uuid::now_v7(),
            sequence: 0, // Will be assigned by database
            change_type,
            media_id,
            path,
            content_hash,
            file_size: None,
            metadata_json: None,
            changed_by_device: None,
            timestamp: Utc::now(),
        }
    }
}
/// Sync status for a file on a device.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum FileSyncStatus {
    /// Local and server copies agree.
    Synced,
    /// Local changes await upload to the server.
    PendingUpload,
    /// Server changes await download to the device.
    PendingDownload,
    /// Local and server copies diverged; needs resolution.
    Conflict,
    /// File was deleted.
    Deleted,
}
impl fmt::Display for FileSyncStatus {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Synced => write!(f, "synced"),
            Self::PendingUpload => write!(f, "pending_upload"),
            Self::PendingDownload => write!(f, "pending_download"),
            Self::Conflict => write!(f, "conflict"),
            Self::Deleted => write!(f, "deleted"),
        }
    }
}
impl std::str::FromStr for FileSyncStatus {
    type Err = String;
    // Case-insensitive parse of the snake_case labels used by Display.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "synced" => Ok(Self::Synced),
            "pending_upload" => Ok(Self::PendingUpload),
            "pending_download" => Ok(Self::PendingDownload),
            "conflict" => Ok(Self::Conflict),
            "deleted" => Ok(Self::Deleted),
            _ => Err(format!("unknown file sync status: {}", s)),
        }
    }
}
/// Sync state for a specific file on a specific device.
///
/// `(device_id, path)` identifies the record; the paired local/server
/// hash and mtime columns are what conflict detection compares.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeviceSyncState {
    pub device_id: DeviceId,
    pub path: String,
    // Content hash of the device's copy, when known.
    pub local_hash: Option<String>,
    // Content hash of the server's copy, when known.
    pub server_hash: Option<String>,
    pub local_mtime: Option<i64>,
    pub server_mtime: Option<i64>,
    pub sync_status: FileSyncStatus,
    pub last_synced_at: Option<DateTime<Utc>>,
    // Serialized conflict details, populated when sync_status is Conflict
    // (presumably JSON of ConflictInfo — confirm against the storage layer).
    pub conflict_info_json: Option<String>,
}
/// A sync conflict that needs resolution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SyncConflict {
    pub id: Uuid,
    pub device_id: DeviceId,
    pub path: String,
    pub local_hash: String,
    pub local_mtime: i64,
    pub server_hash: String,
    pub server_mtime: i64,
    pub detected_at: DateTime<Utc>,
    // Both remain None until the conflict is resolved.
    pub resolved_at: Option<DateTime<Utc>>,
    pub resolution: Option<ConflictResolution>,
}
impl SyncConflict {
    /// Record a newly detected, unresolved conflict stamped "now".
    pub fn new(
        device_id: DeviceId,
        path: String,
        local_hash: String,
        local_mtime: i64,
        server_hash: String,
        server_mtime: i64,
    ) -> Self {
        Self {
            id: Uuid::now_v7(),
            device_id,
            path,
            local_hash,
            local_mtime,
            server_hash,
            server_mtime,
            detected_at: Utc::now(),
            resolved_at: None,
            resolution: None,
        }
    }
}
/// Status of an upload session.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum UploadStatus {
    /// Session created, no chunks received yet.
    Pending,
    /// At least one chunk received.
    InProgress,
    /// All chunks received and verified.
    Completed,
    Failed,
    /// Session passed its expiry deadline before completing.
    Expired,
    /// Client aborted the upload.
    Cancelled,
}
impl fmt::Display for UploadStatus {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Pending => write!(f, "pending"),
            Self::InProgress => write!(f, "in_progress"),
            Self::Completed => write!(f, "completed"),
            Self::Failed => write!(f, "failed"),
            Self::Expired => write!(f, "expired"),
            Self::Cancelled => write!(f, "cancelled"),
        }
    }
}
impl std::str::FromStr for UploadStatus {
    type Err = String;
    // Case-insensitive parse of the snake_case labels used by Display.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "pending" => Ok(Self::Pending),
            "in_progress" => Ok(Self::InProgress),
            "completed" => Ok(Self::Completed),
            "failed" => Ok(Self::Failed),
            "expired" => Ok(Self::Expired),
            "cancelled" => Ok(Self::Cancelled),
            _ => Err(format!("unknown upload status: {}", s)),
        }
    }
}
/// A chunked upload session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UploadSession {
    pub id: Uuid,
    pub device_id: DeviceId,
    // Library path the completed upload will be placed at.
    pub target_path: String,
    // Hash the fully assembled file must match.
    pub expected_hash: ContentHash,
    pub expected_size: u64,
    pub chunk_size: u64,
    // Number of chunks (ceil(expected_size / chunk_size)).
    pub chunk_count: u64,
    pub status: UploadStatus,
    pub created_at: DateTime<Utc>,
    pub expires_at: DateTime<Utc>,
    pub last_activity: DateTime<Utc>,
}
impl UploadSession {
    /// Create a pending session for a file of `expected_size` bytes split
    /// into `chunk_size`-byte chunks, expiring `timeout_hours` from now.
    ///
    /// # Panics
    ///
    /// Panics if `chunk_size` is zero (the previous code hit a raw
    /// divide-by-zero instead of a diagnosable assertion).
    pub fn new(
        device_id: DeviceId,
        target_path: String,
        expected_hash: ContentHash,
        expected_size: u64,
        chunk_size: u64,
        timeout_hours: u64,
    ) -> Self {
        assert!(chunk_size > 0, "chunk_size must be non-zero");
        let now = Utc::now();
        // Ceiling division without the `(a + b - 1) / b` form, which can
        // overflow u64 when expected_size is near u64::MAX.
        let chunk_count = expected_size / chunk_size + u64::from(expected_size % chunk_size != 0);
        Self {
            id: Uuid::now_v7(),
            device_id,
            target_path,
            expected_hash,
            expected_size,
            chunk_size,
            chunk_count,
            status: UploadStatus::Pending,
            created_at: now,
            expires_at: now + chrono::Duration::hours(timeout_hours as i64),
            last_activity: now,
        }
    }
}
/// Information about an uploaded chunk.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChunkInfo {
    // Session this chunk belongs to.
    pub upload_id: Uuid,
    // Zero-based index within the session.
    pub chunk_index: u64,
    // Byte offset of this chunk in the assembled file.
    pub offset: u64,
    // Actual chunk length in bytes (the final chunk may be short).
    pub size: u64,
    // BLAKE3 hex digest of the chunk's bytes.
    pub hash: String,
    pub received_at: DateTime<Utc>,
}

View file

@ -0,0 +1,215 @@
//! Sync protocol implementation.
//!
//! Handles the bidirectional sync protocol between clients and server.
use chrono::Utc;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::error::Result;
use crate::model::{ContentHash, MediaId};
use crate::storage::DynStorageBackend;
use super::{DeviceId, DeviceSyncState, FileSyncStatus, SyncChangeType, SyncLogEntry};
/// Request from client to get changes since a cursor.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChangesRequest {
pub cursor: i64,
pub limit: Option<u64>,
}
/// Response containing changes since the cursor.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChangesResponse {
pub changes: Vec<SyncLogEntry>,
pub cursor: i64,
pub has_more: bool,
}
/// A change reported by the client.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClientChange {
pub path: String,
pub change_type: SyncChangeType,
pub content_hash: Option<String>,
pub file_size: Option<u64>,
pub local_mtime: Option<i64>,
pub metadata: Option<serde_json::Value>,
}
/// Request from client to report local changes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReportChangesRequest {
pub device_id: String,
pub changes: Vec<ClientChange>,
}
/// Result of processing a client change.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "status", rename_all = "snake_case")]
pub enum ChangeResult {
/// Change accepted, no action needed
Accepted { path: String },
/// Conflict detected, needs resolution
Conflict {
path: String,
server_hash: String,
server_mtime: i64,
},
/// Upload required for new/modified file
UploadRequired {
path: String,
upload_url: String,
session_id: String,
},
/// Error processing change
Error { path: String, message: String },
}
/// Response to a report changes request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReportChangesResponse {
pub results: Vec<ChangeResult>,
pub server_cursor: i64,
}
/// Acknowledgment from client that changes have been processed.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AckRequest {
pub device_id: String,
pub cursor: i64,
pub processed_paths: Vec<String>,
}
/// Get changes since a cursor position.
///
/// Caps the page size at 1000, over-fetches by one row to detect whether
/// another page follows, and advances the cursor to the last returned
/// entry's sequence (or echoes the input cursor on an empty page).
pub async fn get_changes(
    storage: &DynStorageBackend,
    cursor: i64,
    limit: u64,
) -> Result<ChangesResponse> {
    let page = limit.min(1000);
    let mut changes = storage.get_changes_since(cursor, page + 1).await?;
    let has_more = changes.len() > page as usize;
    changes.truncate(page as usize);
    let new_cursor = changes.last().map_or(cursor, |entry| entry.sequence);
    Ok(ChangesResponse {
        changes,
        cursor: new_cursor,
        has_more,
    })
}
/// Record a change in the sync log.
pub async fn record_change(
storage: &DynStorageBackend,
change_type: SyncChangeType,
path: &str,
media_id: Option<MediaId>,
content_hash: Option<&ContentHash>,
file_size: Option<u64>,
changed_by_device: Option<DeviceId>,
) -> Result<SyncLogEntry> {
let entry = SyncLogEntry {
id: Uuid::now_v7(),
sequence: 0, // Will be assigned by database
change_type,
media_id,
path: path.to_string(),
content_hash: content_hash.cloned(),
file_size,
metadata_json: None,
changed_by_device,
timestamp: Utc::now(),
};
storage.record_sync_change(&entry).await?;
Ok(entry)
}
/// Update device cursor after processing changes.
///
/// Advances the device's `sync_cursor` and stamps `last_sync_at` and
/// `updated_at` from a single clock read, so the two timestamps are
/// identical (the previous code called `Utc::now()` twice and could
/// record slightly different instants for the same event).
pub async fn update_device_cursor(
    storage: &DynStorageBackend,
    device_id: DeviceId,
    cursor: i64,
) -> Result<()> {
    let mut device = storage.get_device(device_id).await?;
    let now = Utc::now();
    device.sync_cursor = Some(cursor);
    device.last_sync_at = Some(now);
    device.updated_at = now;
    storage.update_device(&device).await?;
    Ok(())
}
/// Mark a file as synced for a device.
///
/// Writes a state row where local and server hash/mtime agree and the
/// status is [`FileSyncStatus::Synced`], stamped with the current time.
pub async fn mark_synced(
    storage: &DynStorageBackend,
    device_id: DeviceId,
    path: &str,
    hash: &str,
    mtime: Option<i64>,
) -> Result<()> {
    let hash_owned = hash.to_string();
    let state = DeviceSyncState {
        device_id,
        path: path.to_string(),
        // Both sides carry the same hash/mtime by definition of "synced".
        local_hash: Some(hash_owned.clone()),
        server_hash: Some(hash_owned),
        local_mtime: mtime,
        server_mtime: mtime,
        sync_status: FileSyncStatus::Synced,
        last_synced_at: Some(Utc::now()),
        conflict_info_json: None,
    };
    storage.upsert_device_sync_state(&state).await?;
    Ok(())
}
/// Mark a file as pending download for a device.
///
/// Updates an existing state row with the new server hash/mtime, or
/// creates a fresh row (no local copy yet) when none exists; either way
/// the status becomes [`FileSyncStatus::PendingDownload`].
pub async fn mark_pending_download(
    storage: &DynStorageBackend,
    device_id: DeviceId,
    path: &str,
    server_hash: &str,
    server_mtime: Option<i64>,
) -> Result<()> {
    let state = if let Some(mut existing) = storage.get_device_sync_state(device_id, path).await? {
        existing.server_hash = Some(server_hash.to_string());
        existing.server_mtime = server_mtime;
        existing.sync_status = FileSyncStatus::PendingDownload;
        existing
    } else {
        DeviceSyncState {
            device_id,
            path: path.to_string(),
            local_hash: None,
            server_hash: Some(server_hash.to_string()),
            local_mtime: None,
            server_mtime,
            sync_status: FileSyncStatus::PendingDownload,
            last_synced_at: None,
            conflict_info_json: None,
        }
    };
    storage.upsert_device_sync_state(&state).await?;
    Ok(())
}
/// Generate a device token using UUIDs for randomness.
///
/// Two independent v4 UUIDs contribute 2 × 122 random bits, rendered as
/// 64 hex characters with no separators.
pub fn generate_device_token() -> String {
    let first = uuid::Uuid::new_v4();
    let second = uuid::Uuid::new_v4();
    format!("{}{}", first.simple(), second.simple())
}
/// Hash a device token for storage.
///
/// Tokens are high-entropy random strings (see `generate_device_token`),
/// so a plain unsalted BLAKE3 digest is sufficient here — unlike
/// passwords, they cannot be brute-forced from a dictionary.
pub fn hash_device_token(token: &str) -> String {
    blake3::hash(token.as_bytes()).to_hex().to_string()
}

View file

@ -0,0 +1,265 @@
//! Upload processing for managed storage.
//!
//! Handles file uploads, metadata extraction, and MediaItem creation
//! for files stored in managed content-addressable storage.
use std::collections::HashMap;
use std::path::Path;
use chrono::Utc;
use tokio::io::AsyncRead;
use tracing::{debug, info};
use crate::error::{PinakesError, Result};
use crate::managed_storage::ManagedStorageService;
use crate::media_type::MediaType;
use crate::metadata;
use crate::model::{MediaId, MediaItem, StorageMode, UploadResult};
use crate::storage::DynStorageBackend;
/// Process an upload from an async reader.
///
/// This function:
/// 1. Stores the file in managed storage
/// 2. Checks for duplicates by content hash
/// 3. Extracts metadata from the file
/// 4. Creates or updates the MediaItem
///
/// Returns an `UploadResult` carrying the media id, content hash, byte
/// size, and whether the upload deduplicated against an existing item.
pub async fn process_upload<R: AsyncRead + Unpin>(
    storage: &DynStorageBackend,
    managed: &ManagedStorageService,
    reader: R,
    original_filename: &str,
    mime_type: Option<&str>,
) -> Result<UploadResult> {
    // Store the file; managed storage reports the content hash and the
    // number of bytes actually written.
    let (content_hash, file_size) = managed.store_stream(reader).await?;
    // Check if we already have a media item with this hash; if so the
    // upload is a duplicate and no new item is created.
    if let Some(existing) = storage.get_media_by_hash(&content_hash).await? {
        debug!(hash = %content_hash, media_id = %existing.id, "upload matched existing media item");
        return Ok(UploadResult {
            media_id: existing.id,
            content_hash,
            was_duplicate: true,
            file_size,
        });
    }
    // Determine media type from the *original* filename's extension.
    let media_type = MediaType::from_path(Path::new(original_filename))
        .unwrap_or_else(|| MediaType::custom("unknown"));
    // Get the actual file path in managed storage for metadata extraction
    let blob_path = managed.path(&content_hash);
    // Extract metadata. Failures are deliberately swallowed (`.ok()`) so a
    // file with unreadable metadata can still be uploaded.
    let extracted = metadata::extract_metadata(&blob_path, media_type.clone()).ok();
    // Create or get blob record. Prefer the client-supplied MIME type,
    // falling back to the one implied by the detected media type.
    let mime = mime_type
        .map(String::from)
        .unwrap_or_else(|| media_type.mime_type().to_string());
    let _blob = storage
        .get_or_create_blob(&content_hash, file_size, &mime)
        .await?;
    // Create the media item. `file_name` holds the sanitized name while
    // `original_filename` preserves the raw client-supplied one.
    let now = Utc::now();
    let media_id = MediaId::new();
    let item = MediaItem {
        id: media_id,
        path: blob_path,
        file_name: sanitize_filename(original_filename),
        media_type,
        content_hash: content_hash.clone(),
        file_size,
        title: extracted.as_ref().and_then(|m| m.title.clone()),
        artist: extracted.as_ref().and_then(|m| m.artist.clone()),
        album: extracted.as_ref().and_then(|m| m.album.clone()),
        genre: extracted.as_ref().and_then(|m| m.genre.clone()),
        year: extracted.as_ref().and_then(|m| m.year),
        duration_secs: extracted.as_ref().and_then(|m| m.duration_secs),
        description: extracted.as_ref().and_then(|m| m.description.clone()),
        thumbnail_path: None,
        custom_fields: HashMap::new(),
        file_mtime: None,
        date_taken: extracted.as_ref().and_then(|m| m.date_taken),
        latitude: extracted.as_ref().and_then(|m| m.latitude),
        longitude: extracted.as_ref().and_then(|m| m.longitude),
        camera_make: extracted.as_ref().and_then(|m| m.camera_make.clone()),
        camera_model: extracted.as_ref().and_then(|m| m.camera_model.clone()),
        rating: None,
        perceptual_hash: None,
        storage_mode: StorageMode::Managed,
        original_filename: Some(original_filename.to_string()),
        uploaded_at: Some(now),
        storage_key: Some(content_hash.0.clone()),
        created_at: now,
        updated_at: now,
    };
    // Store the media item
    storage.insert_managed_media(&item).await?;
    info!(
        media_id = %media_id,
        hash = %content_hash,
        filename = %original_filename,
        size = file_size,
        "processed upload"
    );
    Ok(UploadResult {
        media_id,
        content_hash,
        was_duplicate: false,
        file_size,
    })
}
/// Process an upload from bytes.
///
/// Thin adapter over [`process_upload`]: wraps the in-memory slice in a
/// `Cursor` reader and delegates all the real work.
pub async fn process_upload_bytes(
    storage: &DynStorageBackend,
    managed: &ManagedStorageService,
    data: &[u8],
    original_filename: &str,
    mime_type: Option<&str>,
) -> Result<UploadResult> {
    process_upload(
        storage,
        managed,
        std::io::Cursor::new(data),
        original_filename,
        mime_type,
    )
    .await
}
/// Process an upload from a local file path.
///
/// This is useful for migrating existing external files to managed storage.
/// The display name defaults to the path's file name and the MIME type is
/// guessed from the extension.
pub async fn process_upload_file(
    storage: &DynStorageBackend,
    managed: &ManagedStorageService,
    path: &Path,
    original_filename: Option<&str>,
) -> Result<UploadResult> {
    // Derive a name when the caller did not supply one.
    let derived_name = path
        .file_name()
        .and_then(|n| n.to_str())
        .unwrap_or("unknown");
    let filename = match original_filename {
        Some(name) => name,
        None => derived_name,
    };
    // Guess the MIME type from the file extension.
    let guessed_mime = mime_guess::from_path(path).first().map(|m| m.to_string());
    let reader = tokio::io::BufReader::new(tokio::fs::File::open(path).await?);
    process_upload(storage, managed, reader, filename, guessed_mime.as_deref()).await
}
/// Migrate an existing external media item to managed storage.
///
/// Copies the item's current on-disk file into the content-addressable
/// store, verifies the content hash is unchanged, creates or references
/// the blob record, and rewrites the media item to point at the managed
/// copy.
///
/// Errors if the item is already managed, the source file is missing, or
/// the file's hash no longer matches the recorded one.
pub async fn migrate_to_managed(
    storage: &DynStorageBackend,
    managed: &ManagedStorageService,
    media_id: MediaId,
) -> Result<()> {
    let item = storage.get_media(media_id).await?;
    // Refuse a double migration.
    if item.storage_mode == StorageMode::Managed {
        return Err(PinakesError::InvalidOperation(
            "media item is already in managed storage".into(),
        ));
    }
    // Check if the external file exists
    if !item.path.exists() {
        return Err(PinakesError::FileNotFound(item.path.clone()));
    }
    // Store the file in managed storage
    let (new_hash, new_size) = managed.store_file(&item.path).await?;
    // Verify the hash matches (it should, unless the file changed).
    // NOTE(review): on mismatch the freshly stored blob stays in managed
    // storage with no DB reference — confirm an orphan sweep reclaims it.
    if new_hash.0 != item.content_hash.0 {
        return Err(PinakesError::StorageIntegrity(format!(
            "hash changed during migration: {} -> {}",
            item.content_hash, new_hash
        )));
    }
    // Get or create blob record
    let mime = item.media_type.mime_type().to_string();
    let _blob = storage
        .get_or_create_blob(&new_hash, new_size, &mime)
        .await?;
    // Update the media item to reference the managed copy. The original
    // external file is not deleted here.
    let mut updated = item.clone();
    updated.storage_mode = StorageMode::Managed;
    updated.storage_key = Some(new_hash.0.clone());
    updated.uploaded_at = Some(Utc::now());
    updated.path = managed.path(&new_hash);
    updated.updated_at = Utc::now();
    storage.update_media(&updated).await?;
    info!(
        media_id = %media_id,
        hash = %new_hash,
        "migrated media item to managed storage"
    );
    Ok(())
}
/// Sanitize a filename for storage.
///
/// Path separators (`/`, `\`) and NUL bytes become underscores,
/// surrounding whitespace is stripped, and the result is capped at
/// 255 characters.
fn sanitize_filename(name: &str) -> String {
    let replaced: String = name
        .chars()
        .map(|c| match c {
            '/' | '\\' | '\0' => '_',
            other => other,
        })
        .collect();
    replaced.trim().chars().take(255).collect()
}
/// Delete a managed media item and clean up the blob if orphaned.
///
/// Drops one reference from the item's blob, deletes the media item,
/// and — when no references remain — removes the blob bytes and its
/// database record.
pub async fn delete_managed_media(
    storage: &DynStorageBackend,
    managed: &ManagedStorageService,
    media_id: MediaId,
) -> Result<()> {
    let item = storage.get_media(media_id).await?;
    // Only items whose bytes live in managed storage may be deleted here.
    if item.storage_mode != StorageMode::Managed {
        return Err(PinakesError::InvalidOperation(
            "media item is not in managed storage".into(),
        ));
    }
    // Decrement blob reference count; `true` signals this was the last
    // reference.
    // NOTE(review): if `delete_media` below fails, the ref count has
    // already been decremented — confirm this is acceptable/recoverable.
    let should_delete = storage.decrement_blob_ref(&item.content_hash).await?;
    // Delete the media item
    storage.delete_media(media_id).await?;
    // If blob is orphaned, delete it from storage
    if should_delete {
        managed.delete(&item.content_hash).await?;
        storage.delete_blob(&item.content_hash).await?;
        info!(hash = %item.content_hash, "deleted orphaned blob");
    }
    Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_sanitize_filename() {
assert_eq!(sanitize_filename("test.txt"), "test.txt");
assert_eq!(sanitize_filename("path/to/file.txt"), "path_to_file.txt");
assert_eq!(sanitize_filename(" spaces "), "spaces");
assert_eq!(sanitize_filename("a".repeat(300).as_str()), "a".repeat(255));
}
}

View file

@ -43,6 +43,10 @@ async fn test_media_crud() {
camera_model: None,
rating: None,
perceptual_hash: None,
storage_mode: StorageMode::External,
original_filename: None,
uploaded_at: None,
storage_key: None,
created_at: now,
updated_at: now,
};
@ -129,6 +133,10 @@ async fn test_tags() {
camera_model: None,
rating: None,
perceptual_hash: None,
storage_mode: StorageMode::External,
original_filename: None,
uploaded_at: None,
storage_key: None,
created_at: now,
updated_at: now,
};
@ -189,6 +197,10 @@ async fn test_collections() {
camera_model: None,
rating: None,
perceptual_hash: None,
storage_mode: StorageMode::External,
original_filename: None,
uploaded_at: None,
storage_key: None,
created_at: now,
updated_at: now,
};
@ -244,6 +256,10 @@ async fn test_custom_fields() {
camera_model: None,
rating: None,
perceptual_hash: None,
storage_mode: StorageMode::External,
original_filename: None,
uploaded_at: None,
storage_key: None,
created_at: now,
updated_at: now,
};
@ -318,6 +334,10 @@ async fn test_search() {
camera_model: None,
rating: None,
perceptual_hash: None,
storage_mode: StorageMode::External,
original_filename: None,
uploaded_at: None,
storage_key: None,
created_at: now,
updated_at: now,
};
@ -457,6 +477,10 @@ async fn test_library_statistics_with_data() {
camera_model: None,
rating: None,
perceptual_hash: None,
storage_mode: StorageMode::External,
original_filename: None,
uploaded_at: None,
storage_key: None,
created_at: now,
updated_at: now,
};
@ -501,6 +525,10 @@ fn make_test_media(hash: &str) -> MediaItem {
camera_model: None,
rating: None,
perceptual_hash: None,
storage_mode: StorageMode::External,
original_filename: None,
uploaded_at: None,
storage_key: None,
created_at: now,
updated_at: now,
}

View file

@ -4,7 +4,7 @@ use std::sync::Arc;
use pinakes_core::integrity::detect_orphans;
use pinakes_core::media_type::{BuiltinMediaType, MediaType};
use pinakes_core::model::{ContentHash, MediaId, MediaItem};
use pinakes_core::model::{ContentHash, MediaId, MediaItem, StorageMode};
use pinakes_core::storage::{DynStorageBackend, StorageBackend, sqlite::SqliteBackend};
use tempfile::TempDir;
use uuid::Uuid;
@ -46,6 +46,10 @@ fn create_test_media_item(path: PathBuf, hash: &str) -> MediaItem {
camera_model: None,
rating: None,
perceptual_hash: None,
storage_mode: StorageMode::External,
original_filename: None,
uploaded_at: None,
storage_key: None,
created_at: chrono::Utc::now(),
updated_at: chrono::Utc::now(),
}

View file

@ -26,6 +26,7 @@ governor = { workspace = true }
tower_governor = { workspace = true }
tokio-util = { version = "0.7", features = ["io"] }
argon2 = { workspace = true }
blake3 = { workspace = true }
rand = "0.9"
percent-encoding = "2"
http = "1.0"

View file

@ -72,6 +72,8 @@ pub fn create_router_with_tls(
// Public routes (no auth required)
let public_routes = Router::new()
.route("/s/{token}", get(routes::social::access_shared_media))
// Enhanced sharing: public share access
.route("/shared/{token}", get(routes::shares::access_shared))
// Kubernetes-style health probes (no auth required for orchestration)
.route("/health/live", get(routes::health::liveness))
.route("/health/ready", get(routes::health::readiness));
@ -216,6 +218,25 @@ pub fn create_router_with_tls(
.route(
"/media/{id}/stream/dash/{profile}/{segment}",
get(routes::streaming::dash_segment),
)
// Managed storage (read)
.route("/media/{id}/download", get(routes::upload::download_file))
.route("/managed/stats", get(routes::upload::managed_stats))
// Sync (read)
.route("/sync/devices", get(routes::sync::list_devices))
.route("/sync/devices/{id}", get(routes::sync::get_device))
.route("/sync/changes", get(routes::sync::get_changes))
.route("/sync/conflicts", get(routes::sync::list_conflicts))
.route("/sync/upload/{id}", get(routes::sync::get_upload_status))
.route("/sync/download/{*path}", get(routes::sync::download_file))
// Enhanced sharing (read)
.route("/shares/outgoing", get(routes::shares::list_outgoing))
.route("/shares/incoming", get(routes::shares::list_incoming))
.route("/shares/{id}", get(routes::shares::get_share))
.route("/shares/{id}/activity", get(routes::shares::get_activity))
.route(
"/notifications/shares",
get(routes::shares::get_notifications),
);
// Write routes: Editor+ required
@ -371,6 +392,49 @@ pub fn create_router_with_tls(
post(routes::transcode::start_transcode),
)
.route("/transcode/{id}", delete(routes::transcode::cancel_session))
// Managed storage (write)
.route("/upload", post(routes::upload::upload_file))
.route(
"/media/{id}/move-to-managed",
post(routes::upload::move_to_managed),
)
// Sync (write)
.route("/sync/devices", post(routes::sync::register_device))
.route("/sync/devices/{id}", put(routes::sync::update_device))
.route("/sync/devices/{id}", delete(routes::sync::delete_device))
.route(
"/sync/devices/{id}/token",
post(routes::sync::regenerate_token),
)
.route("/sync/report", post(routes::sync::report_changes))
.route("/sync/ack", post(routes::sync::acknowledge_changes))
.route(
"/sync/conflicts/{id}/resolve",
post(routes::sync::resolve_conflict),
)
.route("/sync/upload", post(routes::sync::create_upload))
.route(
"/sync/upload/{id}/chunks/{index}",
put(routes::sync::upload_chunk),
)
.route(
"/sync/upload/{id}/complete",
post(routes::sync::complete_upload),
)
.route("/sync/upload/{id}", delete(routes::sync::cancel_upload))
// Enhanced sharing (write)
.route("/shares", post(routes::shares::create_share))
.route("/shares/{id}", patch(routes::shares::update_share))
.route("/shares/{id}", delete(routes::shares::delete_share))
.route("/shares/batch/delete", post(routes::shares::batch_delete))
.route(
"/notifications/shares/{id}/read",
post(routes::shares::mark_notification_read),
)
.route(
"/notifications/shares/read-all",
post(routes::shares::mark_all_read),
)
.layer(middleware::from_fn(auth::require_editor));
// Admin-only routes: destructive/config operations

View file

@ -997,3 +997,418 @@ impl From<pinakes_core::transcode::TranscodeSession> for TranscodeSessionRespons
pub struct CreateTranscodeRequest {
pub profile: String,
}
// ===== Managed Storage / Upload =====
/// Response returned after an upload is processed.
#[derive(Debug, Serialize)]
pub struct UploadResponse {
    /// Identifier of the created (or matched) media item.
    pub media_id: String,
    /// Content hash of the uploaded bytes.
    pub content_hash: String,
    /// True when the upload deduplicated against existing content.
    pub was_duplicate: bool,
    /// Size of the uploaded file in bytes.
    pub file_size: u64,
}
/// Flatten the core upload result into its wire form (ids stringified).
impl From<pinakes_core::model::UploadResult> for UploadResponse {
    fn from(result: pinakes_core::model::UploadResult) -> Self {
        Self {
            media_id: result.media_id.0.to_string(),
            content_hash: result.content_hash.0,
            was_duplicate: result.was_duplicate,
            file_size: result.file_size,
        }
    }
}
/// Aggregate statistics for the managed (content-addressable) store.
#[derive(Debug, Serialize)]
pub struct ManagedStorageStatsResponse {
    pub total_blobs: u64,
    pub total_size_bytes: u64,
    pub orphaned_blobs: u64,
    pub deduplication_ratio: f64,
}
impl From<pinakes_core::model::ManagedStorageStats> for ManagedStorageStatsResponse {
    fn from(stats: pinakes_core::model::ManagedStorageStats) -> Self {
        Self {
            total_blobs: stats.total_blobs,
            total_size_bytes: stats.total_size_bytes,
            orphaned_blobs: stats.orphaned_blobs,
            deduplication_ratio: stats.deduplication_ratio,
        }
    }
}
// ===== Sync =====
/// Request body for registering a new sync device.
#[derive(Debug, Deserialize)]
pub struct RegisterDeviceRequest {
    pub name: String,
    pub device_type: String,
    pub client_version: String,
    pub os_info: Option<String>,
}
/// A registered sync device as exposed over the API.
#[derive(Debug, Serialize)]
pub struct DeviceResponse {
    pub id: String,
    pub name: String,
    pub device_type: String,
    pub client_version: String,
    pub os_info: Option<String>,
    pub last_sync_at: Option<DateTime<Utc>>,
    pub last_seen_at: DateTime<Utc>,
    // Presumably the last acknowledged change-log sequence — confirm
    // against the sync handlers.
    pub sync_cursor: Option<i64>,
    pub enabled: bool,
    pub created_at: DateTime<Utc>,
}
/// Convert the core device record, stringifying id and enum fields.
impl From<pinakes_core::sync::SyncDevice> for DeviceResponse {
    fn from(d: pinakes_core::sync::SyncDevice) -> Self {
        Self {
            id: d.id.0.to_string(),
            name: d.name,
            device_type: d.device_type.to_string(),
            client_version: d.client_version,
            os_info: d.os_info,
            last_sync_at: d.last_sync_at,
            last_seen_at: d.last_seen_at,
            sync_cursor: d.sync_cursor,
            enabled: d.enabled,
            created_at: d.created_at,
        }
    }
}
/// Registration result: the device record plus its token.
#[derive(Debug, Serialize)]
pub struct DeviceRegistrationResponse {
    pub device: DeviceResponse,
    pub device_token: String,
}
/// Request body for updating a device; all fields are optional.
#[derive(Debug, Deserialize)]
pub struct UpdateDeviceRequest {
    pub name: Option<String>,
    pub enabled: Option<bool>,
}
/// Query parameters for fetching sync changes after a cursor.
#[derive(Debug, Deserialize)]
pub struct GetChangesParams {
    pub cursor: Option<i64>,
    pub limit: Option<u64>,
}
/// A single entry from the server-side sync change log.
#[derive(Debug, Serialize)]
pub struct SyncChangeResponse {
    pub id: String,
    /// Monotonic log sequence number.
    pub sequence: i64,
    pub change_type: String,
    pub media_id: Option<String>,
    pub path: String,
    pub content_hash: Option<String>,
    pub file_size: Option<u64>,
    pub timestamp: DateTime<Utc>,
}
impl From<pinakes_core::sync::SyncLogEntry> for SyncChangeResponse {
    fn from(e: pinakes_core::sync::SyncLogEntry) -> Self {
        Self {
            id: e.id.to_string(),
            sequence: e.sequence,
            change_type: e.change_type.to_string(),
            media_id: e.media_id.map(|id| id.0.to_string()),
            path: e.path,
            content_hash: e.content_hash.map(|h| h.0),
            file_size: e.file_size,
            timestamp: e.timestamp,
        }
    }
}
/// A page of sync changes plus the cursor to resume from.
#[derive(Debug, Serialize)]
pub struct ChangesResponse {
    pub changes: Vec<SyncChangeResponse>,
    pub cursor: i64,
    pub has_more: bool,
}
/// A single change observed on the client and reported to the server.
#[derive(Debug, Deserialize)]
pub struct ClientChangeReport {
    pub path: String,
    pub change_type: String,
    pub content_hash: Option<String>,
    pub file_size: Option<u64>,
    // Assumed to be an epoch timestamp — confirm units with the client.
    pub local_mtime: Option<i64>,
}
/// Request body for reporting a batch of client-side changes.
#[derive(Debug, Deserialize)]
pub struct ReportChangesRequest {
    pub changes: Vec<ClientChangeReport>,
}
/// Outcome of a change report.
#[derive(Debug, Serialize)]
pub struct ReportChangesResponse {
    /// Paths whose changes were accepted.
    pub accepted: Vec<String>,
    /// Conflicts detected between client and server state.
    pub conflicts: Vec<ConflictResponse>,
    /// Paths whose content the client must upload.
    pub upload_required: Vec<String>,
}
/// A sync conflict between a client copy and the server copy of a path.
#[derive(Debug, Serialize)]
pub struct ConflictResponse {
    pub id: String,
    pub path: String,
    pub local_hash: String,
    pub server_hash: String,
    pub detected_at: DateTime<Utc>,
}
impl From<pinakes_core::sync::SyncConflict> for ConflictResponse {
    fn from(c: pinakes_core::sync::SyncConflict) -> Self {
        Self {
            id: c.id.to_string(),
            path: c.path,
            local_hash: c.local_hash,
            server_hash: c.server_hash,
            detected_at: c.detected_at,
        }
    }
}
/// Request body for resolving a conflict.
#[derive(Debug, Deserialize)]
pub struct ResolveConflictRequest {
    /// Resolution strategy, as a string identifier.
    pub resolution: String,
}
/// Request body for opening a chunked upload session.
#[derive(Debug, Deserialize)]
pub struct CreateUploadSessionRequest {
    pub target_path: String,
    pub expected_hash: String,
    pub expected_size: u64,
    /// Requested chunk size in bytes, if the client has a preference.
    pub chunk_size: Option<u64>,
}
/// State of a chunked upload session.
#[derive(Debug, Serialize)]
pub struct UploadSessionResponse {
    pub id: String,
    pub target_path: String,
    pub expected_hash: String,
    pub expected_size: u64,
    pub chunk_size: u64,
    pub chunk_count: u64,
    pub status: String,
    pub created_at: DateTime<Utc>,
    pub expires_at: DateTime<Utc>,
}
impl From<pinakes_core::sync::UploadSession> for UploadSessionResponse {
    fn from(s: pinakes_core::sync::UploadSession) -> Self {
        Self {
            id: s.id.to_string(),
            target_path: s.target_path,
            expected_hash: s.expected_hash.0,
            expected_size: s.expected_size,
            chunk_size: s.chunk_size,
            chunk_count: s.chunk_count,
            status: s.status.to_string(),
            created_at: s.created_at,
            expires_at: s.expires_at,
        }
    }
}
/// Acknowledgement that a single chunk was received.
#[derive(Debug, Serialize)]
pub struct ChunkUploadedResponse {
    pub chunk_index: u64,
    pub received: bool,
}
/// Request body acknowledging changes up to a cursor.
#[derive(Debug, Deserialize)]
pub struct AcknowledgeChangesRequest {
    pub cursor: i64,
}
// ===== Enhanced Sharing =====
/// Request body for creating a share.
#[derive(Debug, Deserialize)]
pub struct CreateShareRequest {
    /// One of "media", "collection", "tag", "saved_search".
    pub target_type: String,
    /// UUID of the target, as a string.
    pub target_id: String,
    /// One of "public_link", "user", "group".
    pub recipient_type: String,
    pub recipient_user_id: Option<Uuid>,
    pub recipient_group_id: Option<Uuid>,
    /// Optional password protecting a public link.
    pub password: Option<String>,
    pub permissions: Option<SharePermissionsRequest>,
    pub note: Option<String>,
    pub expires_in_hours: Option<u64>,
    pub inherit_to_children: Option<bool>,
}
/// Per-capability permission flags; unset fields fall back to defaults.
#[derive(Debug, Deserialize)]
pub struct SharePermissionsRequest {
    pub can_view: Option<bool>,
    pub can_download: Option<bool>,
    pub can_edit: Option<bool>,
    pub can_delete: Option<bool>,
    pub can_reshare: Option<bool>,
    pub can_add: Option<bool>,
}
/// A share flattened for the API: target and recipient enums are exposed
/// as (type, id/token) string pairs.
#[derive(Debug, Serialize)]
pub struct ShareResponse {
    pub id: String,
    pub target_type: String,
    pub target_id: String,
    pub owner_id: String,
    pub recipient_type: String,
    pub recipient_user_id: Option<String>,
    pub recipient_group_id: Option<String>,
    /// Present only for public-link shares.
    pub public_token: Option<String>,
    pub permissions: SharePermissionsResponse,
    pub note: Option<String>,
    pub expires_at: Option<DateTime<Utc>>,
    pub access_count: u64,
    pub last_accessed: Option<DateTime<Utc>>,
    pub inherit_to_children: bool,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}
/// Effective permission flags on a share.
#[derive(Debug, Serialize)]
pub struct SharePermissionsResponse {
    pub can_view: bool,
    pub can_download: bool,
    pub can_edit: bool,
    pub can_delete: bool,
    pub can_reshare: bool,
    pub can_add: bool,
}
impl From<pinakes_core::sharing::SharePermissions> for SharePermissionsResponse {
    fn from(p: pinakes_core::sharing::SharePermissions) -> Self {
        Self {
            can_view: p.can_view,
            can_download: p.can_download,
            can_edit: p.can_edit,
            can_delete: p.can_delete,
            can_reshare: p.can_reshare,
            can_add: p.can_add,
        }
    }
}
/// Flatten a core `Share` into its wire representation.
///
/// The `target` and `recipient` enums are destructured into a string
/// discriminant plus the matching id/token fields; variants that carry no
/// such payload (e.g. `Federated`) leave those fields `None`.
impl From<pinakes_core::sharing::Share> for ShareResponse {
    fn from(s: pinakes_core::sharing::Share) -> Self {
        // Split the target enum into (type name, target uuid).
        let (target_type, target_id) = match &s.target {
            pinakes_core::sharing::ShareTarget::Media { media_id } => {
                ("media".to_string(), media_id.0.to_string())
            }
            pinakes_core::sharing::ShareTarget::Collection { collection_id } => {
                ("collection".to_string(), collection_id.to_string())
            }
            pinakes_core::sharing::ShareTarget::Tag { tag_id } => {
                ("tag".to_string(), tag_id.to_string())
            }
            pinakes_core::sharing::ShareTarget::SavedSearch { search_id } => {
                ("saved_search".to_string(), search_id.to_string())
            }
        };
        // Split the recipient enum: only public links expose a token,
        // only user/group shares expose the corresponding id.
        let (recipient_type, recipient_user_id, recipient_group_id, public_token) =
            match &s.recipient {
                pinakes_core::sharing::ShareRecipient::PublicLink { token, .. } => {
                    ("public_link".to_string(), None, None, Some(token.clone()))
                }
                pinakes_core::sharing::ShareRecipient::User { user_id } => {
                    ("user".to_string(), Some(user_id.0.to_string()), None, None)
                }
                pinakes_core::sharing::ShareRecipient::Group { group_id } => {
                    ("group".to_string(), None, Some(group_id.to_string()), None)
                }
                pinakes_core::sharing::ShareRecipient::Federated { .. } => {
                    ("federated".to_string(), None, None, None)
                }
            };
        Self {
            id: s.id.0.to_string(),
            target_type,
            target_id,
            owner_id: s.owner_id.0.to_string(),
            recipient_type,
            recipient_user_id,
            recipient_group_id,
            public_token,
            permissions: s.permissions.into(),
            note: s.note,
            expires_at: s.expires_at,
            access_count: s.access_count,
            last_accessed: s.last_accessed,
            inherit_to_children: s.inherit_to_children,
            created_at: s.created_at,
            updated_at: s.updated_at,
        }
    }
}
/// Partial update for an existing share; all fields are optional.
#[derive(Debug, Deserialize)]
pub struct UpdateShareRequest {
    pub permissions: Option<SharePermissionsRequest>,
    pub note: Option<String>,
    pub expires_at: Option<DateTime<Utc>>,
    pub inherit_to_children: Option<bool>,
}
/// One activity record logged against a share.
#[derive(Debug, Serialize)]
pub struct ShareActivityResponse {
    pub id: String,
    pub share_id: String,
    /// Acting user, when the action was performed by a known user.
    pub actor_id: Option<String>,
    pub actor_ip: Option<String>,
    pub action: String,
    pub details: Option<String>,
    pub timestamp: DateTime<Utc>,
}
impl From<pinakes_core::sharing::ShareActivity> for ShareActivityResponse {
    fn from(a: pinakes_core::sharing::ShareActivity) -> Self {
        Self {
            id: a.id.to_string(),
            share_id: a.share_id.0.to_string(),
            actor_id: a.actor_id.map(|id| id.0.to_string()),
            actor_ip: a.actor_ip,
            action: a.action.to_string(),
            details: a.details,
            timestamp: a.timestamp,
        }
    }
}
/// A share-related notification for a user.
#[derive(Debug, Serialize)]
pub struct ShareNotificationResponse {
    pub id: String,
    pub share_id: String,
    pub notification_type: String,
    pub is_read: bool,
    pub created_at: DateTime<Utc>,
}
impl From<pinakes_core::sharing::ShareNotification> for ShareNotificationResponse {
    fn from(n: pinakes_core::sharing::ShareNotification) -> Self {
        Self {
            id: n.id.to_string(),
            share_id: n.share_id.0.to_string(),
            notification_type: n.notification_type.to_string(),
            is_read: n.is_read,
            created_at: n.created_at,
        }
    }
}
/// Request body for deleting several shares at once.
#[derive(Debug, Deserialize)]
pub struct BatchDeleteSharesRequest {
    pub share_ids: Vec<Uuid>,
}
/// Parameters for accessing a shared resource by token.
#[derive(Debug, Deserialize)]
pub struct AccessSharedRequest {
    /// Password for password-protected public links.
    pub password: Option<String>,
}

View file

@ -69,3 +69,31 @@ impl From<pinakes_core::error::PinakesError> for ApiError {
Self(e)
}
}
/// Convenience constructors mapping common HTTP error semantics onto the
/// underlying `PinakesError` variants.
///
/// NOTE(review): the eventual HTTP status depends on how `ApiError`'s
/// response conversion maps each variant — confirm the pairings below
/// against that mapping.
impl ApiError {
    /// Client sent malformed or invalid input.
    pub fn bad_request(msg: impl Into<String>) -> Self {
        Self(pinakes_core::error::PinakesError::InvalidOperation(
            msg.into(),
        ))
    }
    /// The requested resource does not exist.
    pub fn not_found(msg: impl Into<String>) -> Self {
        Self(pinakes_core::error::PinakesError::NotFound(msg.into()))
    }
    /// Internal failure; currently carried by the `Database` variant.
    pub fn internal(msg: impl Into<String>) -> Self {
        Self(pinakes_core::error::PinakesError::Database(msg.into()))
    }
    /// Caller is authenticated but not permitted to do this.
    pub fn forbidden(msg: impl Into<String>) -> Self {
        Self(pinakes_core::error::PinakesError::Authorization(msg.into()))
    }
    /// Caller is not authenticated.
    pub fn unauthorized(msg: impl Into<String>) -> Self {
        Self(pinakes_core::error::PinakesError::Authentication(
            msg.into(),
        ))
    }
}
/// Shorthand result type for API handler functions.
pub type ApiResult<T> = Result<T, ApiError>;

View file

@ -45,17 +45,20 @@ struct Cli {
migrate_only: bool,
}
fn resolve_config_path(explicit: Option<&std::path::Path>) -> PathBuf {
/// Resolve the configuration file path.
/// Returns (path, was_explicit) where was_explicit indicates if the path was
/// explicitly provided by the user (vs discovered).
fn resolve_config_path(explicit: Option<&std::path::Path>) -> (PathBuf, bool) {
if let Some(path) = explicit {
return path.to_path_buf();
return (path.to_path_buf(), true);
}
// Check current directory
let local = PathBuf::from("pinakes.toml");
if local.exists() {
return local;
return (local, false);
}
// XDG default
Config::default_config_path()
(Config::default_config_path(), false)
}
#[tokio::main]
@ -89,11 +92,17 @@ async fn main() -> Result<()> {
}
}
let config_path = resolve_config_path(cli.config.as_deref());
let (config_path, was_explicit) = resolve_config_path(cli.config.as_deref());
let mut config = if config_path.exists() {
info!(path = %config_path.display(), "loading configuration from file");
Config::from_file(&config_path)?
} else if was_explicit {
// User explicitly provided a config path that doesn't exist - this is an error
return Err(anyhow::anyhow!(
"configuration file not found: {}",
config_path.display()
));
} else {
info!(
"using default configuration (no config file found at {})",
@ -486,6 +495,34 @@ async fn main() -> Result<()> {
});
}
// Initialize managed storage service if enabled
let managed_storage = {
let config_read = config_arc.read().await;
if config_read.managed_storage.enabled {
let service = pinakes_core::managed_storage::ManagedStorageService::new(
config_read.managed_storage.storage_dir.clone(),
config_read.managed_storage.max_upload_size,
config_read.managed_storage.verify_on_read,
);
match service.init().await {
Ok(()) => {
info!(
path = %config_read.managed_storage.storage_dir.display(),
"managed storage initialized"
);
Some(Arc::new(service))
}
Err(e) => {
tracing::error!(error = %e, "failed to initialize managed storage");
None
}
}
} else {
tracing::info!("managed storage disabled in configuration");
None
}
};
let state = AppState {
storage: storage.clone(),
config: config_arc.clone(),
@ -496,6 +533,7 @@ async fn main() -> Result<()> {
scheduler,
plugin_manager,
transcode_service,
managed_storage,
};
// Periodic session cleanup (every 15 minutes)

View file

@ -1,6 +1,6 @@
use axum::{
Json, Router,
extract::{Path, Query, State},
extract::{Extension, Path, Query, State},
http::StatusCode,
response::IntoResponse,
routing::{get, put},
@ -13,7 +13,7 @@ use pinakes_core::{
model::{AuthorInfo, BookMetadata, MediaId, Pagination, ReadingProgress, ReadingStatus},
};
use crate::{dto::MediaResponse, error::ApiError, state::AppState};
use crate::{auth::resolve_user_id, dto::MediaResponse, error::ApiError, state::AppState};
/// Book metadata response DTO
#[derive(Debug, Serialize, Deserialize)]
@ -240,15 +240,15 @@ pub async fn get_author_books(
/// Get reading progress for a book
pub async fn get_reading_progress(
State(state): State<AppState>,
Extension(username): Extension<String>,
Path(media_id): Path<Uuid>,
) -> Result<impl IntoResponse, ApiError> {
// TODO: Get user_id from auth context
let user_id = Uuid::new_v4(); // Placeholder
let user_id = resolve_user_id(&state.storage, &username).await?;
let media_id = MediaId(media_id);
let progress = state
.storage
.get_reading_progress(user_id, media_id)
.get_reading_progress(user_id.0, media_id)
.await?
.ok_or(ApiError(PinakesError::NotFound(
"Reading progress not found".to_string(),
@ -260,16 +260,16 @@ pub async fn get_reading_progress(
/// Update reading progress for a book
pub async fn update_reading_progress(
State(state): State<AppState>,
Extension(username): Extension<String>,
Path(media_id): Path<Uuid>,
Json(req): Json<UpdateProgressRequest>,
) -> Result<impl IntoResponse, ApiError> {
// TODO: Get user_id from auth context
let user_id = Uuid::new_v4(); // Placeholder
let user_id = resolve_user_id(&state.storage, &username).await?;
let media_id = MediaId(media_id);
state
.storage
.update_reading_progress(user_id, media_id, req.current_page)
.update_reading_progress(user_id.0, media_id, req.current_page)
.await?;
Ok(StatusCode::NO_CONTENT)
@ -278,14 +278,14 @@ pub async fn update_reading_progress(
/// Get user's reading list
pub async fn get_reading_list(
State(state): State<AppState>,
Extension(username): Extension<String>,
Query(params): Query<ReadingListQuery>,
) -> Result<impl IntoResponse, ApiError> {
// TODO: Get user_id from auth context
let user_id = Uuid::new_v4(); // Placeholder
let user_id = resolve_user_id(&state.storage, &username).await?;
let items = state
.storage
.get_reading_list(user_id, params.status)
.get_reading_list(user_id.0, params.status)
.await?;
let response: Vec<MediaResponse> = items.into_iter().map(MediaResponse::from).collect();

View file

@ -19,11 +19,14 @@ pub mod saved_searches;
pub mod scan;
pub mod scheduled_tasks;
pub mod search;
pub mod shares;
pub mod social;
pub mod statistics;
pub mod streaming;
pub mod subtitles;
pub mod sync;
pub mod tags;
pub mod transcode;
pub mod upload;
pub mod users;
pub mod webhooks;

View file

@ -27,6 +27,12 @@ pub struct TimelineQuery {
pub group_by: GroupBy,
pub year: Option<i32>,
pub month: Option<u32>,
#[serde(default = "default_timeline_limit")]
pub limit: u64,
}
fn default_timeline_limit() -> u64 {
10000
}
/// Timeline group response
@ -62,12 +68,12 @@ pub async fn get_timeline(
State(state): State<AppState>,
Query(query): Query<TimelineQuery>,
) -> Result<impl IntoResponse, ApiError> {
// Query photos with date_taken
// Query photos with date_taken (limit is configurable, defaults to 10000)
let all_media = state
.storage
.list_media(&pinakes_core::model::Pagination {
offset: 0,
limit: 10000, // TODO: Make this more efficient with streaming
limit: query.limit.min(50000), // Cap at 50000 for safety
sort: Some("date_taken DESC".to_string()),
})
.await?;

View file

@ -0,0 +1,543 @@
use axum::{
Json,
extract::{ConnectInfo, Extension, Path, Query, State},
http::StatusCode,
};
use chrono::Utc;
use std::net::SocketAddr;
use uuid::Uuid;
use crate::auth::resolve_user_id;
use crate::dto::{
AccessSharedRequest, BatchDeleteSharesRequest, CreateShareRequest, MediaResponse,
PaginationParams, ShareActivityResponse, ShareNotificationResponse, ShareResponse,
UpdateShareRequest,
};
use crate::error::{ApiError, ApiResult};
use crate::state::AppState;
use pinakes_core::model::MediaId;
use pinakes_core::model::Pagination;
use pinakes_core::sharing::{
Share, ShareActivity, ShareActivityAction, ShareId, ShareNotification, ShareNotificationType,
SharePermissions, ShareRecipient, ShareTarget, generate_share_token, hash_share_password,
verify_share_password,
};
use pinakes_core::users::UserId;
/// Create a new share
/// POST /api/shares
pub async fn create_share(
State(state): State<AppState>,
Extension(username): Extension<String>,
Json(req): Json<CreateShareRequest>,
) -> ApiResult<Json<ShareResponse>> {
let config = state.config.read().await;
if !config.sharing.enabled {
return Err(ApiError::bad_request("Sharing is not enabled"));
}
// Validate public links are allowed
if req.recipient_type == "public_link" && !config.sharing.allow_public_links {
return Err(ApiError::bad_request("Public links are not allowed"));
}
drop(config);
let owner_id = resolve_user_id(&state.storage, &username).await?;
// Parse target
let target_id: Uuid = req
.target_id
.parse()
.map_err(|_| ApiError::bad_request("Invalid target_id"))?;
let target = match req.target_type.as_str() {
"media" => ShareTarget::Media {
media_id: MediaId(target_id),
},
"collection" => ShareTarget::Collection {
collection_id: target_id,
},
"tag" => ShareTarget::Tag { tag_id: target_id },
"saved_search" => ShareTarget::SavedSearch {
search_id: target_id,
},
_ => return Err(ApiError::bad_request("Invalid target_type")),
};
// Parse recipient
let recipient = match req.recipient_type.as_str() {
"public_link" => {
let token = generate_share_token();
let password_hash = req.password.as_ref().map(|p| hash_share_password(p));
ShareRecipient::PublicLink {
token,
password_hash,
}
}
"user" => {
let recipient_user_id = req.recipient_user_id.ok_or_else(|| {
ApiError::bad_request("recipient_user_id required for user share")
})?;
ShareRecipient::User {
user_id: UserId(recipient_user_id),
}
}
"group" => {
let group_id = req.recipient_group_id.ok_or_else(|| {
ApiError::bad_request("recipient_group_id required for group share")
})?;
ShareRecipient::Group { group_id }
}
_ => return Err(ApiError::bad_request("Invalid recipient_type")),
};
// Parse permissions
let permissions = if let Some(perms) = req.permissions {
SharePermissions {
can_view: perms.can_view.unwrap_or(true),
can_download: perms.can_download.unwrap_or(false),
can_edit: perms.can_edit.unwrap_or(false),
can_delete: perms.can_delete.unwrap_or(false),
can_reshare: perms.can_reshare.unwrap_or(false),
can_add: perms.can_add.unwrap_or(false),
}
} else {
SharePermissions::view_only()
};
// Calculate expiration
let expires_at = req
.expires_in_hours
.map(|hours| Utc::now() + chrono::Duration::hours(hours as i64));
let share = Share {
id: ShareId(Uuid::now_v7()),
target,
owner_id,
recipient,
permissions,
note: req.note,
expires_at,
access_count: 0,
last_accessed: None,
inherit_to_children: req.inherit_to_children.unwrap_or(true),
parent_share_id: None,
created_at: Utc::now(),
updated_at: Utc::now(),
};
let created = state
.storage
.create_share(&share)
.await
.map_err(|e| ApiError::internal(format!("Failed to create share: {}", e)))?;
// Send notification to recipient if it's a user share
if let ShareRecipient::User { user_id } = &created.recipient {
let notification = ShareNotification {
id: Uuid::now_v7(),
user_id: *user_id,
share_id: created.id,
notification_type: ShareNotificationType::NewShare,
is_read: false,
created_at: Utc::now(),
};
// Ignore notification errors
let _ = state.storage.create_share_notification(&notification).await;
}
Ok(Json(created.into()))
}
/// List outgoing shares (shares I created)
/// GET /api/shares/outgoing
pub async fn list_outgoing(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Query(params): Query<PaginationParams>,
) -> ApiResult<Json<Vec<ShareResponse>>> {
    let owner = resolve_user_id(&state.storage, &username).await?;
    // Fill in pagination defaults: first page, 50 items.
    let page = Pagination {
        offset: params.offset.unwrap_or(0),
        limit: params.limit.unwrap_or(50),
        sort: params.sort,
    };
    match state.storage.list_shares_by_owner(owner, &page).await {
        Ok(shares) => Ok(Json(shares.into_iter().map(Into::into).collect())),
        Err(e) => Err(ApiError::internal(format!("Failed to list shares: {}", e))),
    }
}
/// List incoming shares (shares shared with me)
/// GET /api/shares/incoming
pub async fn list_incoming(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Query(params): Query<PaginationParams>,
) -> ApiResult<Json<Vec<ShareResponse>>> {
    let recipient = resolve_user_id(&state.storage, &username).await?;
    // Fill in pagination defaults: first page, 50 items.
    let page = Pagination {
        offset: params.offset.unwrap_or(0),
        limit: params.limit.unwrap_or(50),
        sort: params.sort,
    };
    match state.storage.list_shares_for_user(recipient, &page).await {
        Ok(shares) => Ok(Json(shares.into_iter().map(Into::into).collect())),
        Err(e) => Err(ApiError::internal(format!("Failed to list shares: {}", e))),
    }
}
/// Get share details
/// GET /api/shares/{id}
pub async fn get_share(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Path(id): Path<Uuid>,
) -> ApiResult<Json<ShareResponse>> {
    let caller = resolve_user_id(&state.storage, &username).await?;
    let share = state
        .storage
        .get_share(ShareId(id))
        .await
        .map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?;
    // The owner may always look; a direct user-recipient may too. Group and
    // public-link recipients are not granted access through this endpoint.
    let is_owner = share.owner_id == caller;
    let is_recipient = matches!(
        &share.recipient,
        ShareRecipient::User { user_id } if *user_id == caller
    );
    if !(is_owner || is_recipient) {
        return Err(ApiError::forbidden("Not authorized to view this share"));
    }
    Ok(Json(share.into()))
}
/// Update a share
/// PATCH /api/shares/{id}
pub async fn update_share(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Path(id): Path<Uuid>,
    Json(req): Json<UpdateShareRequest>,
) -> ApiResult<Json<ShareResponse>> {
    let caller = resolve_user_id(&state.storage, &username).await?;
    let mut share = state
        .storage
        .get_share(ShareId(id))
        .await
        .map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?;
    // Mutation is an owner-only operation.
    if share.owner_id != caller {
        return Err(ApiError::forbidden("Only the owner can update this share"));
    }
    // Partial update: absent request fields leave the stored values alone.
    if let Some(perms) = req.permissions {
        let current = share.permissions;
        share.permissions = SharePermissions {
            can_view: perms.can_view.unwrap_or(current.can_view),
            can_download: perms.can_download.unwrap_or(current.can_download),
            can_edit: perms.can_edit.unwrap_or(current.can_edit),
            can_delete: perms.can_delete.unwrap_or(current.can_delete),
            can_reshare: perms.can_reshare.unwrap_or(current.can_reshare),
            can_add: perms.can_add.unwrap_or(current.can_add),
        };
    }
    if let Some(note) = req.note {
        share.note = Some(note);
    }
    if let Some(expires_at) = req.expires_at {
        share.expires_at = Some(expires_at);
    }
    if let Some(inherit) = req.inherit_to_children {
        share.inherit_to_children = inherit;
    }
    share.updated_at = Utc::now();
    let updated = state
        .storage
        .update_share(&share)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to update share: {}", e)))?;
    // Best-effort: tell a direct user-recipient the share changed; a
    // notification failure never fails the update itself.
    if let ShareRecipient::User { user_id } = &updated.recipient {
        let notification = ShareNotification {
            id: Uuid::now_v7(),
            user_id: *user_id,
            share_id: updated.id,
            notification_type: ShareNotificationType::ShareUpdated,
            is_read: false,
            created_at: Utc::now(),
        };
        let _ = state.storage.create_share_notification(&notification).await;
    }
    Ok(Json(updated.into()))
}
/// Delete (revoke) a share
/// DELETE /api/shares/{id}
pub async fn delete_share(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Path(id): Path<Uuid>,
) -> ApiResult<StatusCode> {
    let caller = resolve_user_id(&state.storage, &username).await?;
    let share = state
        .storage
        .get_share(ShareId(id))
        .await
        .map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?;
    // Revocation is an owner-only operation.
    if share.owner_id != caller {
        return Err(ApiError::forbidden("Only the owner can revoke this share"));
    }
    // Best-effort: warn a direct user-recipient before the share disappears.
    if let ShareRecipient::User { user_id } = &share.recipient {
        let notification = ShareNotification {
            id: Uuid::now_v7(),
            user_id: *user_id,
            share_id: share.id,
            notification_type: ShareNotificationType::ShareRevoked,
            is_read: false,
            created_at: Utc::now(),
        };
        let _ = state.storage.create_share_notification(&notification).await;
    }
    state
        .storage
        .delete_share(ShareId(id))
        .await
        .map_err(|e| ApiError::internal(format!("Failed to delete share: {}", e)))?;
    Ok(StatusCode::NO_CONTENT)
}
/// Batch delete shares
/// POST /api/shares/batch/delete
pub async fn batch_delete(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Json(req): Json<BatchDeleteSharesRequest>,
) -> ApiResult<Json<serde_json::Value>> {
    let caller = resolve_user_id(&state.storage, &username).await?;
    let share_ids: Vec<ShareId> = req.share_ids.into_iter().map(ShareId).collect();
    // Every listed share must exist and belong to the caller before any
    // deletion happens; a single failure aborts the whole batch.
    for &share_id in &share_ids {
        let share = state
            .storage
            .get_share(share_id)
            .await
            .map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?;
        if share.owner_id != caller {
            return Err(ApiError::forbidden(format!(
                "Not authorized to delete share {}",
                share_id.0
            )));
        }
    }
    let deleted = state
        .storage
        .batch_delete_shares(&share_ids)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to batch delete: {}", e)))?;
    Ok(Json(serde_json::json!({ "deleted": deleted })))
}
/// Access a public shared resource
/// GET /api/shared/{token}
pub async fn access_shared(
State(state): State<AppState>,
Path(token): Path<String>,
Query(params): Query<AccessSharedRequest>,
ConnectInfo(addr): ConnectInfo<SocketAddr>,
) -> ApiResult<Json<MediaResponse>> {
let share = state
.storage
.get_share_by_token(&token)
.await
.map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?;
// Check expiration
if let Some(expires_at) = share.expires_at {
if Utc::now() > expires_at {
return Err(ApiError::not_found("Share has expired"));
}
}
// Check password if required
if let ShareRecipient::PublicLink { password_hash, .. } = &share.recipient {
if let Some(hash) = password_hash {
let provided_password = params
.password
.as_ref()
.ok_or_else(|| ApiError::unauthorized("Password required"))?;
if !verify_share_password(provided_password, hash) {
// Log failed attempt
let activity = ShareActivity {
id: Uuid::now_v7(),
share_id: share.id,
actor_id: None,
actor_ip: Some(addr.ip().to_string()),
action: ShareActivityAction::PasswordFailed,
details: None,
timestamp: Utc::now(),
};
let _ = state.storage.record_share_activity(&activity).await;
return Err(ApiError::unauthorized("Invalid password"));
}
}
}
// Record access
state
.storage
.record_share_access(share.id)
.await
.map_err(|e| ApiError::internal(format!("Failed to record access: {}", e)))?;
// Log the access
let activity = ShareActivity {
id: Uuid::now_v7(),
share_id: share.id,
actor_id: None,
actor_ip: Some(addr.ip().to_string()),
action: ShareActivityAction::Accessed,
details: None,
timestamp: Utc::now(),
};
let _ = state.storage.record_share_activity(&activity).await;
// Return the shared content
match &share.target {
ShareTarget::Media { media_id } => {
let item = state
.storage
.get_media(*media_id)
.await
.map_err(|e| ApiError::not_found(format!("Media not found: {}", e)))?;
Ok(Json(item.into()))
}
_ => {
// For collections/tags, return a placeholder
// Full implementation would return the collection contents
Err(ApiError::bad_request(
"Collection/tag sharing not yet fully implemented",
))
}
}
}
/// Get share activity log
/// GET /api/shares/{id}/activity
pub async fn get_activity(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Path(id): Path<Uuid>,
    Query(params): Query<PaginationParams>,
) -> ApiResult<Json<Vec<ShareActivityResponse>>> {
    let caller = resolve_user_id(&state.storage, &username).await?;
    let share = state
        .storage
        .get_share(ShareId(id))
        .await
        .map_err(|e| ApiError::not_found(format!("Share not found: {}", e)))?;
    // The activity log is visible to the owner only.
    if share.owner_id != caller {
        return Err(ApiError::forbidden(
            "Only the owner can view share activity",
        ));
    }
    // Fill in pagination defaults: first page, 50 items.
    let page = Pagination {
        offset: params.offset.unwrap_or(0),
        limit: params.limit.unwrap_or(50),
        sort: params.sort,
    };
    let activity = state
        .storage
        .get_share_activity(ShareId(id), &page)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to get activity: {}", e)))?;
    Ok(Json(activity.into_iter().map(Into::into).collect()))
}
/// Get unread share notifications
/// GET /api/notifications/shares
pub async fn get_notifications(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
) -> ApiResult<Json<Vec<ShareNotificationResponse>>> {
    let user_id = resolve_user_id(&state.storage, &username).await?;
    match state.storage.get_unread_notifications(user_id).await {
        Ok(items) => Ok(Json(items.into_iter().map(Into::into).collect())),
        Err(e) => Err(ApiError::internal(format!(
            "Failed to get notifications: {}",
            e
        ))),
    }
}
/// Mark a notification as read
/// POST /api/notifications/shares/{id}/read
///
/// NOTE(review): the caller's identity is extracted but unused
/// (`_username`), so this handler does not verify that the notification
/// belongs to the caller — any authenticated user can mark any
/// notification id as read. Confirm whether `mark_notification_read`
/// enforces ownership in the storage layer.
pub async fn mark_notification_read(
    State(state): State<AppState>,
    Extension(_username): Extension<String>,
    Path(id): Path<Uuid>,
) -> ApiResult<StatusCode> {
    state
        .storage
        .mark_notification_read(id)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to mark as read: {}", e)))?;
    Ok(StatusCode::OK)
}
/// Mark all notifications as read
/// POST /api/notifications/shares/read-all
pub async fn mark_all_read(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
) -> ApiResult<StatusCode> {
    let user_id = resolve_user_id(&state.storage, &username).await?;
    if let Err(e) = state.storage.mark_all_notifications_read(user_id).await {
        return Err(ApiError::internal(format!(
            "Failed to mark all as read: {}",
            e
        )));
    }
    Ok(StatusCode::OK)
}

View file

@ -0,0 +1,743 @@
use std::io::SeekFrom;
use std::path::Path as FilePath;

use axum::{
    Json,
    body::Body,
    extract::{Extension, Path, Query, State},
    http::{HeaderMap, StatusCode, header},
    response::IntoResponse,
};
use chrono::Utc;
use tokio::io::{AsyncReadExt, AsyncSeekExt};
use tokio_util::io::ReaderStream;
use uuid::Uuid;

use pinakes_core::config::ConflictResolution;
use pinakes_core::model::ContentHash;
use pinakes_core::sync::{
    ChunkInfo, DeviceId, DeviceType, SyncChangeType, SyncConflict, SyncDevice, SyncLogEntry,
    UploadSession, UploadStatus, generate_device_token, hash_device_token, update_device_cursor,
};

use crate::auth::resolve_user_id;
use crate::dto::{
    AcknowledgeChangesRequest, ChangesResponse, ChunkUploadedResponse, ConflictResponse,
    CreateUploadSessionRequest, DeviceRegistrationResponse, DeviceResponse, GetChangesParams,
    RegisterDeviceRequest, ReportChangesRequest, ReportChangesResponse, ResolveConflictRequest,
    SyncChangeResponse, UpdateDeviceRequest, UploadSessionResponse,
};
use crate::error::{ApiError, ApiResult};
use crate::state::AppState;
const DEFAULT_CHUNK_SIZE: u64 = 4 * 1024 * 1024; // 4MB
const DEFAULT_CHANGES_LIMIT: u64 = 100;
/// Register a new sync device
/// POST /api/sync/devices
pub async fn register_device(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Json(req): Json<RegisterDeviceRequest>,
) -> ApiResult<Json<DeviceRegistrationResponse>> {
    // Sync must be switched on in the server configuration.
    {
        let config = state.config.read().await;
        if !config.sync.enabled {
            return Err(ApiError::bad_request("Sync is not enabled"));
        }
    }
    let user_id = resolve_user_id(&state.storage, &username).await?;
    let device_type: DeviceType = req
        .device_type
        .parse()
        .map_err(|_| ApiError::bad_request("Invalid device type"))?;
    // The plaintext token is returned to the caller exactly once; only its
    // hash is handed to storage.
    let device_token = generate_device_token();
    let token_hash = hash_device_token(&device_token);
    let now = Utc::now();
    let device = SyncDevice {
        id: DeviceId(Uuid::now_v7()),
        user_id,
        name: req.name,
        device_type,
        client_version: req.client_version,
        os_info: req.os_info,
        last_sync_at: None,
        last_seen_at: now,
        sync_cursor: Some(0),
        enabled: true,
        created_at: now,
        updated_at: now,
    };
    let registered = state
        .storage
        .register_device(&device, &token_hash)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to register device: {}", e)))?;
    Ok(Json(DeviceRegistrationResponse {
        device: registered.into(),
        device_token,
    }))
}
/// List user's sync devices
/// GET /api/sync/devices
pub async fn list_devices(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
) -> ApiResult<Json<Vec<DeviceResponse>>> {
    let user_id = resolve_user_id(&state.storage, &username).await?;
    match state.storage.list_user_devices(user_id).await {
        Ok(devices) => Ok(Json(devices.into_iter().map(Into::into).collect())),
        Err(e) => Err(ApiError::internal(format!("Failed to list devices: {}", e))),
    }
}
/// Get device details
/// GET /api/sync/devices/{id}
pub async fn get_device(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Path(id): Path<Uuid>,
) -> ApiResult<Json<DeviceResponse>> {
    let caller = resolve_user_id(&state.storage, &username).await?;
    let device = state
        .storage
        .get_device(DeviceId(id))
        .await
        .map_err(|e| ApiError::not_found(format!("Device not found: {}", e)))?;
    // Devices are private to the user that registered them.
    if device.user_id != caller {
        return Err(ApiError::forbidden("Not authorized to access this device"));
    }
    Ok(Json(device.into()))
}
/// Update a device
/// PUT /api/sync/devices/{id}
pub async fn update_device(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Path(id): Path<Uuid>,
    Json(req): Json<UpdateDeviceRequest>,
) -> ApiResult<Json<DeviceResponse>> {
    let caller = resolve_user_id(&state.storage, &username).await?;
    let mut device = state
        .storage
        .get_device(DeviceId(id))
        .await
        .map_err(|e| ApiError::not_found(format!("Device not found: {}", e)))?;
    // Devices are private to the user that registered them.
    if device.user_id != caller {
        return Err(ApiError::forbidden("Not authorized to update this device"));
    }
    // Partial update: absent request fields leave the stored values alone.
    if let Some(name) = req.name {
        device.name = name;
    }
    if let Some(enabled) = req.enabled {
        device.enabled = enabled;
    }
    if let Err(e) = state.storage.update_device(&device).await {
        return Err(ApiError::internal(format!(
            "Failed to update device: {}",
            e
        )));
    }
    Ok(Json(device.into()))
}
/// Delete a device
/// DELETE /api/sync/devices/{id}
pub async fn delete_device(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Path(id): Path<Uuid>,
) -> ApiResult<StatusCode> {
    let caller = resolve_user_id(&state.storage, &username).await?;
    let device = state
        .storage
        .get_device(DeviceId(id))
        .await
        .map_err(|e| ApiError::not_found(format!("Device not found: {}", e)))?;
    // Devices are private to the user that registered them.
    if device.user_id != caller {
        return Err(ApiError::forbidden("Not authorized to delete this device"));
    }
    if let Err(e) = state.storage.delete_device(DeviceId(id)).await {
        return Err(ApiError::internal(format!(
            "Failed to delete device: {}",
            e
        )));
    }
    Ok(StatusCode::NO_CONTENT)
}
/// Regenerate device token
/// POST /api/sync/devices/{id}/token
pub async fn regenerate_token(
    State(state): State<AppState>,
    Extension(username): Extension<String>,
    Path(id): Path<Uuid>,
) -> ApiResult<Json<DeviceRegistrationResponse>> {
    let caller = resolve_user_id(&state.storage, &username).await?;
    let device = state
        .storage
        .get_device(DeviceId(id))
        .await
        .map_err(|e| ApiError::not_found(format!("Device not found: {}", e)))?;
    // Devices are private to the user that registered them.
    if device.user_id != caller {
        return Err(ApiError::forbidden(
            "Not authorized to regenerate token for this device",
        ));
    }
    // Mint a fresh plaintext token; only its hash is persisted.
    let new_token = generate_device_token();
    let token_hash = hash_device_token(&new_token);
    // Re-register with new token (this updates the token hash)
    let updated = state
        .storage
        .register_device(&device, &token_hash)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to regenerate token: {}", e)))?;
    Ok(Json(DeviceRegistrationResponse {
        device: updated.into(),
        device_token: new_token,
    }))
}
/// Get changes since cursor
/// GET /api/sync/changes
///
/// Returns up to `limit` changes after `cursor`, together with the new
/// cursor and a `has_more` flag (detected by fetching one extra row).
pub async fn get_changes(
    State(state): State<AppState>,
    Query(params): Query<GetChangesParams>,
) -> ApiResult<Json<ChangesResponse>> {
    let config = state.config.read().await;
    if !config.sync.enabled {
        return Err(ApiError::bad_request("Sync is not enabled"));
    }
    drop(config);
    let cursor = params.cursor.unwrap_or(0);
    let limit = params.limit.unwrap_or(DEFAULT_CHANGES_LIMIT);
    // Fetch one extra row to detect whether more changes remain.
    // `saturating_add` guards against an overflow panic when a client
    // sends `limit = u64::MAX` (previously `limit + 1` could wrap).
    let changes = state
        .storage
        .get_changes_since(cursor, limit.saturating_add(1))
        .await
        .map_err(|e| ApiError::internal(format!("Failed to get changes: {}", e)))?;
    let has_more = changes.len() > limit as usize;
    let changes: Vec<SyncChangeResponse> = changes
        .into_iter()
        .take(limit as usize)
        .map(Into::into)
        .collect();
    // If nothing was returned, the cursor stays where it was.
    let new_cursor = changes.last().map(|c| c.sequence).unwrap_or(cursor);
    Ok(Json(ChangesResponse {
        changes,
        cursor: new_cursor,
        has_more,
    }))
}
/// Report local changes from client
/// POST /api/sync/report
///
/// For each reported change, compares the client's content hash against the
/// server's copy at the same path (when one exists) and sorts the path into
/// one of three buckets:
/// - `accepted`: nothing further is needed from the client,
/// - `upload_required`: the client should upload its version,
/// - `conflicts`: both sides diverged and resolution is manual/keep-both.
pub async fn report_changes(
    State(state): State<AppState>,
    Extension(_username): Extension<String>,
    Json(req): Json<ReportChangesRequest>,
) -> ApiResult<Json<ReportChangesResponse>> {
    let config = state.config.read().await;
    if !config.sync.enabled {
        return Err(ApiError::bad_request("Sync is not enabled"));
    }
    let conflict_resolution = config.sync.default_conflict_resolution.clone();
    drop(config);
    let mut accepted = Vec::new();
    let mut conflicts = Vec::new();
    let mut upload_required = Vec::new();
    for change in req.changes {
        // Compare against the server's copy when the client supplied a hash.
        if let Some(content_hash) = &change.content_hash {
            let server_state = state
                .storage
                .get_media_by_path(FilePath::new(&change.path))
                .await
                .ok()
                .flatten();
            if let Some(server_item) = server_state {
                let client_hash = ContentHash(content_hash.clone());
                if server_item.content_hash == client_hash {
                    // Server already holds identical content — no upload is
                    // needed. (Previously this fell through to the
                    // "modified" arm and asked the client to re-upload an
                    // unchanged file.)
                    accepted.push(change.path);
                    continue;
                }
                // Divergent content: build the conflict record.
                let conflict = SyncConflict {
                    id: Uuid::now_v7(),
                    device_id: DeviceId(Uuid::nil()), // Will be set by device context
                    path: change.path.clone(),
                    local_hash: content_hash.clone(),
                    local_mtime: change.local_mtime.unwrap_or(0),
                    server_hash: server_item.content_hash.to_string(),
                    server_mtime: server_item.updated_at.timestamp(),
                    detected_at: Utc::now(),
                    resolved_at: None,
                    resolution: None,
                };
                // Auto-resolve if configured
                match conflict_resolution {
                    ConflictResolution::ServerWins => {
                        // Client should download server version
                        accepted.push(change.path);
                    }
                    ConflictResolution::ClientWins => {
                        // Client should upload
                        upload_required.push(change.path);
                    }
                    ConflictResolution::KeepBoth | ConflictResolution::Manual => {
                        // NOTE(review): the conflict is returned to the client
                        // but not persisted via storage here — confirm whether
                        // it should also be recorded server-side.
                        conflicts.push(conflict.into());
                    }
                }
                continue;
            }
        }
        // No conflict detected; decide based on the reported change type.
        match change.change_type.as_str() {
            "created" | "modified" => {
                if change.content_hash.is_some() {
                    upload_required.push(change.path);
                } else {
                    accepted.push(change.path);
                }
            }
            "deleted" => {
                // Record the deletion in the sync log.
                let entry = SyncLogEntry {
                    id: Uuid::now_v7(),
                    sequence: 0, // Will be assigned by storage
                    change_type: SyncChangeType::Deleted,
                    media_id: None,
                    path: change.path.clone(),
                    content_hash: None,
                    file_size: None,
                    metadata_json: None,
                    changed_by_device: None,
                    timestamp: Utc::now(),
                };
                // Best-effort: only acknowledge if the log entry was written.
                if state.storage.record_sync_change(&entry).await.is_ok() {
                    accepted.push(change.path);
                }
            }
            _ => {
                accepted.push(change.path);
            }
        }
    }
    Ok(Json(ReportChangesResponse {
        accepted,
        conflicts,
        upload_required,
    }))
}
/// Acknowledge processed changes
/// POST /api/sync/ack
pub async fn acknowledge_changes(
    State(state): State<AppState>,
    Extension(_username): Extension<String>,
    headers: HeaderMap,
    Json(req): Json<AcknowledgeChangesRequest>,
) -> ApiResult<StatusCode> {
    // The acting device identifies itself via the token header.
    let device_token = match headers.get("X-Device-Token").and_then(|v| v.to_str().ok()) {
        Some(token) => token,
        None => return Err(ApiError::bad_request("Missing X-Device-Token header")),
    };
    let device = state
        .storage
        .get_device_by_token(&hash_device_token(device_token))
        .await
        .map_err(|e| ApiError::internal(format!("Failed to get device: {}", e)))?
        .ok_or_else(|| ApiError::unauthorized("Invalid device token"))?;
    // Persist the client's new sync position.
    update_device_cursor(&state.storage, device.id, req.cursor)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to update cursor: {}", e)))?;
    Ok(StatusCode::OK)
}
/// List unresolved conflicts
/// GET /api/sync/conflicts
pub async fn list_conflicts(
    State(state): State<AppState>,
    Extension(_username): Extension<String>,
    headers: HeaderMap,
) -> ApiResult<Json<Vec<ConflictResponse>>> {
    // The acting device identifies itself via the token header.
    let device_token = headers
        .get("X-Device-Token")
        .and_then(|v| v.to_str().ok())
        .ok_or_else(|| ApiError::bad_request("Missing X-Device-Token header"))?;
    let device = state
        .storage
        .get_device_by_token(&hash_device_token(device_token))
        .await
        .map_err(|e| ApiError::internal(format!("Failed to get device: {}", e)))?
        .ok_or_else(|| ApiError::unauthorized("Invalid device token"))?;
    match state.storage.get_unresolved_conflicts(device.id).await {
        Ok(conflicts) => Ok(Json(conflicts.into_iter().map(Into::into).collect())),
        Err(e) => Err(ApiError::internal(format!(
            "Failed to get conflicts: {}",
            e
        ))),
    }
}
/// Resolve a sync conflict
/// POST /api/sync/conflicts/{id}/resolve
pub async fn resolve_conflict(
    State(state): State<AppState>,
    Extension(_username): Extension<String>,
    Path(id): Path<Uuid>,
    Json(req): Json<ResolveConflictRequest>,
) -> ApiResult<StatusCode> {
    // Map the wire string onto the core resolution enum.
    let resolution = match req.resolution.as_str() {
        "server_wins" => ConflictResolution::ServerWins,
        "client_wins" => ConflictResolution::ClientWins,
        "keep_both" => ConflictResolution::KeepBoth,
        _ => return Err(ApiError::bad_request("Invalid resolution type")),
    };
    if let Err(e) = state.storage.resolve_conflict(id, resolution).await {
        return Err(ApiError::internal(format!(
            "Failed to resolve conflict: {}",
            e
        )));
    }
    Ok(StatusCode::OK)
}
/// Create an upload session for chunked upload
/// POST /api/sync/upload
///
/// The client declares the file's expected hash and size up front; the
/// server answers with a session describing the chunk size and count.
pub async fn create_upload(
    State(state): State<AppState>,
    Extension(_username): Extension<String>,
    headers: HeaderMap,
    Json(req): Json<CreateUploadSessionRequest>,
) -> ApiResult<Json<UploadSessionResponse>> {
    let config = state.config.read().await;
    if !config.sync.enabled {
        return Err(ApiError::bad_request("Sync is not enabled"));
    }
    let upload_timeout_hours = config.sync.upload_timeout_hours;
    drop(config);
    let device_token = headers
        .get("X-Device-Token")
        .and_then(|v| v.to_str().ok())
        .ok_or_else(|| ApiError::bad_request("Missing X-Device-Token header"))?;
    let token_hash = hash_device_token(device_token);
    let device = state
        .storage
        .get_device_by_token(&token_hash)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to get device: {}", e)))?
        .ok_or_else(|| ApiError::unauthorized("Invalid device token"))?;
    let chunk_size = req.chunk_size.unwrap_or(DEFAULT_CHUNK_SIZE);
    // A client-supplied chunk size of zero would divide by zero below.
    if chunk_size == 0 {
        return Err(ApiError::bad_request("chunk_size must be greater than zero"));
    }
    // Ceiling division written overflow-free: the naive
    // `(expected_size + chunk_size - 1) / chunk_size` can wrap for very
    // large expected sizes.
    let chunk_count =
        req.expected_size / chunk_size + u64::from(req.expected_size % chunk_size != 0);
    let now = Utc::now();
    let session = UploadSession {
        id: Uuid::now_v7(),
        device_id: device.id,
        target_path: req.target_path,
        expected_hash: ContentHash(req.expected_hash),
        expected_size: req.expected_size,
        chunk_size,
        chunk_count,
        status: UploadStatus::Pending,
        created_at: now,
        expires_at: now + chrono::Duration::hours(upload_timeout_hours as i64),
        last_activity: now,
    };
    state
        .storage
        .create_upload_session(&session)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to create upload session: {}", e)))?;
    Ok(Json(session.into()))
}
/// Upload a chunk
/// PUT /api/sync/upload/{id}/chunks/{index}
pub async fn upload_chunk(
    State(state): State<AppState>,
    Path((session_id, chunk_index)): Path<(Uuid, u64)>,
    _headers: HeaderMap,
    body: axum::body::Bytes,
) -> ApiResult<Json<ChunkUploadedResponse>> {
    let session = state
        .storage
        .get_upload_session(session_id)
        .await
        .map_err(|e| ApiError::not_found(format!("Upload session not found: {}", e)))?;
    // Reject writes against dead sessions or out-of-range indices.
    if session.status == UploadStatus::Expired {
        return Err(ApiError::bad_request("Upload session has expired"));
    }
    if chunk_index >= session.chunk_count {
        return Err(ApiError::bad_request("Invalid chunk index"));
    }
    // Fingerprint the received bytes with blake3.
    let chunk_hash = blake3::hash(&body).to_hex().to_string();
    let chunk = ChunkInfo {
        upload_id: session_id,
        chunk_index,
        offset: chunk_index * session.chunk_size,
        size: body.len() as u64,
        hash: chunk_hash,
        received_at: Utc::now(),
    };
    state
        .storage
        .record_chunk(session_id, &chunk)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to record chunk: {}", e)))?;
    // Store the chunk data (would integrate with managed storage)
    // For now, this is a placeholder - actual implementation would write to temp storage
    Ok(Json(ChunkUploadedResponse {
        chunk_index,
        received: true,
    }))
}
/// Get upload session status
/// GET /api/sync/upload/{id}
pub async fn get_upload_status(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> ApiResult<Json<UploadSessionResponse>> {
    match state.storage.get_upload_session(id).await {
        Ok(session) => Ok(Json(session.into())),
        Err(e) => Err(ApiError::not_found(format!(
            "Upload session not found: {}",
            e
        ))),
    }
}
/// Complete an upload session
/// POST /api/sync/upload/{id}/complete
pub async fn complete_upload(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> ApiResult<StatusCode> {
    let mut session = state
        .storage
        .get_upload_session(id)
        .await
        .map_err(|e| ApiError::not_found(format!("Upload session not found: {}", e)))?;
    // Every chunk must have been recorded before completion.
    let chunks = state
        .storage
        .get_upload_chunks(id)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to get chunks: {}", e)))?;
    if chunks.len() != session.chunk_count as usize {
        return Err(ApiError::bad_request(format!(
            "Missing chunks: expected {}, got {}",
            session.chunk_count,
            chunks.len()
        )));
    }
    // NOTE(review): the assembled content is not re-hashed against
    // `expected_hash` here — confirm whether that verification happens in
    // the storage layer.
    session.status = UploadStatus::Completed;
    state
        .storage
        .update_upload_session(&session)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to update session: {}", e)))?;
    // Publish the new file into the sync change log.
    let entry = SyncLogEntry {
        id: Uuid::now_v7(),
        sequence: 0,
        change_type: SyncChangeType::Created,
        media_id: None,
        path: session.target_path,
        content_hash: Some(session.expected_hash),
        file_size: Some(session.expected_size),
        metadata_json: None,
        changed_by_device: Some(session.device_id),
        timestamp: Utc::now(),
    };
    state
        .storage
        .record_sync_change(&entry)
        .await
        .map_err(|e| ApiError::internal(format!("Failed to record change: {}", e)))?;
    Ok(StatusCode::OK)
}
/// Cancel an upload session
/// DELETE /api/sync/upload/{id}
pub async fn cancel_upload(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> ApiResult<StatusCode> {
    let mut session = state
        .storage
        .get_upload_session(id)
        .await
        .map_err(|e| ApiError::not_found(format!("Upload session not found: {}", e)))?;
    // Flag the session as cancelled and persist the new status.
    session.status = UploadStatus::Cancelled;
    if let Err(e) = state.storage.update_upload_session(&session).await {
        return Err(ApiError::internal(format!(
            "Failed to cancel session: {}",
            e
        )));
    }
    Ok(StatusCode::NO_CONTENT)
}
/// Download a file for sync (supports Range header)
/// GET /api/sync/download/{*path}
pub async fn download_file(
State(state): State<AppState>,
Path(path): Path<String>,
headers: HeaderMap,
) -> ApiResult<impl IntoResponse> {
let item = state
.storage
.get_media_by_path(FilePath::new(&path))
.await
.map_err(|e| ApiError::internal(format!("Failed to get media: {}", e)))?
.ok_or_else(|| ApiError::not_found("File not found"))?;
let file = tokio::fs::File::open(&item.path)
.await
.map_err(|e| ApiError::not_found(format!("File not found: {}", e)))?;
let metadata = file
.metadata()
.await
.map_err(|e| ApiError::internal(format!("Failed to get metadata: {}", e)))?;
let file_size = metadata.len();
// Check for Range header
if let Some(range_header) = headers.get(header::RANGE) {
if let Ok(range_str) = range_header.to_str() {
if let Some(range) = parse_range_header(range_str, file_size) {
// Partial content response
let (start, end) = range;
let length = end - start + 1;
let file = tokio::fs::File::open(&item.path)
.await
.map_err(|e| ApiError::internal(format!("Failed to reopen file: {}", e)))?;
let stream = ReaderStream::new(file);
let body = Body::from_stream(stream);
return Ok((
StatusCode::PARTIAL_CONTENT,
[
(header::CONTENT_TYPE, item.media_type.mime_type()),
(header::CONTENT_LENGTH, length.to_string()),
(
header::CONTENT_RANGE,
format!("bytes {}-{}/{}", start, end, file_size),
),
(header::ACCEPT_RANGES, "bytes".to_string()),
],
body,
)
.into_response());
}
}
}
// Full content response
let stream = ReaderStream::new(file);
let body = Body::from_stream(stream);
Ok((
StatusCode::OK,
[
(header::CONTENT_TYPE, item.media_type.mime_type()),
(header::CONTENT_LENGTH, file_size.to_string()),
(header::ACCEPT_RANGES, "bytes".to_string()),
],
body,
)
.into_response())
}
/// Parse an HTTP `Range` header of the form `bytes=start-end`,
/// `bytes=start-` (open end), or the suffix form `bytes=-len`
/// (the final `len` bytes).
///
/// Returns the inclusive `(start, end)` byte range, or `None` when the
/// header is malformed, unsatisfiable, or the file is empty.
fn parse_range_header(range: &str, file_size: u64) -> Option<(u64, u64)> {
    let spec = range.strip_prefix("bytes=")?;
    // No byte of an empty file is satisfiable; this also guards the
    // `file_size - 1` computations below against underflow.
    if file_size == 0 {
        return None;
    }
    let (start_str, end_str) = spec.split_once('-')?;
    let (start, end) = if start_str.is_empty() {
        // Suffix form "-len": the last `len` bytes of the file.
        let len: u64 = end_str.parse().ok()?;
        if len == 0 {
            return None;
        }
        (file_size.saturating_sub(len), file_size - 1)
    } else {
        let start: u64 = start_str.parse().ok()?;
        let end: u64 = if end_str.is_empty() {
            // Open-ended "start-": through the last byte.
            file_size - 1
        } else {
            end_str.parse().ok()?
        };
        (start, end)
    };
    if start > end || end >= file_size {
        return None;
    }
    Some((start, end))
}

View file

@ -0,0 +1,169 @@
use axum::{
Json,
extract::{Multipart, Path, State},
http::{StatusCode, header},
response::IntoResponse,
};
use tokio_util::io::ReaderStream;
use uuid::Uuid;
use crate::dto::{ManagedStorageStatsResponse, UploadResponse};
use crate::error::{ApiError, ApiResult};
use crate::state::AppState;
use pinakes_core::model::MediaId;
use pinakes_core::upload;
/// Upload a file to managed storage
/// POST /api/upload
pub async fn upload_file(
    State(state): State<AppState>,
    mut multipart: Multipart,
) -> ApiResult<Json<UploadResponse>> {
    let managed_storage = state
        .managed_storage
        .as_ref()
        .ok_or_else(|| ApiError::bad_request("Managed storage is not enabled"))?;
    // Double-check the feature flag in the live configuration.
    {
        let config = state.config.read().await;
        if !config.managed_storage.enabled {
            return Err(ApiError::bad_request("Managed storage is not enabled"));
        }
    }
    // The first multipart field is treated as the uploaded file.
    let field = multipart
        .next_field()
        .await
        .map_err(|e| ApiError::bad_request(format!("Failed to read multipart field: {}", e)))?
        .ok_or_else(|| ApiError::bad_request("No file provided"))?;
    // Capture metadata before consuming the field body.
    let original_filename = field.file_name().unwrap_or("unknown").to_string();
    let content_type = field
        .content_type()
        .unwrap_or("application/octet-stream")
        .to_string();
    let data = field
        .bytes()
        .await
        .map_err(|e| ApiError::bad_request(format!("Failed to read file data: {}", e)))?;
    // Hand off to the shared upload pipeline.
    let result = upload::process_upload_bytes(
        &state.storage,
        managed_storage.as_ref(),
        &data,
        &original_filename,
        Some(&content_type),
    )
    .await
    .map_err(|e| ApiError::internal(format!("Upload failed: {}", e)))?;
    Ok(Json(result.into()))
}
/// Download a managed file
/// GET /api/media/{id}/download
pub async fn download_file(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> ApiResult<impl IntoResponse> {
    let item = state
        .storage
        .get_media(MediaId(id))
        .await
        .map_err(|e| ApiError::not_found(format!("Media not found: {}", e)))?;
    let managed_storage = state
        .managed_storage
        .as_ref()
        .ok_or_else(|| ApiError::bad_request("Managed storage is not enabled"))?;
    // Managed items stream from content-addressable storage; external items
    // stream straight from their original on-disk path.
    let body = if item.storage_mode == pinakes_core::model::StorageMode::Managed {
        let blob = managed_storage
            .open(&item.content_hash)
            .await
            .map_err(|e| ApiError::not_found(format!("Blob not found: {}", e)))?;
        axum::body::Body::from_stream(ReaderStream::new(blob))
    } else {
        let file = tokio::fs::File::open(&item.path)
            .await
            .map_err(|e| ApiError::not_found(format!("File not found: {}", e)))?;
        axum::body::Body::from_stream(ReaderStream::new(file))
    };
    let content_type = item.media_type.mime_type();
    // Prefer the name the file was uploaded under; fall back to the indexed one.
    let filename = item.original_filename.unwrap_or(item.file_name);
    Ok((
        [
            (header::CONTENT_TYPE, content_type),
            (
                header::CONTENT_DISPOSITION,
                format!("attachment; filename=\"{}\"", filename),
            ),
        ],
        body,
    ))
}
/// Migrate an external file to managed storage
/// POST /api/media/{id}/move-to-managed
///
/// Returns 204 No Content on success; 400 when managed storage is disabled.
pub async fn move_to_managed(
    State(state): State<AppState>,
    Path(id): Path<Uuid>,
) -> ApiResult<StatusCode> {
    // Guard clause: the feature must be configured at startup.
    let Some(managed_storage) = state.managed_storage.as_ref() else {
        return Err(ApiError::bad_request("Managed storage is not enabled"));
    };
    upload::migrate_to_managed(&state.storage, managed_storage.as_ref(), MediaId(id))
        .await
        .map_err(|e| ApiError::internal(format!("Migration failed: {}", e)))?;
    Ok(StatusCode::NO_CONTENT)
}
/// Get managed storage statistics
/// GET /api/managed/stats
pub async fn managed_stats(
    State(state): State<AppState>,
) -> ApiResult<Json<ManagedStorageStatsResponse>> {
    // Stats come from the storage layer; map any failure to a 500.
    match state.storage.managed_storage_stats().await {
        Ok(stats) => Ok(Json(stats.into())),
        Err(e) => Err(ApiError::internal(format!("Failed to get stats: {}", e))),
    }
}

View file

@ -6,6 +6,7 @@ use tokio::sync::RwLock;
use pinakes_core::cache::CacheLayer;
use pinakes_core::config::Config;
use pinakes_core::jobs::JobQueue;
use pinakes_core::managed_storage::ManagedStorageService;
use pinakes_core::plugin::PluginManager;
use pinakes_core::scan::ScanProgress;
use pinakes_core::scheduler::TaskScheduler;
@ -26,4 +27,5 @@ pub struct AppState {
pub scheduler: Arc<TaskScheduler>,
pub plugin_manager: Option<Arc<PluginManager>>,
pub transcode_service: Option<Arc<TranscodeService>>,
pub managed_storage: Option<Arc<ManagedStorageService>>,
}

View file

@ -11,9 +11,9 @@ use tower::ServiceExt;
use pinakes_core::cache::CacheLayer;
use pinakes_core::config::{
AccountsConfig, AnalyticsConfig, CloudConfig, Config, DirectoryConfig, EnrichmentConfig,
JobsConfig, PhotoConfig, PluginsConfig, ScanningConfig, ServerConfig, SqliteConfig,
StorageBackendType, StorageConfig, ThumbnailConfig, TlsConfig, TranscodingConfig, UiConfig,
UserAccount, UserRole, WebhookConfig,
JobsConfig, ManagedStorageConfig, PhotoConfig, PluginsConfig, ScanningConfig, ServerConfig,
SharingConfig, SqliteConfig, StorageBackendType, StorageConfig, SyncConfig, ThumbnailConfig,
TlsConfig, TranscodingConfig, UiConfig, UserAccount, UserRole, WebhookConfig,
};
use pinakes_core::jobs::JobQueue;
use pinakes_core::storage::StorageBackend;
@ -127,6 +127,9 @@ fn default_config() -> Config {
cloud: CloudConfig::default(),
analytics: AnalyticsConfig::default(),
photos: PhotoConfig::default(),
managed_storage: ManagedStorageConfig::default(),
sync: SyncConfig::default(),
sharing: SharingConfig::default(),
}
}
@ -156,6 +159,7 @@ async fn setup_app() -> axum::Router {
scheduler: Arc::new(scheduler),
plugin_manager: None,
transcode_service: None,
managed_storage: None,
};
pinakes_server::app::create_router(state)
@ -227,6 +231,7 @@ async fn setup_app_with_auth() -> (axum::Router, String, String, String) {
scheduler: Arc::new(scheduler),
plugin_manager: None,
transcode_service: None,
managed_storage: None,
};
let app = pinakes_server::app::create_router(state);

View file

@ -11,9 +11,9 @@ use tower::ServiceExt;
use pinakes_core::cache::CacheLayer;
use pinakes_core::config::{
AccountsConfig, AnalyticsConfig, CloudConfig, Config, DirectoryConfig, EnrichmentConfig,
JobsConfig, PhotoConfig, PluginsConfig, ScanningConfig, ServerConfig, SqliteConfig,
StorageBackendType, StorageConfig, ThumbnailConfig, TlsConfig, TranscodingConfig, UiConfig,
WebhookConfig,
JobsConfig, ManagedStorageConfig, PhotoConfig, PluginsConfig, ScanningConfig, ServerConfig,
SharingConfig, SqliteConfig, StorageBackendType, StorageConfig, SyncConfig, ThumbnailConfig,
TlsConfig, TranscodingConfig, UiConfig, WebhookConfig,
};
use pinakes_core::jobs::JobQueue;
use pinakes_core::plugin::PluginManager;
@ -93,6 +93,9 @@ async fn setup_app_with_plugins() -> (axum::Router, Arc<PluginManager>, tempfile
cloud: CloudConfig::default(),
analytics: AnalyticsConfig::default(),
photos: PhotoConfig::default(),
managed_storage: ManagedStorageConfig::default(),
sync: SyncConfig::default(),
sharing: SharingConfig::default(),
};
let job_queue = JobQueue::new(1, |_id, _kind, _cancel, _jobs| tokio::spawn(async {}));
@ -114,6 +117,7 @@ async fn setup_app_with_plugins() -> (axum::Router, Arc<PluginManager>, tempfile
scheduler: Arc::new(scheduler),
plugin_manager: Some(plugin_manager.clone()),
transcode_service: None,
managed_storage: None,
};
let router = pinakes_server::app::create_router(state);

View file

@ -1412,7 +1412,6 @@ pub fn App() -> Element {
// Check if already importing - if so, add to queue
// Get preview files if available for per-file progress
// Use parallel import with per-batch progress
@ -1439,26 +1438,30 @@ pub fn App() -> Element {
// Update progress after batch
// Extended import state
if *import_in_progress.read() {
import_queue.write().push(file_name);
show_toast("Added to import queue".into(), false);
return;

View file

@ -0,0 +1,30 @@
-- V15: Managed File Storage (PostgreSQL)
-- Adds server-side content-addressable storage for uploaded files
-- Add storage mode to media_items (external = file on disk, managed = in content-addressable storage)
ALTER TABLE media_items ADD COLUMN storage_mode TEXT NOT NULL DEFAULT 'external';
-- Original filename for managed uploads (preserved separately from file_name which may be normalized)
ALTER TABLE media_items ADD COLUMN original_filename TEXT;
-- When the file was uploaded to managed storage
ALTER TABLE media_items ADD COLUMN uploaded_at TIMESTAMPTZ;
-- Storage key for looking up the blob (usually same as content_hash for deduplication)
ALTER TABLE media_items ADD COLUMN storage_key TEXT;
-- Managed blobs table - tracks deduplicated file storage.
-- One row per unique content hash; multiple media items may share a blob.
CREATE TABLE managed_blobs (
content_hash TEXT PRIMARY KEY NOT NULL,
file_size BIGINT NOT NULL,
mime_type TEXT NOT NULL,
reference_count INTEGER NOT NULL DEFAULT 1, -- media items pointing at this blob; 0 = orphan, eligible for GC
stored_at TIMESTAMPTZ NOT NULL,
last_verified TIMESTAMPTZ -- NULL until an integrity check has run
);
-- Index for finding managed media items
CREATE INDEX idx_media_storage_mode ON media_items(storage_mode);
-- Index for finding orphaned blobs (reference_count = 0)
CREATE INDEX idx_blobs_reference_count ON managed_blobs(reference_count);

View file

@ -0,0 +1,103 @@
-- V16: Cross-Device Sync System (PostgreSQL)
-- Adds device registration, change tracking, and chunked upload support

-- Sync devices table: one row per registered client device.
CREATE TABLE sync_devices (
id TEXT PRIMARY KEY NOT NULL,
user_id TEXT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
name TEXT NOT NULL,
device_type TEXT NOT NULL,
client_version TEXT NOT NULL,
os_info TEXT,
-- Hash of the device's auth token; the raw token is never stored.
device_token_hash TEXT NOT NULL UNIQUE,
last_sync_at TIMESTAMPTZ,
last_seen_at TIMESTAMPTZ NOT NULL,
-- Highest sync_log.sequence this device has consumed.
sync_cursor BIGINT DEFAULT 0,
enabled BOOLEAN NOT NULL DEFAULT TRUE,
created_at TIMESTAMPTZ NOT NULL,
updated_at TIMESTAMPTZ NOT NULL
);
CREATE INDEX idx_sync_devices_user ON sync_devices(user_id);
CREATE INDEX idx_sync_devices_token ON sync_devices(device_token_hash);

-- Sync log table - append-only record of all changes, ordered by sequence.
CREATE TABLE sync_log (
id TEXT PRIMARY KEY NOT NULL,
sequence BIGSERIAL UNIQUE NOT NULL,
change_type TEXT NOT NULL,
media_id TEXT REFERENCES media_items(id) ON DELETE SET NULL,
path TEXT NOT NULL,
content_hash TEXT,
file_size BIGINT,
metadata_json TEXT,
changed_by_device TEXT REFERENCES sync_devices(id) ON DELETE SET NULL,
timestamp TIMESTAMPTZ NOT NULL
);
CREATE INDEX idx_sync_log_sequence ON sync_log(sequence);
CREATE INDEX idx_sync_log_path ON sync_log(path);
CREATE INDEX idx_sync_log_timestamp ON sync_log(timestamp);

-- Device sync state - tracks sync status per device per file.
CREATE TABLE device_sync_state (
device_id TEXT NOT NULL REFERENCES sync_devices(id) ON DELETE CASCADE,
path TEXT NOT NULL,
local_hash TEXT,
server_hash TEXT,
local_mtime BIGINT,
server_mtime BIGINT,
sync_status TEXT NOT NULL,
last_synced_at TIMESTAMPTZ,
conflict_info_json TEXT,
PRIMARY KEY (device_id, path)
);
CREATE INDEX idx_device_sync_status ON device_sync_state(device_id, sync_status);

-- Upload sessions for chunked uploads.
CREATE TABLE upload_sessions (
id TEXT PRIMARY KEY NOT NULL,
device_id TEXT NOT NULL REFERENCES sync_devices(id) ON DELETE CASCADE,
target_path TEXT NOT NULL,
expected_hash TEXT NOT NULL,
expected_size BIGINT NOT NULL,
chunk_size BIGINT NOT NULL,
chunk_count BIGINT NOT NULL,
status TEXT NOT NULL,
created_at TIMESTAMPTZ NOT NULL,
expires_at TIMESTAMPTZ NOT NULL,
last_activity TIMESTAMPTZ NOT NULL
);
CREATE INDEX idx_upload_sessions_device ON upload_sessions(device_id);
CREATE INDEX idx_upload_sessions_status ON upload_sessions(status);
CREATE INDEX idx_upload_sessions_expires ON upload_sessions(expires_at);

-- Upload chunks - tracks received chunks.
CREATE TABLE upload_chunks (
upload_id TEXT NOT NULL REFERENCES upload_sessions(id) ON DELETE CASCADE,
chunk_index BIGINT NOT NULL,
-- OFFSET is a fully reserved word in PostgreSQL and cannot appear unquoted
-- as a column name; it must be written as "offset" here AND in every query
-- that references this column.
"offset" BIGINT NOT NULL,
size BIGINT NOT NULL,
hash TEXT NOT NULL,
received_at TIMESTAMPTZ NOT NULL,
PRIMARY KEY (upload_id, chunk_index)
);

-- Sync conflicts: a row is open while resolved_at IS NULL.
CREATE TABLE sync_conflicts (
id TEXT PRIMARY KEY NOT NULL,
device_id TEXT NOT NULL REFERENCES sync_devices(id) ON DELETE CASCADE,
path TEXT NOT NULL,
local_hash TEXT NOT NULL,
local_mtime BIGINT NOT NULL,
server_hash TEXT NOT NULL,
server_mtime BIGINT NOT NULL,
detected_at TIMESTAMPTZ NOT NULL,
resolved_at TIMESTAMPTZ,
resolution TEXT
);
CREATE INDEX idx_sync_conflicts_device ON sync_conflicts(device_id);
-- Partial index for the common "show my unresolved conflicts" query.
CREATE INDEX idx_sync_conflicts_unresolved ON sync_conflicts(device_id) WHERE resolved_at IS NULL;

View file

@ -0,0 +1,83 @@
-- V17: Enhanced Sharing System (PostgreSQL)
-- Replaces simple share_links with comprehensive sharing capabilities
-- Enhanced shares table. Exactly one recipient_* column group is expected to
-- be populated per row, selected by recipient_type.
CREATE TABLE shares (
id TEXT PRIMARY KEY NOT NULL,
target_type TEXT NOT NULL CHECK (target_type IN ('media', 'collection', 'tag', 'saved_search')),
target_id TEXT NOT NULL,
owner_id TEXT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
recipient_type TEXT NOT NULL CHECK (recipient_type IN ('public_link', 'user', 'group', 'federated')),
recipient_user_id TEXT REFERENCES users(id) ON DELETE CASCADE,
recipient_group_id TEXT,
recipient_federated_handle TEXT,
recipient_federated_server TEXT,
public_token TEXT UNIQUE,
public_password_hash TEXT,
perm_view BOOLEAN NOT NULL DEFAULT TRUE,
perm_download BOOLEAN NOT NULL DEFAULT FALSE,
perm_edit BOOLEAN NOT NULL DEFAULT FALSE,
perm_delete BOOLEAN NOT NULL DEFAULT FALSE,
perm_reshare BOOLEAN NOT NULL DEFAULT FALSE,
perm_add BOOLEAN NOT NULL DEFAULT FALSE,
note TEXT,
expires_at TIMESTAMPTZ,
access_count BIGINT NOT NULL DEFAULT 0,
last_accessed TIMESTAMPTZ,
inherit_to_children BOOLEAN NOT NULL DEFAULT TRUE,
parent_share_id TEXT REFERENCES shares(id) ON DELETE CASCADE,
created_at TIMESTAMPTZ NOT NULL,
updated_at TIMESTAMPTZ NOT NULL,
UNIQUE(owner_id, target_type, target_id, recipient_type, recipient_user_id)
);
CREATE INDEX idx_shares_owner ON shares(owner_id);
CREATE INDEX idx_shares_recipient_user ON shares(recipient_user_id);
CREATE INDEX idx_shares_target ON shares(target_type, target_id);
CREATE INDEX idx_shares_token ON shares(public_token);
CREATE INDEX idx_shares_expires ON shares(expires_at);
-- Share activity log (audit trail; actor kept via SET NULL after user deletion)
CREATE TABLE share_activity (
id TEXT PRIMARY KEY NOT NULL,
share_id TEXT NOT NULL REFERENCES shares(id) ON DELETE CASCADE,
actor_id TEXT REFERENCES users(id) ON DELETE SET NULL,
actor_ip TEXT,
action TEXT NOT NULL,
details TEXT,
timestamp TIMESTAMPTZ NOT NULL
);
CREATE INDEX idx_share_activity_share ON share_activity(share_id);
CREATE INDEX idx_share_activity_timestamp ON share_activity(timestamp);
-- Share notifications
CREATE TABLE share_notifications (
id TEXT PRIMARY KEY NOT NULL,
user_id TEXT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
share_id TEXT NOT NULL REFERENCES shares(id) ON DELETE CASCADE,
notification_type TEXT NOT NULL,
is_read BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMPTZ NOT NULL
);
CREATE INDEX idx_share_notifications_user ON share_notifications(user_id);
-- Partial index for the common "unread notifications" query.
CREATE INDEX idx_share_notifications_unread ON share_notifications(user_id) WHERE is_read = FALSE;
-- Migrate existing share_links to new shares table.
-- PL/pgSQL parses statements inside the IF lazily at execution time, so the
-- INSERT referencing share_links is never prepared (and never errors) when
-- the legacy table is absent.
DO $$
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = 'share_links') THEN
INSERT INTO shares (
id, target_type, target_id, owner_id, recipient_type,
public_token, public_password_hash, perm_view, perm_download,
access_count, expires_at, created_at, updated_at
)
SELECT
id, 'media', media_id, created_by, 'public_link',
token, password_hash, TRUE, TRUE,
view_count, expires_at, created_at, created_at
FROM share_links
ON CONFLICT DO NOTHING;
END IF;
END $$;

View file

@ -0,0 +1,30 @@
-- V15: Managed File Storage (SQLite)
-- Adds server-side content-addressable storage for uploaded files
-- Timestamps are stored as TEXT (presumably RFC 3339 strings — confirm
-- against the storage layer's serialization).
-- Add storage mode to media_items (external = file on disk, managed = in content-addressable storage)
ALTER TABLE media_items ADD COLUMN storage_mode TEXT NOT NULL DEFAULT 'external';
-- Original filename for managed uploads (preserved separately from file_name which may be normalized)
ALTER TABLE media_items ADD COLUMN original_filename TEXT;
-- When the file was uploaded to managed storage
ALTER TABLE media_items ADD COLUMN uploaded_at TEXT;
-- Storage key for looking up the blob (usually same as content_hash for deduplication)
ALTER TABLE media_items ADD COLUMN storage_key TEXT;
-- Managed blobs table - tracks deduplicated file storage.
-- One row per unique content hash; multiple media items may share a blob.
CREATE TABLE managed_blobs (
content_hash TEXT PRIMARY KEY NOT NULL,
file_size INTEGER NOT NULL,
mime_type TEXT NOT NULL,
reference_count INTEGER NOT NULL DEFAULT 1, -- media items pointing at this blob; 0 = orphan, eligible for GC
stored_at TEXT NOT NULL,
last_verified TEXT -- NULL until an integrity check has run
);
-- Index for finding managed media items
CREATE INDEX idx_media_storage_mode ON media_items(storage_mode);
-- Index for finding orphaned blobs (reference_count = 0)
CREATE INDEX idx_blobs_reference_count ON managed_blobs(reference_count);

View file

@ -0,0 +1,117 @@
-- V16: Cross-Device Sync System (SQLite)
-- Adds device registration, change tracking, and chunked upload support
-- Timestamps are TEXT; booleans are INTEGER 0/1 (SQLite has no native types
-- for either).
-- Sync devices table: one row per registered client device
CREATE TABLE sync_devices (
id TEXT PRIMARY KEY NOT NULL,
user_id TEXT NOT NULL,
name TEXT NOT NULL,
device_type TEXT NOT NULL,
client_version TEXT NOT NULL,
os_info TEXT,
-- Hash of the device's auth token; the raw token is never stored
device_token_hash TEXT NOT NULL UNIQUE,
last_sync_at TEXT,
last_seen_at TEXT NOT NULL,
-- Highest sync_log.sequence this device has consumed
sync_cursor INTEGER DEFAULT 0,
enabled INTEGER NOT NULL DEFAULT 1,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
);
CREATE INDEX idx_sync_devices_user ON sync_devices(user_id);
CREATE INDEX idx_sync_devices_token ON sync_devices(device_token_hash);
-- Sync log table - append-only record of all changes, ordered by sequence
CREATE TABLE sync_log (
id TEXT PRIMARY KEY NOT NULL,
sequence INTEGER NOT NULL UNIQUE,
change_type TEXT NOT NULL,
media_id TEXT,
path TEXT NOT NULL,
content_hash TEXT,
file_size INTEGER,
metadata_json TEXT,
changed_by_device TEXT,
timestamp TEXT NOT NULL,
FOREIGN KEY (media_id) REFERENCES media_items(id) ON DELETE SET NULL,
FOREIGN KEY (changed_by_device) REFERENCES sync_devices(id) ON DELETE SET NULL
);
CREATE INDEX idx_sync_log_sequence ON sync_log(sequence);
CREATE INDEX idx_sync_log_path ON sync_log(path);
CREATE INDEX idx_sync_log_timestamp ON sync_log(timestamp);
-- Sequence counter for sync log. SQLite has no sequences, so the PostgreSQL
-- BIGSERIAL is emulated with this single-row counter (CHECK pins id = 1);
-- the application is expected to increment current_value atomically.
CREATE TABLE sync_sequence (
id INTEGER PRIMARY KEY CHECK (id = 1),
current_value INTEGER NOT NULL DEFAULT 0
);
INSERT INTO sync_sequence (id, current_value) VALUES (1, 0);
-- Device sync state - tracks sync status per device per file
CREATE TABLE device_sync_state (
device_id TEXT NOT NULL,
path TEXT NOT NULL,
local_hash TEXT,
server_hash TEXT,
local_mtime INTEGER,
server_mtime INTEGER,
sync_status TEXT NOT NULL,
last_synced_at TEXT,
conflict_info_json TEXT,
PRIMARY KEY (device_id, path),
FOREIGN KEY (device_id) REFERENCES sync_devices(id) ON DELETE CASCADE
);
CREATE INDEX idx_device_sync_status ON device_sync_state(device_id, sync_status);
-- Upload sessions for chunked uploads
CREATE TABLE upload_sessions (
id TEXT PRIMARY KEY NOT NULL,
device_id TEXT NOT NULL,
target_path TEXT NOT NULL,
expected_hash TEXT NOT NULL,
expected_size INTEGER NOT NULL,
chunk_size INTEGER NOT NULL,
chunk_count INTEGER NOT NULL,
status TEXT NOT NULL,
created_at TEXT NOT NULL,
expires_at TEXT NOT NULL,
last_activity TEXT NOT NULL,
FOREIGN KEY (device_id) REFERENCES sync_devices(id) ON DELETE CASCADE
);
CREATE INDEX idx_upload_sessions_device ON upload_sessions(device_id);
CREATE INDEX idx_upload_sessions_status ON upload_sessions(status);
CREATE INDEX idx_upload_sessions_expires ON upload_sessions(expires_at);
-- Upload chunks - tracks received chunks.
-- NOTE(review): `offset` is an SQL keyword. SQLite accepts it as a bare
-- column name (keyword fallback), but the PostgreSQL variant of this
-- migration must quote it — keep queries portable by quoting "offset".
CREATE TABLE upload_chunks (
upload_id TEXT NOT NULL,
chunk_index INTEGER NOT NULL,
offset INTEGER NOT NULL,
size INTEGER NOT NULL,
hash TEXT NOT NULL,
received_at TEXT NOT NULL,
PRIMARY KEY (upload_id, chunk_index),
FOREIGN KEY (upload_id) REFERENCES upload_sessions(id) ON DELETE CASCADE
);
-- Sync conflicts: a row is open while resolved_at IS NULL
CREATE TABLE sync_conflicts (
id TEXT PRIMARY KEY NOT NULL,
device_id TEXT NOT NULL,
path TEXT NOT NULL,
local_hash TEXT NOT NULL,
local_mtime INTEGER NOT NULL,
server_hash TEXT NOT NULL,
server_mtime INTEGER NOT NULL,
detected_at TEXT NOT NULL,
resolved_at TEXT,
resolution TEXT,
FOREIGN KEY (device_id) REFERENCES sync_devices(id) ON DELETE CASCADE
);
CREATE INDEX idx_sync_conflicts_device ON sync_conflicts(device_id);
-- Partial index for the common "show my unresolved conflicts" query
CREATE INDEX idx_sync_conflicts_unresolved ON sync_conflicts(device_id, resolved_at) WHERE resolved_at IS NULL;

View file

@ -0,0 +1,85 @@
-- V17: Enhanced Sharing System (SQLite)
-- Replaces simple share_links with comprehensive sharing capabilities
-- Booleans are INTEGER 0/1; timestamps are TEXT.

-- Enhanced shares table. Exactly one recipient_* column group is expected
-- to be populated per row, selected by recipient_type.
CREATE TABLE shares (
id TEXT PRIMARY KEY NOT NULL,
target_type TEXT NOT NULL CHECK (target_type IN ('media', 'collection', 'tag', 'saved_search')),
target_id TEXT NOT NULL,
owner_id TEXT NOT NULL,
recipient_type TEXT NOT NULL CHECK (recipient_type IN ('public_link', 'user', 'group', 'federated')),
recipient_user_id TEXT,
recipient_group_id TEXT,
recipient_federated_handle TEXT,
recipient_federated_server TEXT,
public_token TEXT UNIQUE,
public_password_hash TEXT,
perm_view INTEGER NOT NULL DEFAULT 1,
perm_download INTEGER NOT NULL DEFAULT 0,
perm_edit INTEGER NOT NULL DEFAULT 0,
perm_delete INTEGER NOT NULL DEFAULT 0,
perm_reshare INTEGER NOT NULL DEFAULT 0,
perm_add INTEGER NOT NULL DEFAULT 0,
note TEXT,
expires_at TEXT,
access_count INTEGER NOT NULL DEFAULT 0,
last_accessed TEXT,
inherit_to_children INTEGER NOT NULL DEFAULT 1,
parent_share_id TEXT,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY (owner_id) REFERENCES users(id) ON DELETE CASCADE,
FOREIGN KEY (recipient_user_id) REFERENCES users(id) ON DELETE CASCADE,
FOREIGN KEY (parent_share_id) REFERENCES shares(id) ON DELETE CASCADE,
UNIQUE(owner_id, target_type, target_id, recipient_type, recipient_user_id)
);
CREATE INDEX idx_shares_owner ON shares(owner_id);
CREATE INDEX idx_shares_recipient_user ON shares(recipient_user_id);
CREATE INDEX idx_shares_target ON shares(target_type, target_id);
CREATE INDEX idx_shares_token ON shares(public_token);
CREATE INDEX idx_shares_expires ON shares(expires_at);

-- Share activity log (audit trail; actor kept via SET NULL after user deletion)
CREATE TABLE share_activity (
id TEXT PRIMARY KEY NOT NULL,
share_id TEXT NOT NULL,
actor_id TEXT,
actor_ip TEXT,
action TEXT NOT NULL,
details TEXT,
timestamp TEXT NOT NULL,
FOREIGN KEY (share_id) REFERENCES shares(id) ON DELETE CASCADE,
FOREIGN KEY (actor_id) REFERENCES users(id) ON DELETE SET NULL
);
CREATE INDEX idx_share_activity_share ON share_activity(share_id);
CREATE INDEX idx_share_activity_timestamp ON share_activity(timestamp);

-- Share notifications
CREATE TABLE share_notifications (
id TEXT PRIMARY KEY NOT NULL,
user_id TEXT NOT NULL,
share_id TEXT NOT NULL,
notification_type TEXT NOT NULL,
is_read INTEGER NOT NULL DEFAULT 0,
created_at TEXT NOT NULL,
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE,
FOREIGN KEY (share_id) REFERENCES shares(id) ON DELETE CASCADE
);
CREATE INDEX idx_share_notifications_user ON share_notifications(user_id);
-- Partial index for the common "unread notifications" query
CREATE INDEX idx_share_notifications_unread ON share_notifications(user_id, is_read) WHERE is_read = 0;

-- Migrate existing share_links to new shares table.
-- SQLite resolves table names when a statement is PREPARED, so guarding a
-- `FROM share_links` with `WHERE EXISTS (SELECT ... FROM sqlite_master ...)`
-- does NOT work: on a fresh install the whole migration would abort with
-- "no such table: share_links" before the WHERE clause is ever evaluated.
-- Instead, create an empty legacy-shaped table if it is absent so the INSERT
-- always prepares; on upgraded installs IF NOT EXISTS is a no-op and the
-- real rows are copied.
CREATE TABLE IF NOT EXISTS share_links (
id TEXT PRIMARY KEY NOT NULL,
media_id TEXT,
created_by TEXT,
token TEXT,
password_hash TEXT,
view_count INTEGER NOT NULL DEFAULT 0,
expires_at TEXT,
created_at TEXT
);
INSERT OR IGNORE INTO shares (
id, target_type, target_id, owner_id, recipient_type,
public_token, public_password_hash, perm_view, perm_download,
access_count, expires_at, created_at, updated_at
)
SELECT
id, 'media', media_id, created_by, 'public_link',
token, password_hash, 1, 1,
view_count, expires_at, created_at, created_at
FROM share_links;
-- NOTE(review): the legacy share_links table is intentionally left in place;
-- drop it in a later migration once the new shares table is proven.